db_test.cc 208.8 KB
Newer Older
1
//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
S
Siying Dong 已提交
2 3 4
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).
5
//
J
jorlow@chromium.org 已提交
6 7 8 9
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

S
sdong 已提交
10 11
// Introduction of SyncPoint effectively disabled building and running this test
// in Release build.
D
Dmitri Smirnov 已提交
12
// which is a pity, it is a good test
A
Aaron Gao 已提交
13
#include <fcntl.h>
14
#include <algorithm>
15
#include <set>
16
#include <thread>
17
#include <unordered_set>
S
Stanislau Hlebik 已提交
18
#include <utility>
S
sdong 已提交
19 20 21
#ifndef OS_WIN
#include <unistd.h>
#endif
D
David Bernard 已提交
22
#ifdef OS_SOLARIS
D
David Bernard 已提交
23
#include <alloca.h>
D
David Bernard 已提交
24
#endif
25

26
#include "cache/lru_cache.h"
27
#include "db/db_impl/db_impl.h"
28
#include "db/db_test_util.h"
A
Aaron Gao 已提交
29
#include "db/dbformat.h"
I
Igor Canadi 已提交
30
#include "db/job_context.h"
J
jorlow@chromium.org 已提交
31 32
#include "db/version_set.h"
#include "db/write_batch_internal.h"
33
#include "env/mock_env.h"
34
#include "file/filename.h"
35
#include "memtable/hash_linklist_rep.h"
36
#include "monitoring/thread_status_util.h"
D
Dmitri Smirnov 已提交
37
#include "port/port.h"
I
Igor Canadi 已提交
38
#include "port/stack_trace.h"
39 40
#include "rocksdb/cache.h"
#include "rocksdb/compaction_filter.h"
A
agiardullo 已提交
41
#include "rocksdb/convenience.h"
K
Kai Liu 已提交
42
#include "rocksdb/db.h"
43
#include "rocksdb/env.h"
44
#include "rocksdb/experimental.h"
K
Kai Liu 已提交
45
#include "rocksdb/filter_policy.h"
A
agiardullo 已提交
46
#include "rocksdb/options.h"
47
#include "rocksdb/perf_context.h"
48 49
#include "rocksdb/slice.h"
#include "rocksdb/slice_transform.h"
A
agiardullo 已提交
50
#include "rocksdb/snapshot.h"
S
Siying Dong 已提交
51
#include "rocksdb/table.h"
52
#include "rocksdb/table_properties.h"
Y
Yueh-Hsuan Chiang 已提交
53
#include "rocksdb/thread_status.h"
54
#include "rocksdb/utilities/checkpoint.h"
A
agiardullo 已提交
55
#include "rocksdb/utilities/optimistic_transaction_db.h"
A
Aaron Gao 已提交
56
#include "rocksdb/utilities/write_batch_with_index.h"
57
#include "table/block_based/block_based_table_factory.h"
58
#include "table/mock_table.h"
59
#include "table/plain/plain_table_factory.h"
S
sdong 已提交
60
#include "table/scoped_arena_iterator.h"
61 62 63
#include "test_util/sync_point.h"
#include "test_util/testharness.h"
#include "test_util/testutil.h"
A
Aaron Gao 已提交
64
#include "util/compression.h"
65
#include "util/file_reader_writer.h"
66
#include "util/mutexlock.h"
L
Lei Jin 已提交
67
#include "util/rate_limiter.h"
A
Aaron Gao 已提交
68 69
#include "util/string_util.h"
#include "utilities/merge_operators.h"
J
jorlow@chromium.org 已提交
70

71
namespace rocksdb {
J
jorlow@chromium.org 已提交
72

73 74 75 76 77
class DBTest : public DBTestBase {
 public:
  DBTest() : DBTestBase("/db_test") {}
};

78 79 80
class DBTestWithParam
    : public DBTest,
      public testing::WithParamInterface<std::tuple<uint32_t, bool>> {
81
 public:
82 83 84 85
  DBTestWithParam() {
    max_subcompactions_ = std::get<0>(GetParam());
    exclusive_manual_compaction_ = std::get<1>(GetParam());
  }
86 87 88 89 90

  // Required if inheriting from testing::WithParamInterface<>
  static void SetUpTestCase() {}
  static void TearDownTestCase() {}

91
  uint32_t max_subcompactions_;
92
  bool exclusive_manual_compaction_;
93
};
J
jorlow@chromium.org 已提交
94

95
TEST_F(DBTest, MockEnvTest) {
96
  std::unique_ptr<MockEnv> env{new MockEnv(Env::Default())};
97 98 99 100
  Options options;
  options.create_if_missing = true;
  options.env = env.get();
  DB* db;
101

102 103
  const Slice keys[] = {Slice("aaa"), Slice("bbb"), Slice("ccc")};
  const Slice vals[] = {Slice("foo"), Slice("bar"), Slice("baz")};
K
kailiu 已提交
104

105 106 107 108
  ASSERT_OK(DB::Open(options, "/dir/db", &db));
  for (size_t i = 0; i < 3; ++i) {
    ASSERT_OK(db->Put(WriteOptions(), keys[i], vals[i]));
  }
109

110 111 112 113 114
  for (size_t i = 0; i < 3; ++i) {
    std::string res;
    ASSERT_OK(db->Get(ReadOptions(), keys[i], &res));
    ASSERT_TRUE(res == vals[i]);
  }
115

116 117 118 119 120 121 122 123 124 125
  Iterator* iterator = db->NewIterator(ReadOptions());
  iterator->SeekToFirst();
  for (size_t i = 0; i < 3; ++i) {
    ASSERT_TRUE(iterator->Valid());
    ASSERT_TRUE(keys[i] == iterator->key());
    ASSERT_TRUE(vals[i] == iterator->value());
    iterator->Next();
  }
  ASSERT_TRUE(!iterator->Valid());
  delete iterator;
126

A
Aaron Gao 已提交
127 128
// TEST_FlushMemTable() is not supported in ROCKSDB_LITE
#ifndef ROCKSDB_LITE
129 130
  DBImpl* dbi = reinterpret_cast<DBImpl*>(db);
  ASSERT_OK(dbi->TEST_FlushMemTable());
131

132 133 134 135 136
  for (size_t i = 0; i < 3; ++i) {
    std::string res;
    ASSERT_OK(db->Get(ReadOptions(), keys[i], &res));
    ASSERT_TRUE(res == vals[i]);
  }
A
Aaron Gao 已提交
137
#endif  // ROCKSDB_LITE
138

139 140
  delete db;
}
141

A
Andrew Kryczka 已提交
142 143 144
// NewMemEnv returns nullptr in ROCKSDB_LITE since class InMemoryEnv isn't
// defined.
#ifndef ROCKSDB_LITE
145
TEST_F(DBTest, MemEnvTest) {
146
  std::unique_ptr<Env> env{NewMemEnv(Env::Default())};
147 148 149 150
  Options options;
  options.create_if_missing = true;
  options.env = env.get();
  DB* db;
151

152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195
  const Slice keys[] = {Slice("aaa"), Slice("bbb"), Slice("ccc")};
  const Slice vals[] = {Slice("foo"), Slice("bar"), Slice("baz")};

  ASSERT_OK(DB::Open(options, "/dir/db", &db));
  for (size_t i = 0; i < 3; ++i) {
    ASSERT_OK(db->Put(WriteOptions(), keys[i], vals[i]));
  }

  for (size_t i = 0; i < 3; ++i) {
    std::string res;
    ASSERT_OK(db->Get(ReadOptions(), keys[i], &res));
    ASSERT_TRUE(res == vals[i]);
  }

  Iterator* iterator = db->NewIterator(ReadOptions());
  iterator->SeekToFirst();
  for (size_t i = 0; i < 3; ++i) {
    ASSERT_TRUE(iterator->Valid());
    ASSERT_TRUE(keys[i] == iterator->key());
    ASSERT_TRUE(vals[i] == iterator->value());
    iterator->Next();
  }
  ASSERT_TRUE(!iterator->Valid());
  delete iterator;

  DBImpl* dbi = reinterpret_cast<DBImpl*>(db);
  ASSERT_OK(dbi->TEST_FlushMemTable());

  for (size_t i = 0; i < 3; ++i) {
    std::string res;
    ASSERT_OK(db->Get(ReadOptions(), keys[i], &res));
    ASSERT_TRUE(res == vals[i]);
  }

  delete db;

  options.create_if_missing = false;
  ASSERT_OK(DB::Open(options, "/dir/db", &db));
  for (size_t i = 0; i < 3; ++i) {
    std::string res;
    ASSERT_OK(db->Get(ReadOptions(), keys[i], &res));
    ASSERT_TRUE(res == vals[i]);
  }
  delete db;
J
jorlow@chromium.org 已提交
196
}
I
Islam AbdelRahman 已提交
197
#endif  // ROCKSDB_LITE
J
jorlow@chromium.org 已提交
198

I
Igor Sugak 已提交
199
TEST_F(DBTest, WriteEmptyBatch) {
S
sdong 已提交
200
  Options options = CurrentOptions();
201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216
  options.env = env_;
  options.write_buffer_size = 100000;
  CreateAndReopenWithCF({"pikachu"}, options);

  ASSERT_OK(Put(1, "foo", "bar"));
  WriteOptions wo;
  wo.sync = true;
  wo.disableWAL = false;
  WriteBatch empty_batch;
  ASSERT_OK(dbfull()->Write(wo, &empty_batch));

  // make sure we can re-open it.
  ASSERT_OK(TryReopenWithColumnFamilies({"default", "pikachu"}, options));
  ASSERT_EQ("bar", Get(1, "foo"));
}

M
Maysam Yabandeh 已提交
217 218 219 220 221 222 223 224
TEST_F(DBTest, SkipDelay) {
  Options options = CurrentOptions();
  options.env = env_;
  options.write_buffer_size = 100000;
  CreateAndReopenWithCF({"pikachu"}, options);

  for (bool sync : {true, false}) {
    for (bool disableWAL : {true, false}) {
225 226 227 228
      if (sync && disableWAL) {
        // sync and disableWAL is incompatible.
        continue;
      }
M
Maysam Yabandeh 已提交
229 230 231 232 233 234 235 236
      // Use a small number to ensure a large delay that is still effective
      // when we do Put
      // TODO(myabandeh): this is time dependent and could potentially make
      // the test flaky
      auto token = dbfull()->TEST_write_controler().GetDelayToken(1);
      std::atomic<int> sleep_count(0);
      rocksdb::SyncPoint::GetInstance()->SetCallBack(
          "DBImpl::DelayWrite:Sleep",
237
          [&](void* /*arg*/) { sleep_count.fetch_add(1); });
M
Maysam Yabandeh 已提交
238 239 240
      std::atomic<int> wait_count(0);
      rocksdb::SyncPoint::GetInstance()->SetCallBack(
          "DBImpl::DelayWrite:Wait",
241
          [&](void* /*arg*/) { wait_count.fetch_add(1); });
M
Maysam Yabandeh 已提交
242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264
      rocksdb::SyncPoint::GetInstance()->EnableProcessing();

      WriteOptions wo;
      wo.sync = sync;
      wo.disableWAL = disableWAL;
      wo.no_slowdown = true;
      dbfull()->Put(wo, "foo", "bar");
      // We need the 2nd write to trigger delay. This is because delay is
      // estimated based on the last write size which is 0 for the first write.
      ASSERT_NOK(dbfull()->Put(wo, "foo2", "bar2"));
      ASSERT_GE(sleep_count.load(), 0);
      ASSERT_GE(wait_count.load(), 0);
      token.reset();

      token = dbfull()->TEST_write_controler().GetDelayToken(1000000000);
      wo.no_slowdown = false;
      ASSERT_OK(dbfull()->Put(wo, "foo3", "bar3"));
      ASSERT_GE(sleep_count.load(), 1);
      token.reset();
    }
  }
}

265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454
TEST_F(DBTest, MixedSlowdownOptions) {
  Options options = CurrentOptions();
  options.env = env_;
  options.write_buffer_size = 100000;
  CreateAndReopenWithCF({"pikachu"}, options);
  std::vector<port::Thread> threads;
  std::atomic<int> thread_num(0);

  std::function<void()> write_slowdown_func = [&]() {
    int a = thread_num.fetch_add(1);
    std::string key = "foo" + std::to_string(a);
    WriteOptions wo;
    wo.no_slowdown = false;
    ASSERT_OK(dbfull()->Put(wo, key, "bar"));
  };
  std::function<void()> write_no_slowdown_func = [&]() {
    int a = thread_num.fetch_add(1);
    std::string key = "foo" + std::to_string(a);
    WriteOptions wo;
    wo.no_slowdown = true;
    ASSERT_NOK(dbfull()->Put(wo, key, "bar"));
  };
  // Use a small number to ensure a large delay that is still effective
  // when we do Put
  // TODO(myabandeh): this is time dependent and could potentially make
  // the test flaky
  auto token = dbfull()->TEST_write_controler().GetDelayToken(1);
  std::atomic<int> sleep_count(0);
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "DBImpl::DelayWrite:BeginWriteStallDone",
      [&](void* /*arg*/) {
        sleep_count.fetch_add(1);
        if (threads.empty()) {
          for (int i = 0; i < 2; ++i) {
            threads.emplace_back(write_slowdown_func);
          }
          for (int i = 0; i < 2; ++i) {
            threads.emplace_back(write_no_slowdown_func);
          }
        }
      });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();

  WriteOptions wo;
  wo.sync = false;
  wo.disableWAL = false;
  wo.no_slowdown = false;
  dbfull()->Put(wo, "foo", "bar");
  // We need the 2nd write to trigger delay. This is because delay is
  // estimated based on the last write size which is 0 for the first write.
  ASSERT_OK(dbfull()->Put(wo, "foo2", "bar2"));
          token.reset();

  for (auto& t : threads) {
    t.join();
  }
  ASSERT_GE(sleep_count.load(), 1);

  wo.no_slowdown = true;
  ASSERT_OK(dbfull()->Put(wo, "foo3", "bar"));
}

TEST_F(DBTest, MixedSlowdownOptionsInQueue) {
  Options options = CurrentOptions();
  options.env = env_;
  options.write_buffer_size = 100000;
  CreateAndReopenWithCF({"pikachu"}, options);
  std::vector<port::Thread> threads;
  std::atomic<int> thread_num(0);

  std::function<void()> write_no_slowdown_func = [&]() {
    int a = thread_num.fetch_add(1);
    std::string key = "foo" + std::to_string(a);
    WriteOptions wo;
    wo.no_slowdown = true;
    ASSERT_NOK(dbfull()->Put(wo, key, "bar"));
  };
  // Use a small number to ensure a large delay that is still effective
  // when we do Put
  // TODO(myabandeh): this is time dependent and could potentially make
  // the test flaky
  auto token = dbfull()->TEST_write_controler().GetDelayToken(1);
  std::atomic<int> sleep_count(0);
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "DBImpl::DelayWrite:Sleep",
      [&](void* /*arg*/) {
        sleep_count.fetch_add(1);
        if (threads.empty()) {
          for (int i = 0; i < 2; ++i) {
            threads.emplace_back(write_no_slowdown_func);
          }
          // Sleep for 2s to allow the threads to insert themselves into the
          // write queue
          env_->SleepForMicroseconds(3000000ULL);
        }
      });
  std::atomic<int> wait_count(0);
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "DBImpl::DelayWrite:Wait",
      [&](void* /*arg*/) { wait_count.fetch_add(1); });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();

  WriteOptions wo;
  wo.sync = false;
  wo.disableWAL = false;
  wo.no_slowdown = false;
  dbfull()->Put(wo, "foo", "bar");
  // We need the 2nd write to trigger delay. This is because delay is
  // estimated based on the last write size which is 0 for the first write.
  ASSERT_OK(dbfull()->Put(wo, "foo2", "bar2"));
          token.reset();

  for (auto& t : threads) {
    t.join();
  }
  ASSERT_EQ(sleep_count.load(), 1);
  ASSERT_GE(wait_count.load(), 0);
}

TEST_F(DBTest, MixedSlowdownOptionsStop) {
  Options options = CurrentOptions();
  options.env = env_;
  options.write_buffer_size = 100000;
  CreateAndReopenWithCF({"pikachu"}, options);
  std::vector<port::Thread> threads;
  std::atomic<int> thread_num(0);

  std::function<void()> write_slowdown_func = [&]() {
    int a = thread_num.fetch_add(1);
    std::string key = "foo" + std::to_string(a);
    WriteOptions wo;
    wo.no_slowdown = false;
    ASSERT_OK(dbfull()->Put(wo, key, "bar"));
  };
  std::function<void()> write_no_slowdown_func = [&]() {
    int a = thread_num.fetch_add(1);
    std::string key = "foo" + std::to_string(a);
    WriteOptions wo;
    wo.no_slowdown = true;
    ASSERT_NOK(dbfull()->Put(wo, key, "bar"));
  };
  std::function<void()> wakeup_writer = [&]() {
    dbfull()->mutex_.Lock();
    dbfull()->bg_cv_.SignalAll();
    dbfull()->mutex_.Unlock();
  };
  // Use a small number to ensure a large delay that is still effective
  // when we do Put
  // TODO(myabandeh): this is time dependent and could potentially make
  // the test flaky
  auto token = dbfull()->TEST_write_controler().GetStopToken();
  std::atomic<int> wait_count(0);
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "DBImpl::DelayWrite:Wait",
      [&](void* /*arg*/) {
        wait_count.fetch_add(1);
        if (threads.empty()) {
          for (int i = 0; i < 2; ++i) {
            threads.emplace_back(write_slowdown_func);
          }
          for (int i = 0; i < 2; ++i) {
            threads.emplace_back(write_no_slowdown_func);
          }
          // Sleep for 2s to allow the threads to insert themselves into the
          // write queue
          env_->SleepForMicroseconds(3000000ULL);
        }
        token.reset();
        threads.emplace_back(wakeup_writer);
      });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();

  WriteOptions wo;
  wo.sync = false;
  wo.disableWAL = false;
  wo.no_slowdown = false;
  dbfull()->Put(wo, "foo", "bar");
  // We need the 2nd write to trigger delay. This is because delay is
  // estimated based on the last write size which is 0 for the first write.
  ASSERT_OK(dbfull()->Put(wo, "foo2", "bar2"));
          token.reset();

  for (auto& t : threads) {
    t.join();
  }
  ASSERT_GE(wait_count.load(), 1);

  wo.no_slowdown = true;
  ASSERT_OK(dbfull()->Put(wo, "foo3", "bar"));
}
I
Islam AbdelRahman 已提交
455
#ifndef ROCKSDB_LITE
L
Lei Jin 已提交
456

I
Igor Sugak 已提交
457
TEST_F(DBTest, LevelLimitReopen) {
458
  Options options = CurrentOptions();
L
Lei Jin 已提交
459
  CreateAndReopenWithCF({"pikachu"}, options);
460 461 462

  const std::string value(1024 * 1024, ' ');
  int i = 0;
463 464
  while (NumTableFilesAtLevel(2, 1) == 0) {
    ASSERT_OK(Put(1, Key(i++), value));
465 466 467
  }

  options.num_levels = 1;
468
  options.max_bytes_for_level_multiplier_additional.resize(1, 1);
L
Lei Jin 已提交
469
  Status s = TryReopenWithColumnFamilies({"default", "pikachu"}, options);
470
  ASSERT_EQ(s.IsInvalidArgument(), true);
471
  ASSERT_EQ(s.ToString(),
472
            "Invalid argument: db has more levels than options.num_levels");
473 474

  options.num_levels = 10;
475
  options.max_bytes_for_level_multiplier_additional.resize(10, 1);
L
Lei Jin 已提交
476
  ASSERT_OK(TryReopenWithColumnFamilies({"default", "pikachu"}, options));
477
}
I
Islam AbdelRahman 已提交
478
#endif  // ROCKSDB_LITE
479

J
jorlow@chromium.org 已提交
480

A
Andres Noetzli 已提交
481 482 483 484 485 486 487 488 489
TEST_F(DBTest, PutSingleDeleteGet) {
  do {
    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
    ASSERT_OK(Put(1, "foo", "v1"));
    ASSERT_EQ("v1", Get(1, "foo"));
    ASSERT_OK(Put(1, "foo2", "v2"));
    ASSERT_EQ("v2", Get(1, "foo2"));
    ASSERT_OK(SingleDelete(1, "foo"));
    ASSERT_EQ("NOT_FOUND", Get(1, "foo"));
490 491 492 493 494
    // Skip FIFO and universal compaction beccause they do not apply to the test
    // case. Skip MergePut because single delete does not get removed when it
    // encounters a merge.
  } while (ChangeOptions(kSkipFIFOCompaction | kSkipUniversalCompaction |
                         kSkipMergePut));
A
Andres Noetzli 已提交
495 496
}

497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606
TEST_F(DBTest, ReadFromPersistedTier) {
  do {
    Random rnd(301);
    Options options = CurrentOptions();
    for (int disableWAL = 0; disableWAL <= 1; ++disableWAL) {
      CreateAndReopenWithCF({"pikachu"}, options);
      WriteOptions wopt;
      wopt.disableWAL = (disableWAL == 1);
      // 1st round: put but not flush
      ASSERT_OK(db_->Put(wopt, handles_[1], "foo", "first"));
      ASSERT_OK(db_->Put(wopt, handles_[1], "bar", "one"));
      ASSERT_EQ("first", Get(1, "foo"));
      ASSERT_EQ("one", Get(1, "bar"));

      // Read directly from persited data.
      ReadOptions ropt;
      ropt.read_tier = kPersistedTier;
      std::string value;
      if (wopt.disableWAL) {
        // as data has not yet being flushed, we expect not found.
        ASSERT_TRUE(db_->Get(ropt, handles_[1], "foo", &value).IsNotFound());
        ASSERT_TRUE(db_->Get(ropt, handles_[1], "bar", &value).IsNotFound());
      } else {
        ASSERT_OK(db_->Get(ropt, handles_[1], "foo", &value));
        ASSERT_OK(db_->Get(ropt, handles_[1], "bar", &value));
      }

      // Multiget
      std::vector<ColumnFamilyHandle*> multiget_cfs;
      multiget_cfs.push_back(handles_[1]);
      multiget_cfs.push_back(handles_[1]);
      std::vector<Slice> multiget_keys;
      multiget_keys.push_back("foo");
      multiget_keys.push_back("bar");
      std::vector<std::string> multiget_values;
      auto statuses =
          db_->MultiGet(ropt, multiget_cfs, multiget_keys, &multiget_values);
      if (wopt.disableWAL) {
        ASSERT_TRUE(statuses[0].IsNotFound());
        ASSERT_TRUE(statuses[1].IsNotFound());
      } else {
        ASSERT_OK(statuses[0]);
        ASSERT_OK(statuses[1]);
      }

      // 2nd round: flush and put a new value in memtable.
      ASSERT_OK(Flush(1));
      ASSERT_OK(db_->Put(wopt, handles_[1], "rocksdb", "hello"));

      // once the data has been flushed, we are able to get the
      // data when kPersistedTier is used.
      ASSERT_TRUE(db_->Get(ropt, handles_[1], "foo", &value).ok());
      ASSERT_EQ(value, "first");
      ASSERT_TRUE(db_->Get(ropt, handles_[1], "bar", &value).ok());
      ASSERT_EQ(value, "one");
      if (wopt.disableWAL) {
        ASSERT_TRUE(
            db_->Get(ropt, handles_[1], "rocksdb", &value).IsNotFound());
      } else {
        ASSERT_OK(db_->Get(ropt, handles_[1], "rocksdb", &value));
        ASSERT_EQ(value, "hello");
      }

      // Expect same result in multiget
      multiget_cfs.push_back(handles_[1]);
      multiget_keys.push_back("rocksdb");
      statuses =
          db_->MultiGet(ropt, multiget_cfs, multiget_keys, &multiget_values);
      ASSERT_TRUE(statuses[0].ok());
      ASSERT_EQ("first", multiget_values[0]);
      ASSERT_TRUE(statuses[1].ok());
      ASSERT_EQ("one", multiget_values[1]);
      if (wopt.disableWAL) {
        ASSERT_TRUE(statuses[2].IsNotFound());
      } else {
        ASSERT_OK(statuses[2]);
      }

      // 3rd round: delete and flush
      ASSERT_OK(db_->Delete(wopt, handles_[1], "foo"));
      Flush(1);
      ASSERT_OK(db_->Delete(wopt, handles_[1], "bar"));

      ASSERT_TRUE(db_->Get(ropt, handles_[1], "foo", &value).IsNotFound());
      if (wopt.disableWAL) {
        // Still expect finding the value as its delete has not yet being
        // flushed.
        ASSERT_TRUE(db_->Get(ropt, handles_[1], "bar", &value).ok());
        ASSERT_EQ(value, "one");
      } else {
        ASSERT_TRUE(db_->Get(ropt, handles_[1], "bar", &value).IsNotFound());
      }
      ASSERT_TRUE(db_->Get(ropt, handles_[1], "rocksdb", &value).ok());
      ASSERT_EQ(value, "hello");

      statuses =
          db_->MultiGet(ropt, multiget_cfs, multiget_keys, &multiget_values);
      ASSERT_TRUE(statuses[0].IsNotFound());
      if (wopt.disableWAL) {
        ASSERT_TRUE(statuses[1].ok());
        ASSERT_EQ("one", multiget_values[1]);
      } else {
        ASSERT_TRUE(statuses[1].IsNotFound());
      }
      ASSERT_TRUE(statuses[2].ok());
      ASSERT_EQ("hello", multiget_values[2]);
      if (wopt.disableWAL == 0) {
        DestroyAndReopen(options);
      }
    }
607
  } while (ChangeOptions());
608 609
}

A
Andres Noetzli 已提交
610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642
TEST_F(DBTest, SingleDeleteFlush) {
  // Test to check whether flushing preserves a single delete hidden
  // behind a put.
  do {
    Random rnd(301);

    Options options = CurrentOptions();
    options.disable_auto_compactions = true;
    CreateAndReopenWithCF({"pikachu"}, options);

    // Put values on second level (so that they will not be in the same
    // compaction as the other operations.
    Put(1, "foo", "first");
    Put(1, "bar", "one");
    ASSERT_OK(Flush(1));
    MoveFilesToLevel(2, 1);

    // (Single) delete hidden by a put
    SingleDelete(1, "foo");
    Put(1, "foo", "second");
    Delete(1, "bar");
    Put(1, "bar", "two");
    ASSERT_OK(Flush(1));

    SingleDelete(1, "foo");
    Delete(1, "bar");
    ASSERT_OK(Flush(1));

    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
                           nullptr);

    ASSERT_EQ("NOT_FOUND", Get(1, "bar"));
    ASSERT_EQ("NOT_FOUND", Get(1, "foo"));
643 644 645 646 647
    // Skip FIFO and universal compaction beccause they do not apply to the test
    // case. Skip MergePut because single delete does not get removed when it
    // encounters a merge.
  } while (ChangeOptions(kSkipFIFOCompaction | kSkipUniversalCompaction |
                         kSkipMergePut));
A
Andres Noetzli 已提交
648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665
}

TEST_F(DBTest, SingleDeletePutFlush) {
  // Single deletes that encounter the matching put in a flush should get
  // removed.
  do {
    Random rnd(301);

    Options options = CurrentOptions();
    options.disable_auto_compactions = true;
    CreateAndReopenWithCF({"pikachu"}, options);

    Put(1, "foo", Slice());
    Put(1, "a", Slice());
    SingleDelete(1, "a");
    ASSERT_OK(Flush(1));

    ASSERT_EQ("[ ]", AllEntriesFor("a", 1));
666 667 668 669 670
    // Skip FIFO and universal compaction beccause they do not apply to the test
    // case. Skip MergePut because single delete does not get removed when it
    // encounters a merge.
  } while (ChangeOptions(kSkipFIFOCompaction | kSkipUniversalCompaction |
                         kSkipMergePut));
A
Andres Noetzli 已提交
671 672
}

673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702
// Disable because not all platform can run it.
// It requires more than 9GB memory to run it, With single allocation
// of more than 3GB.
TEST_F(DBTest, DISABLED_SanitizeVeryVeryLargeValue) {
  const size_t kValueSize = 4 * size_t{1024 * 1024 * 1024};  // 4GB value
  std::string raw(kValueSize, 'v');
  Options options = CurrentOptions();
  options.env = env_;
  options.merge_operator = MergeOperators::CreatePutOperator();
  options.write_buffer_size = 100000;  // Small write buffer
  options.paranoid_checks = true;
  DestroyAndReopen(options);

  ASSERT_OK(Put("boo", "v1"));
  ASSERT_TRUE(Put("foo", raw).IsInvalidArgument());
  ASSERT_TRUE(Merge("foo", raw).IsInvalidArgument());

  WriteBatch wb;
  ASSERT_TRUE(wb.Put("foo", raw).IsInvalidArgument());
  ASSERT_TRUE(wb.Merge("foo", raw).IsInvalidArgument());

  Slice value_slice = raw;
  Slice key_slice = "foo";
  SliceParts sp_key(&key_slice, 1);
  SliceParts sp_value(&value_slice, 1);

  ASSERT_TRUE(wb.Put(sp_key, sp_value).IsInvalidArgument());
  ASSERT_TRUE(wb.Merge(sp_key, sp_value).IsInvalidArgument());
}

S
sdong 已提交
703 704 705 706 707 708 709 710 711 712
// Disable because not all platform can run it.
// It requires more than 9GB memory to run it, With single allocation
// of more than 3GB.
TEST_F(DBTest, DISABLED_VeryLargeValue) {
  const size_t kValueSize = 3221225472u;  // 3GB value
  const size_t kKeySize = 8388608u;       // 8MB key
  std::string raw(kValueSize, 'v');
  std::string key1(kKeySize, 'c');
  std::string key2(kKeySize, 'd');

S
sdong 已提交
713
  Options options = CurrentOptions();
S
sdong 已提交
714 715 716 717 718 719 720 721 722 723 724 725
  options.env = env_;
  options.write_buffer_size = 100000;  // Small write buffer
  options.paranoid_checks = true;
  DestroyAndReopen(options);

  ASSERT_OK(Put("boo", "v1"));
  ASSERT_OK(Put("foo", "v1"));
  ASSERT_OK(Put(key1, raw));
  raw[0] = 'w';
  ASSERT_OK(Put(key2, raw));
  dbfull()->TEST_WaitForFlushMemTable();

Y
Yi Wu 已提交
726
#ifndef ROCKSDB_LITE
S
sdong 已提交
727
  ASSERT_EQ(1, NumTableFilesAtLevel(0));
Y
Yi Wu 已提交
728
#endif  // !ROCKSDB_LITE
S
sdong 已提交
729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758

  std::string value;
  Status s = db_->Get(ReadOptions(), key1, &value);
  ASSERT_OK(s);
  ASSERT_EQ(kValueSize, value.size());
  ASSERT_EQ('v', value[0]);

  s = db_->Get(ReadOptions(), key2, &value);
  ASSERT_OK(s);
  ASSERT_EQ(kValueSize, value.size());
  ASSERT_EQ('w', value[0]);

  // Compact all files.
  Flush();
  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);

  // Check DB is not in read-only state.
  ASSERT_OK(Put("boo", "v1"));

  s = db_->Get(ReadOptions(), key1, &value);
  ASSERT_OK(s);
  ASSERT_EQ(kValueSize, value.size());
  ASSERT_EQ('v', value[0]);

  s = db_->Get(ReadOptions(), key2, &value);
  ASSERT_OK(s);
  ASSERT_EQ(kValueSize, value.size());
  ASSERT_EQ('w', value[0]);
}

I
Igor Sugak 已提交
759
TEST_F(DBTest, GetFromImmutableLayer) {
S
Sanjay Ghemawat 已提交
760
  do {
S
sdong 已提交
761
    Options options = CurrentOptions();
S
Sanjay Ghemawat 已提交
762
    options.env = env_;
L
Lei Jin 已提交
763
    CreateAndReopenWithCF({"pikachu"}, options);
764

765 766
    ASSERT_OK(Put(1, "foo", "v1"));
    ASSERT_EQ("v1", Get(1, "foo"));
767

I
Igor Canadi 已提交
768 769
    // Block sync calls
    env_->delay_sstable_sync_.store(true, std::memory_order_release);
A
Aaron Gao 已提交
770 771
    Put(1, "k1", std::string(100000, 'x'));  // Fill memtable
    Put(1, "k2", std::string(100000, 'y'));  // Trigger flush
772 773
    ASSERT_EQ("v1", Get(1, "foo"));
    ASSERT_EQ("NOT_FOUND", Get(0, "foo"));
I
Igor Canadi 已提交
774 775
    // Release sync calls
    env_->delay_sstable_sync_.store(false, std::memory_order_release);
S
Sanjay Ghemawat 已提交
776
  } while (ChangeOptions());
777 778 779
}


I
Igor Sugak 已提交
780
TEST_F(DBTest, GetLevel0Ordering) {
S
Sanjay Ghemawat 已提交
781
  do {
L
Lei Jin 已提交
782
    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
S
Sanjay Ghemawat 已提交
783 784 785 786
    // Check that we process level-0 files in correct order.  The code
    // below generates two level-0 files where the earlier one comes
    // before the later one in the level-0 file list since the earlier
    // one has a smaller "smallest" key.
787 788 789 790 791 792
    ASSERT_OK(Put(1, "bar", "b"));
    ASSERT_OK(Put(1, "foo", "v1"));
    ASSERT_OK(Flush(1));
    ASSERT_OK(Put(1, "foo", "v2"));
    ASSERT_OK(Flush(1));
    ASSERT_EQ("v2", Get(1, "foo"));
S
Sanjay Ghemawat 已提交
793
  } while (ChangeOptions());
794 795
}

I
Igor Sugak 已提交
796
TEST_F(DBTest, WrongLevel0Config) {
797 798 799 800 801 802 803 804 805
  Options options = CurrentOptions();
  Close();
  ASSERT_OK(DestroyDB(dbname_, options));
  options.level0_stop_writes_trigger = 1;
  options.level0_slowdown_writes_trigger = 2;
  options.level0_file_num_compaction_trigger = 3;
  ASSERT_OK(DB::Open(options, dbname_, &db_));
}

I
Islam AbdelRahman 已提交
806
#ifndef ROCKSDB_LITE
I
Igor Sugak 已提交
807
TEST_F(DBTest, GetOrderedByLevels) {
S
Sanjay Ghemawat 已提交
808
  do {
L
Lei Jin 已提交
809
    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
810 811 812 813 814 815 816
    ASSERT_OK(Put(1, "foo", "v1"));
    Compact(1, "a", "z");
    ASSERT_EQ("v1", Get(1, "foo"));
    ASSERT_OK(Put(1, "foo", "v2"));
    ASSERT_EQ("v2", Get(1, "foo"));
    ASSERT_OK(Flush(1));
    ASSERT_EQ("v2", Get(1, "foo"));
S
Sanjay Ghemawat 已提交
817
  } while (ChangeOptions());
818 819
}

I
Igor Sugak 已提交
820
TEST_F(DBTest, GetPicksCorrectFile) {
S
Sanjay Ghemawat 已提交
821
  do {
L
Lei Jin 已提交
822
    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
S
Sanjay Ghemawat 已提交
823
    // Arrange to have multiple files in a non-level-0 level.
824 825 826 827 828 829 830 831 832
    ASSERT_OK(Put(1, "a", "va"));
    Compact(1, "a", "b");
    ASSERT_OK(Put(1, "x", "vx"));
    Compact(1, "x", "y");
    ASSERT_OK(Put(1, "f", "vf"));
    Compact(1, "f", "g");
    ASSERT_EQ("va", Get(1, "a"));
    ASSERT_EQ("vf", Get(1, "f"));
    ASSERT_EQ("vx", Get(1, "x"));
S
Sanjay Ghemawat 已提交
833
  } while (ChangeOptions());
834 835
}

I
Igor Sugak 已提交
836
TEST_F(DBTest, GetEncountersEmptyLevel) {
S
Sanjay Ghemawat 已提交
837
  do {
I
Igor Canadi 已提交
838
    Options options = CurrentOptions();
L
Lei Jin 已提交
839
    CreateAndReopenWithCF({"pikachu"}, options);
S
Sanjay Ghemawat 已提交
840 841 842 843 844 845
    // Arrange for the following to happen:
    //   * sstable A in level 0
    //   * nothing in level 1
    //   * sstable B in level 2
    // Then do enough Get() calls to arrange for an automatic compaction
    // of sstable A.  A bug would cause the compaction to be marked as
C
clark.kang 已提交
846
    // occurring at level 1 (instead of the correct level 0).
S
Sanjay Ghemawat 已提交
847 848

    // Step 1: First place sstables in levels 0 and 2
849 850 851 852 853 854 855 856 857 858
    Put(1, "a", "begin");
    Put(1, "z", "end");
    ASSERT_OK(Flush(1));
    dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
    dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
    Put(1, "a", "begin");
    Put(1, "z", "end");
    ASSERT_OK(Flush(1));
    ASSERT_GT(NumTableFilesAtLevel(0, 1), 0);
    ASSERT_GT(NumTableFilesAtLevel(2, 1), 0);
859

S
Sanjay Ghemawat 已提交
860
    // Step 2: clear level 1 if necessary.
861 862 863 864
    dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 1);
    ASSERT_EQ(NumTableFilesAtLevel(1, 1), 0);
    ASSERT_EQ(NumTableFilesAtLevel(2, 1), 1);
S
Sanjay Ghemawat 已提交
865

H
heyongqiang 已提交
866 867
    // Step 3: read a bunch of times
    for (int i = 0; i < 1000; i++) {
868
      ASSERT_EQ("NOT_FOUND", Get(1, "missing"));
S
Sanjay Ghemawat 已提交
869
    }
H
heyongqiang 已提交
870 871

    // Step 4: Wait for compaction to finish
872
    dbfull()->TEST_WaitForCompact();
H
heyongqiang 已提交
873

874
    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 1);  // XXX
I
Igor Canadi 已提交
875
  } while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction));
876
}
I
Islam AbdelRahman 已提交
877
#endif  // ROCKSDB_LITE
878

Y
Yi Wu 已提交
879
TEST_F(DBTest, FlushMultipleMemtable) {
V
Venkatesh Radhakrishnan 已提交
880 881
  do {
    Options options = CurrentOptions();
Y
Yi Wu 已提交
882 883 884 885 886
    WriteOptions writeOpt = WriteOptions();
    writeOpt.disableWAL = true;
    options.max_write_buffer_number = 4;
    options.min_write_buffer_number_to_merge = 3;
    options.max_write_buffer_number_to_maintain = -1;
V
Venkatesh Radhakrishnan 已提交
887
    CreateAndReopenWithCF({"pikachu"}, options);
Y
Yi Wu 已提交
888
    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "foo", "v1"));
V
Venkatesh Radhakrishnan 已提交
889
    ASSERT_OK(Flush(1));
Y
Yi Wu 已提交
890
    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "bar", "v1"));
V
Venkatesh Radhakrishnan 已提交
891

Y
Yi Wu 已提交
892 893 894
    ASSERT_EQ("v1", Get(1, "foo"));
    ASSERT_EQ("v1", Get(1, "bar"));
    ASSERT_OK(Flush(1));
895
  } while (ChangeCompactOptions());
896
}
Y
Yi Wu 已提交
897 898 899 900 901 902 903 904 905 906 907
#ifndef ROCKSDB_LITE
TEST_F(DBTest, FlushSchedule) {
  Options options = CurrentOptions();
  options.disable_auto_compactions = true;
  options.level0_stop_writes_trigger = 1 << 10;
  options.level0_slowdown_writes_trigger = 1 << 10;
  options.min_write_buffer_number_to_merge = 1;
  options.max_write_buffer_number_to_maintain = 1;
  options.max_write_buffer_number = 2;
  options.write_buffer_size = 120 * 1024;
  CreateAndReopenWithCF({"pikachu"}, options);
D
Dmitri Smirnov 已提交
908
  std::vector<port::Thread> threads;
909

Y
Yi Wu 已提交
910 911 912 913 914 915 916 917 918 919 920 921
  std::atomic<int> thread_num(0);
  // each column family will have 5 thread, each thread generating 2 memtables.
  // each column family should end up with 10 table files
  std::function<void()> fill_memtable_func = [&]() {
    int a = thread_num.fetch_add(1);
    Random rnd(a);
    WriteOptions wo;
    // this should fill up 2 memtables
    for (int k = 0; k < 5000; ++k) {
      ASSERT_OK(db_->Put(wo, handles_[a & 1], RandomString(&rnd, 13), ""));
    }
  };
S
sdong 已提交
922

Y
Yi Wu 已提交
923 924
  for (int i = 0; i < 10; ++i) {
    threads.emplace_back(fill_memtable_func);
S
sdong 已提交
925 926
  }

Y
Yi Wu 已提交
927 928 929
  for (auto& t : threads) {
    t.join();
  }
S
sdong 已提交
930

Y
Yi Wu 已提交
931 932 933 934 935 936
  auto default_tables = GetNumberOfSstFilesForColumnFamily(db_, "default");
  auto pikachu_tables = GetNumberOfSstFilesForColumnFamily(db_, "pikachu");
  ASSERT_LE(default_tables, static_cast<uint64_t>(10));
  ASSERT_GT(default_tables, static_cast<uint64_t>(0));
  ASSERT_LE(pikachu_tables, static_cast<uint64_t>(10));
  ASSERT_GT(pikachu_tables, static_cast<uint64_t>(0));
S
sdong 已提交
937
}
Y
Yi Wu 已提交
938
#endif  // ROCKSDB_LITE
S
sdong 已提交
939

Y
Yi Wu 已提交
940 941 942
namespace {
class KeepFilter : public CompactionFilter {
 public:
943 944 945
  bool Filter(int /*level*/, const Slice& /*key*/, const Slice& /*value*/,
              std::string* /*new_value*/,
              bool* /*value_changed*/) const override {
Y
Yi Wu 已提交
946 947
    return false;
  }
J
jorlow@chromium.org 已提交
948

949
  const char* Name() const override { return "KeepFilter"; }
Y
Yi Wu 已提交
950
};
951

Y
Yi Wu 已提交
952 953 954 955
class KeepFilterFactory : public CompactionFilterFactory {
 public:
  explicit KeepFilterFactory(bool check_context = false)
      : check_context_(check_context) {}
J
jorlow@chromium.org 已提交
956

957
  std::unique_ptr<CompactionFilter> CreateCompactionFilter(
Y
Yi Wu 已提交
958 959 960 961 962 963 964
      const CompactionFilter::Context& context) override {
    if (check_context_) {
      EXPECT_EQ(expect_full_compaction_.load(), context.is_full_compaction);
      EXPECT_EQ(expect_manual_compaction_.load(), context.is_manual_compaction);
    }
    return std::unique_ptr<CompactionFilter>(new KeepFilter());
  }
965

966
  const char* Name() const override { return "KeepFilterFactory"; }
Y
Yi Wu 已提交
967 968 969 970
  bool check_context_;
  std::atomic_bool expect_full_compaction_;
  std::atomic_bool expect_manual_compaction_;
};
971

Y
Yi Wu 已提交
972 973 974
class DelayFilter : public CompactionFilter {
 public:
  explicit DelayFilter(DBTestBase* d) : db_test(d) {}
975 976 977
  bool Filter(int /*level*/, const Slice& /*key*/, const Slice& /*value*/,
              std::string* /*new_value*/,
              bool* /*value_changed*/) const override {
Y
Yi Wu 已提交
978 979 980
    db_test->env_->addon_time_.fetch_add(1000);
    return true;
  }
981

982
  const char* Name() const override { return "DelayFilter"; }
983

Y
Yi Wu 已提交
984 985 986
 private:
  DBTestBase* db_test;
};
987

Y
Yi Wu 已提交
988 989 990
class DelayFilterFactory : public CompactionFilterFactory {
 public:
  explicit DelayFilterFactory(DBTestBase* d) : db_test(d) {}
991
  std::unique_ptr<CompactionFilter> CreateCompactionFilter(
A
Andrew Kryczka 已提交
992
      const CompactionFilter::Context& /*context*/) override {
Y
Yi Wu 已提交
993 994
    return std::unique_ptr<CompactionFilter>(new DelayFilter(db_test));
  }
995

996
  const char* Name() const override { return "DelayFilterFactory"; }
J
jorlow@chromium.org 已提交
997

Y
Yi Wu 已提交
998 999 1000 1001
 private:
  DBTestBase* db_test;
};
}  // namespace
J
jorlow@chromium.org 已提交
1002

Y
Yi Wu 已提交
1003 1004 1005 1006 1007 1008
#ifndef ROCKSDB_LITE

static std::string CompressibleString(Random* rnd, int len) {
  std::string r;
  test::CompressibleString(rnd, 0.8, len, &r);
  return r;
J
jorlow@chromium.org 已提交
1009
}
Y
Yi Wu 已提交
1010
#endif  // ROCKSDB_LITE
J
jorlow@chromium.org 已提交
1011

Y
Yi Wu 已提交
1012 1013 1014 1015 1016 1017 1018 1019
TEST_F(DBTest, FailMoreDbPaths) {
  Options options = CurrentOptions();
  options.db_paths.emplace_back(dbname_, 10000000);
  options.db_paths.emplace_back(dbname_ + "_2", 1000000);
  options.db_paths.emplace_back(dbname_ + "_3", 1000000);
  options.db_paths.emplace_back(dbname_ + "_4", 1000000);
  options.db_paths.emplace_back(dbname_ + "_5", 1000000);
  ASSERT_TRUE(TryReopen(options).IsNotSupported());
1020 1021
}

Y
Yi Wu 已提交
1022 1023 1024 1025 1026 1027 1028 1029 1030 1031
void CheckColumnFamilyMeta(const ColumnFamilyMetaData& cf_meta) {
  uint64_t cf_size = 0;
  uint64_t cf_csize = 0;
  size_t file_count = 0;
  for (auto level_meta : cf_meta.levels) {
    uint64_t level_size = 0;
    uint64_t level_csize = 0;
    file_count += level_meta.files.size();
    for (auto file_meta : level_meta.files) {
      level_size += file_meta.size;
1032
    }
Y
Yi Wu 已提交
1033 1034 1035 1036 1037 1038 1039
    ASSERT_EQ(level_meta.size, level_size);
    cf_size += level_size;
    cf_csize += level_csize;
  }
  ASSERT_EQ(cf_meta.file_count, file_count);
  ASSERT_EQ(cf_meta.size, cf_size);
}
1040

Y
Yi Wu 已提交
1041 1042 1043 1044 1045
#ifndef ROCKSDB_LITE
TEST_F(DBTest, ColumnFamilyMetaDataTest) {
  Options options = CurrentOptions();
  options.create_if_missing = true;
  DestroyAndReopen(options);
1046

Y
Yi Wu 已提交
1047 1048 1049 1050 1051 1052 1053 1054 1055
  Random rnd(301);
  int key_index = 0;
  ColumnFamilyMetaData cf_meta;
  for (int i = 0; i < 100; ++i) {
    GenerateNewFile(&rnd, &key_index);
    db_->GetColumnFamilyMetaData(&cf_meta);
    CheckColumnFamilyMeta(cf_meta);
  }
}
1056

Y
Yi Wu 已提交
1057 1058 1059
namespace {
void MinLevelHelper(DBTest* self, Options& options) {
  Random rnd(301);
1060

Y
Yi Wu 已提交
1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071
  for (int num = 0; num < options.level0_file_num_compaction_trigger - 1;
       num++) {
    std::vector<std::string> values;
    // Write 120KB (12 values, each 10K)
    for (int i = 0; i < 12; i++) {
      values.push_back(DBTestBase::RandomString(&rnd, 10000));
      ASSERT_OK(self->Put(DBTestBase::Key(i), values[i]));
    }
    self->dbfull()->TEST_WaitForFlushMemTable();
    ASSERT_EQ(self->NumTableFilesAtLevel(0), num + 1);
  }
1072

Y
Yi Wu 已提交
1073 1074 1075 1076 1077 1078 1079
  // generate one more file in level-0, and should trigger level-0 compaction
  std::vector<std::string> values;
  for (int i = 0; i < 12; i++) {
    values.push_back(DBTestBase::RandomString(&rnd, 10000));
    ASSERT_OK(self->Put(DBTestBase::Key(i), values[i]));
  }
  self->dbfull()->TEST_WaitForCompact();
1080

Y
Yi Wu 已提交
1081 1082
  ASSERT_EQ(self->NumTableFilesAtLevel(0), 0);
  ASSERT_EQ(self->NumTableFilesAtLevel(1), 1);
1083 1084
}

Y
Yi Wu 已提交
1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096
// returns false if the calling-Test should be skipped
bool MinLevelToCompress(CompressionType& type, Options& options, int wbits,
                        int lev, int strategy) {
  fprintf(stderr,
          "Test with compression options : window_bits = %d, level =  %d, "
          "strategy = %d}\n",
          wbits, lev, strategy);
  options.write_buffer_size = 100 << 10;  // 100KB
  options.arena_block_size = 4096;
  options.num_levels = 3;
  options.level0_file_num_compaction_trigger = 3;
  options.create_if_missing = true;
1097

Y
Yi Wu 已提交
1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109
  if (Snappy_Supported()) {
    type = kSnappyCompression;
    fprintf(stderr, "using snappy\n");
  } else if (Zlib_Supported()) {
    type = kZlibCompression;
    fprintf(stderr, "using zlib\n");
  } else if (BZip2_Supported()) {
    type = kBZip2Compression;
    fprintf(stderr, "using bzip2\n");
  } else if (LZ4_Supported()) {
    type = kLZ4Compression;
    fprintf(stderr, "using lz4\n");
1110 1111 1112 1113
  } else if (XPRESS_Supported()) {
    type = kXpressCompression;
    fprintf(stderr, "using xpress\n");
  } else if (ZSTD_Supported()) {
S
sdong 已提交
1114
    type = kZSTD;
1115
    fprintf(stderr, "using ZSTD\n");
Y
Yi Wu 已提交
1116 1117 1118 1119 1120
  } else {
    fprintf(stderr, "skipping test, compression disabled\n");
    return false;
  }
  options.compression_per_level.resize(options.num_levels);
1121

Y
Yi Wu 已提交
1122 1123 1124 1125 1126 1127 1128 1129
  // do not compress L0
  for (int i = 0; i < 1; i++) {
    options.compression_per_level[i] = kNoCompression;
  }
  for (int i = 1; i < options.num_levels; i++) {
    options.compression_per_level[i] = type;
  }
  return true;
J
jorlow@chromium.org 已提交
1130
}
Y
Yi Wu 已提交
1131
}  // namespace
J
jorlow@chromium.org 已提交
1132

Y
Yi Wu 已提交
1133 1134 1135 1136 1137 1138 1139 1140
TEST_F(DBTest, MinLevelToCompress1) {
  Options options = CurrentOptions();
  CompressionType type = kSnappyCompression;
  if (!MinLevelToCompress(type, options, -14, -1, 0)) {
    return;
  }
  Reopen(options);
  MinLevelHelper(this, options);
1141

Y
Yi Wu 已提交
1142 1143 1144 1145 1146 1147 1148 1149 1150
  // do not compress L0 and L1
  for (int i = 0; i < 2; i++) {
    options.compression_per_level[i] = kNoCompression;
  }
  for (int i = 2; i < options.num_levels; i++) {
    options.compression_per_level[i] = type;
  }
  DestroyAndReopen(options);
  MinLevelHelper(this, options);
1151 1152
}

Y
Yi Wu 已提交
1153 1154 1155 1156 1157 1158 1159 1160
TEST_F(DBTest, MinLevelToCompress2) {
  Options options = CurrentOptions();
  CompressionType type = kSnappyCompression;
  if (!MinLevelToCompress(type, options, 15, -1, 0)) {
    return;
  }
  Reopen(options);
  MinLevelHelper(this, options);
I
Igor Canadi 已提交
1161

Y
Yi Wu 已提交
1162 1163 1164 1165 1166 1167
  // do not compress L0 and L1
  for (int i = 0; i < 2; i++) {
    options.compression_per_level[i] = kNoCompression;
  }
  for (int i = 2; i < options.num_levels; i++) {
    options.compression_per_level[i] = type;
I
Igor Canadi 已提交
1168
  }
Y
Yi Wu 已提交
1169 1170 1171
  DestroyAndReopen(options);
  MinLevelHelper(this, options);
}
I
Igor Canadi 已提交
1172

1173 1174 1175
// This test may fail because of a legit case that multiple L0 files
// are trivial moved to L1.
TEST_F(DBTest, DISABLED_RepeatedWritesToSameKey) {
I
Igor Canadi 已提交
1176 1177
  do {
    Options options = CurrentOptions();
Y
Yi Wu 已提交
1178 1179 1180
    options.env = env_;
    options.write_buffer_size = 100000;  // Small write buffer
    CreateAndReopenWithCF({"pikachu"}, options);
I
Igor Canadi 已提交
1181

Y
Yi Wu 已提交
1182 1183 1184 1185
    // We must have at most one file per level except for level-0,
    // which may have up to kL0_StopWritesTrigger files.
    const int kMaxFiles =
        options.num_levels + options.level0_stop_writes_trigger;
1186

Y
Yi Wu 已提交
1187 1188 1189 1190 1191 1192
    Random rnd(301);
    std::string value =
        RandomString(&rnd, static_cast<int>(2 * options.write_buffer_size));
    for (int i = 0; i < 5 * kMaxFiles; i++) {
      ASSERT_OK(Put(1, "key", value));
      ASSERT_LE(TotalTableFiles(1), kMaxFiles);
1193
    }
1194
  } while (ChangeCompactOptions());
1195
}
Y
Yi Wu 已提交
1196
#endif  // ROCKSDB_LITE
1197

Y
Yi Wu 已提交
1198
TEST_F(DBTest, SparseMerge) {
1199 1200
  do {
    Options options = CurrentOptions();
Y
Yi Wu 已提交
1201
    options.compression = kNoCompression;
L
Lei Jin 已提交
1202
    CreateAndReopenWithCF({"pikachu"}, options);
Y
Yi Wu 已提交
1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220

    FillLevels("A", "Z", 1);

    // Suppose there is:
    //    small amount of data with prefix A
    //    large amount of data with prefix B
    //    small amount of data with prefix C
    // and that recent updates have made small changes to all three prefixes.
    // Check that we do not do a compaction that merges all of B in one shot.
    const std::string value(1000, 'x');
    Put(1, "A", "va");
    // Write approximately 100MB of "B" values
    for (int i = 0; i < 100000; i++) {
      char key[100];
      snprintf(key, sizeof(key), "B%010d", i);
      Put(1, key, value);
    }
    Put(1, "C", "vc");
1221
    ASSERT_OK(Flush(1));
Y
Yi Wu 已提交
1222
    dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
1223

Y
Yi Wu 已提交
1224 1225 1226 1227
    // Make sparse update
    Put(1, "A", "va2");
    Put(1, "B100", "bvalue2");
    Put(1, "C", "vc2");
1228
    ASSERT_OK(Flush(1));
Y
Yi Wu 已提交
1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239

    // Compactions should not cause us to create a situation where
    // a file overlaps too much data at the next level.
    ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(handles_[1]),
              20 * 1048576);
    dbfull()->TEST_CompactRange(0, nullptr, nullptr);
    ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(handles_[1]),
              20 * 1048576);
    dbfull()->TEST_CompactRange(1, nullptr, nullptr);
    ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(handles_[1]),
              20 * 1048576);
1240
  } while (ChangeCompactOptions());
1241 1242
}

Y
Yi Wu 已提交
1243 1244 1245 1246 1247 1248 1249 1250 1251 1252
#ifndef ROCKSDB_LITE
static bool Between(uint64_t val, uint64_t low, uint64_t high) {
  bool result = (val >= low) && (val <= high);
  if (!result) {
    fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
            (unsigned long long)(val), (unsigned long long)(low),
            (unsigned long long)(high));
  }
  return result;
}
1253

Y
Yi Wu 已提交
1254
TEST_F(DBTest, ApproximateSizesMemTable) {
1255
  Options options = CurrentOptions();
Y
Yi Wu 已提交
1256 1257 1258 1259
  options.write_buffer_size = 100000000;  // Large write buffer
  options.compression = kNoCompression;
  options.create_if_missing = true;
  DestroyAndReopen(options);
1260
  auto default_cf = db_->DefaultColumnFamily();
1261

Y
Yi Wu 已提交
1262 1263 1264 1265 1266
  const int N = 128;
  Random rnd(301);
  for (int i = 0; i < N; i++) {
    ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024)));
  }
1267

Y
Yi Wu 已提交
1268 1269 1270 1271
  uint64_t size;
  std::string start = Key(50);
  std::string end = Key(60);
  Range r(start, end);
1272 1273 1274 1275
  SizeApproximationOptions size_approx_options;
  size_approx_options.include_memtabtles = true;
  size_approx_options.include_files = true;
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
Y
Yi Wu 已提交
1276 1277 1278
  ASSERT_GT(size, 6000);
  ASSERT_LT(size, 204800);
  // Zero if not including mem table
1279
  db_->GetApproximateSizes(&r, 1, &size);
Y
Yi Wu 已提交
1280
  ASSERT_EQ(size, 0);
1281

Y
Yi Wu 已提交
1282 1283 1284
  start = Key(500);
  end = Key(600);
  r = Range(start, end);
1285
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
Y
Yi Wu 已提交
1286
  ASSERT_EQ(size, 0);
1287

Y
Yi Wu 已提交
1288 1289 1290
  for (int i = 0; i < N; i++) {
    ASSERT_OK(Put(Key(1000 + i), RandomString(&rnd, 1024)));
  }
1291

Y
Yi Wu 已提交
1292 1293 1294
  start = Key(500);
  end = Key(600);
  r = Range(start, end);
1295
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
Y
Yi Wu 已提交
1296
  ASSERT_EQ(size, 0);
1297

Y
Yi Wu 已提交
1298 1299 1300
  start = Key(100);
  end = Key(1020);
  r = Range(start, end);
1301
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
Y
Yi Wu 已提交
1302
  ASSERT_GT(size, 6000);
H
heyongqiang 已提交
1303

Y
Yi Wu 已提交
1304 1305 1306 1307
  options.max_write_buffer_number = 8;
  options.min_write_buffer_number_to_merge = 5;
  options.write_buffer_size = 1024 * N;  // Not very large
  DestroyAndReopen(options);
1308
  default_cf = db_->DefaultColumnFamily();
1309

Y
Yi Wu 已提交
1310 1311 1312 1313 1314 1315 1316
  int keys[N * 3];
  for (int i = 0; i < N; i++) {
    keys[i * 3] = i * 5;
    keys[i * 3 + 1] = i * 5 + 1;
    keys[i * 3 + 2] = i * 5 + 2;
  }
  std::random_shuffle(std::begin(keys), std::end(keys));
H
heyongqiang 已提交
1317

Y
Yi Wu 已提交
1318 1319 1320
  for (int i = 0; i < N * 3; i++) {
    ASSERT_OK(Put(Key(keys[i] + 1000), RandomString(&rnd, 1024)));
  }
H
heyongqiang 已提交
1321

Y
Yi Wu 已提交
1322 1323 1324
  start = Key(100);
  end = Key(300);
  r = Range(start, end);
1325
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
Y
Yi Wu 已提交
1326
  ASSERT_EQ(size, 0);
H
heyongqiang 已提交
1327

Y
Yi Wu 已提交
1328 1329 1330
  start = Key(1050);
  end = Key(1080);
  r = Range(start, end);
1331
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
Y
Yi Wu 已提交
1332
  ASSERT_GT(size, 6000);
H
heyongqiang 已提交
1333

Y
Yi Wu 已提交
1334 1335 1336
  start = Key(2100);
  end = Key(2300);
  r = Range(start, end);
1337
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
Y
Yi Wu 已提交
1338
  ASSERT_EQ(size, 0);
1339

Y
Yi Wu 已提交
1340 1341 1342 1343
  start = Key(1050);
  end = Key(1080);
  r = Range(start, end);
  uint64_t size_with_mt, size_without_mt;
1344 1345
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1,
                           &size_with_mt);
Y
Yi Wu 已提交
1346
  ASSERT_GT(size_with_mt, 6000);
1347
  db_->GetApproximateSizes(&r, 1, &size_without_mt);
Y
Yi Wu 已提交
1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358
  ASSERT_EQ(size_without_mt, 0);

  Flush();

  for (int i = 0; i < N; i++) {
    ASSERT_OK(Put(Key(i + 1000), RandomString(&rnd, 1024)));
  }

  start = Key(1050);
  end = Key(1080);
  r = Range(start, end);
1359 1360
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1,
                           &size_with_mt);
1361
  db_->GetApproximateSizes(&r, 1, &size_without_mt);
Y
Yi Wu 已提交
1362 1363
  ASSERT_GT(size_with_mt, size_without_mt);
  ASSERT_GT(size_without_mt, 6000);
1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432

  // Check that include_memtabtles flag works as expected
  size_approx_options.include_memtabtles = false;
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
  ASSERT_EQ(size, size_without_mt);

  // Check that files_size_error_margin works as expected, when the heuristic
  // conditions are not met
  start = Key(1);
  end = Key(1000 + N - 2);
  r = Range(start, end);
  size_approx_options.files_size_error_margin = -1.0;  // disabled
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
  uint64_t size2;
  size_approx_options.files_size_error_margin = 0.5;  // enabled, but not used
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size2);
  ASSERT_EQ(size, size2);
}

TEST_F(DBTest, ApproximateSizesFilesWithErrorMargin) {
  Options options = CurrentOptions();
  options.write_buffer_size = 1024 * 1024;
  options.compression = kNoCompression;
  options.create_if_missing = true;
  options.target_file_size_base = 1024 * 1024;
  DestroyAndReopen(options);
  const auto default_cf = db_->DefaultColumnFamily();

  const int N = 64000;
  Random rnd(301);
  for (int i = 0; i < N; i++) {
    ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024)));
  }
  // Flush everything to files
  Flush();
  // Compact the entire key space into the next level
  db_->CompactRange(CompactRangeOptions(), default_cf, nullptr, nullptr);

  // Write more keys
  for (int i = N; i < (N + N / 4); i++) {
    ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024)));
  }
  // Flush everything to files again
  Flush();

  // Wait for compaction to finish
  ASSERT_OK(dbfull()->TEST_WaitForCompact());

  const std::string start = Key(0);
  const std::string end = Key(2 * N);
  const Range r(start, end);

  SizeApproximationOptions size_approx_options;
  size_approx_options.include_memtabtles = false;
  size_approx_options.include_files = true;
  size_approx_options.files_size_error_margin = -1.0;  // disabled

  // Get the precise size without any approximation heuristic
  uint64_t size;
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
  ASSERT_NE(size, 0);

  // Get the size with an approximation heuristic
  uint64_t size2;
  const double error_margin = 0.2;
  size_approx_options.files_size_error_margin = error_margin;
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size2);
  ASSERT_LT(size2, size * (1 + error_margin));
  ASSERT_GT(size2, size * (1 - error_margin));
H
heyongqiang 已提交
1433 1434
}

1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487
TEST_F(DBTest, GetApproximateMemTableStats) {
  Options options = CurrentOptions();
  options.write_buffer_size = 100000000;
  options.compression = kNoCompression;
  options.create_if_missing = true;
  DestroyAndReopen(options);

  const int N = 128;
  Random rnd(301);
  for (int i = 0; i < N; i++) {
    ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024)));
  }

  uint64_t count;
  uint64_t size;

  std::string start = Key(50);
  std::string end = Key(60);
  Range r(start, end);
  db_->GetApproximateMemTableStats(r, &count, &size);
  ASSERT_GT(count, 0);
  ASSERT_LE(count, N);
  ASSERT_GT(size, 6000);
  ASSERT_LT(size, 204800);

  start = Key(500);
  end = Key(600);
  r = Range(start, end);
  db_->GetApproximateMemTableStats(r, &count, &size);
  ASSERT_EQ(count, 0);
  ASSERT_EQ(size, 0);

  Flush();

  start = Key(50);
  end = Key(60);
  r = Range(start, end);
  db_->GetApproximateMemTableStats(r, &count, &size);
  ASSERT_EQ(count, 0);
  ASSERT_EQ(size, 0);

  for (int i = 0; i < N; i++) {
    ASSERT_OK(Put(Key(1000 + i), RandomString(&rnd, 1024)));
  }

  start = Key(100);
  end = Key(1020);
  r = Range(start, end);
  db_->GetApproximateMemTableStats(r, &count, &size);
  ASSERT_GT(count, 20);
  ASSERT_GT(size, 6000);
}

Y
Yi Wu 已提交
1488
TEST_F(DBTest, ApproximateSizes) {
S
Sanjay Ghemawat 已提交
1489
  do {
Y
Yi Wu 已提交
1490 1491 1492 1493 1494 1495
    Options options = CurrentOptions();
    options.write_buffer_size = 100000000;  // Large write buffer
    options.compression = kNoCompression;
    options.create_if_missing = true;
    DestroyAndReopen(options);
    CreateAndReopenWithCF({"pikachu"}, options);
J
jorlow@chromium.org 已提交
1496

Y
Yi Wu 已提交
1497 1498 1499
    ASSERT_TRUE(Between(Size("", "xyz", 1), 0, 0));
    ReopenWithColumnFamilies({"default", "pikachu"}, options);
    ASSERT_TRUE(Between(Size("", "xyz", 1), 0, 0));
I
Igor Canadi 已提交
1500

Y
Yi Wu 已提交
1501 1502 1503 1504 1505 1506 1507 1508
    // Write 8MB (80 values, each 100K)
    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
    const int N = 80;
    static const int S1 = 100000;
    static const int S2 = 105000;  // Allow some expansion from metadata
    Random rnd(301);
    for (int i = 0; i < N; i++) {
      ASSERT_OK(Put(1, Key(i), RandomString(&rnd, S1)));
1509 1510
    }

Y
Yi Wu 已提交
1511 1512
    // 0 because GetApproximateSizes() does not account for memtable space
    ASSERT_TRUE(Between(Size("", Key(50), 1), 0, 0));
I
Igor Canadi 已提交
1513

Y
Yi Wu 已提交
1514 1515 1516
    // Check sizes across recovery by reopening a few times
    for (int run = 0; run < 3; run++) {
      ReopenWithColumnFamilies({"default", "pikachu"}, options);
I
Igor Canadi 已提交
1517

Y
Yi Wu 已提交
1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541
      for (int compact_start = 0; compact_start < N; compact_start += 10) {
        for (int i = 0; i < N; i += 10) {
          ASSERT_TRUE(Between(Size("", Key(i), 1), S1 * i, S2 * i));
          ASSERT_TRUE(Between(Size("", Key(i) + ".suffix", 1), S1 * (i + 1),
                              S2 * (i + 1)));
          ASSERT_TRUE(Between(Size(Key(i), Key(i + 10), 1), S1 * 10, S2 * 10));
        }
        ASSERT_TRUE(Between(Size("", Key(50), 1), S1 * 50, S2 * 50));
        ASSERT_TRUE(
            Between(Size("", Key(50) + ".suffix", 1), S1 * 50, S2 * 50));

        std::string cstart_str = Key(compact_start);
        std::string cend_str = Key(compact_start + 9);
        Slice cstart = cstart_str;
        Slice cend = cend_str;
        dbfull()->TEST_CompactRange(0, &cstart, &cend, handles_[1]);
      }

      ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
      ASSERT_GT(NumTableFilesAtLevel(1, 1), 0);
    }
    // ApproximateOffsetOf() is not yet implemented in plain table format.
  } while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction |
                         kSkipPlainTable | kSkipHashIndex));
I
Igor Canadi 已提交
1542
}
J
jorlow@chromium.org 已提交
1543

Y
Yi Wu 已提交
1544
TEST_F(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
1545
  do {
Y
Yi Wu 已提交
1546 1547
    Options options = CurrentOptions();
    options.compression = kNoCompression;
L
Lei Jin 已提交
1548
    CreateAndReopenWithCF({"pikachu"}, options);
Y
Yi Wu 已提交
1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562

    Random rnd(301);
    std::string big1 = RandomString(&rnd, 100000);
    ASSERT_OK(Put(1, Key(0), RandomString(&rnd, 10000)));
    ASSERT_OK(Put(1, Key(1), RandomString(&rnd, 10000)));
    ASSERT_OK(Put(1, Key(2), big1));
    ASSERT_OK(Put(1, Key(3), RandomString(&rnd, 10000)));
    ASSERT_OK(Put(1, Key(4), big1));
    ASSERT_OK(Put(1, Key(5), RandomString(&rnd, 10000)));
    ASSERT_OK(Put(1, Key(6), RandomString(&rnd, 300000)));
    ASSERT_OK(Put(1, Key(7), RandomString(&rnd, 10000)));

    // Check sizes across recovery by reopening a few times
    for (int run = 0; run < 3; run++) {
L
Lei Jin 已提交
1563
      ReopenWithColumnFamilies({"default", "pikachu"}, options);
A
Abhishek Kona 已提交
1564

Y
Yi Wu 已提交
1565 1566 1567 1568 1569 1570 1571 1572 1573
      ASSERT_TRUE(Between(Size("", Key(0), 1), 0, 0));
      ASSERT_TRUE(Between(Size("", Key(1), 1), 10000, 11000));
      ASSERT_TRUE(Between(Size("", Key(2), 1), 20000, 21000));
      ASSERT_TRUE(Between(Size("", Key(3), 1), 120000, 121000));
      ASSERT_TRUE(Between(Size("", Key(4), 1), 130000, 131000));
      ASSERT_TRUE(Between(Size("", Key(5), 1), 230000, 231000));
      ASSERT_TRUE(Between(Size("", Key(6), 1), 240000, 241000));
      ASSERT_TRUE(Between(Size("", Key(7), 1), 540000, 541000));
      ASSERT_TRUE(Between(Size("", Key(8), 1), 550000, 560000));
M
Mayank Agarwal 已提交
1574

Y
Yi Wu 已提交
1575
      ASSERT_TRUE(Between(Size(Key(3), Key(5), 1), 110000, 111000));
M
Mayank Agarwal 已提交
1576

Y
Yi Wu 已提交
1577 1578 1579 1580
      dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
    }
    // ApproximateOffsetOf() is not yet implemented in plain table format.
  } while (ChangeOptions(kSkipPlainTable));
M
Mayank Agarwal 已提交
1581
}
Y
Yi Wu 已提交
1582
#endif  // ROCKSDB_LITE
M
Mayank Agarwal 已提交
1583

I
Islam AbdelRahman 已提交
1584
#ifndef ROCKSDB_LITE
Y
Yi Wu 已提交
1585 1586 1587
TEST_F(DBTest, Snapshot) {
  anon::OptionsOverride options_override;
  options_override.skip_policy = kSkipNoSnapshot;
1588
  do {
Y
Yi Wu 已提交
1589 1590 1591
    CreateAndReopenWithCF({"pikachu"}, CurrentOptions(options_override));
    Put(0, "foo", "0v1");
    Put(1, "foo", "1v1");
1592

Y
Yi Wu 已提交
1593 1594 1595 1596 1597 1598
    const Snapshot* s1 = db_->GetSnapshot();
    ASSERT_EQ(1U, GetNumSnapshots());
    uint64_t time_snap1 = GetTimeOldestSnapshots();
    ASSERT_GT(time_snap1, 0U);
    Put(0, "foo", "0v2");
    Put(1, "foo", "1v2");
J
jorlow@chromium.org 已提交
1599

Y
Yi Wu 已提交
1600
    env_->addon_time_.fetch_add(1);
1601

Y
Yi Wu 已提交
1602 1603 1604 1605 1606
    const Snapshot* s2 = db_->GetSnapshot();
    ASSERT_EQ(2U, GetNumSnapshots());
    ASSERT_EQ(time_snap1, GetTimeOldestSnapshots());
    Put(0, "foo", "0v3");
    Put(1, "foo", "1v3");
1607

Y
Yi Wu 已提交
1608 1609 1610 1611
    {
      ManagedSnapshot s3(db_);
      ASSERT_EQ(3U, GetNumSnapshots());
      ASSERT_EQ(time_snap1, GetTimeOldestSnapshots());
1612

Y
Yi Wu 已提交
1613 1614 1615 1616 1617 1618 1619 1620 1621 1622
      Put(0, "foo", "0v4");
      Put(1, "foo", "1v4");
      ASSERT_EQ("0v1", Get(0, "foo", s1));
      ASSERT_EQ("1v1", Get(1, "foo", s1));
      ASSERT_EQ("0v2", Get(0, "foo", s2));
      ASSERT_EQ("1v2", Get(1, "foo", s2));
      ASSERT_EQ("0v3", Get(0, "foo", s3.snapshot()));
      ASSERT_EQ("1v3", Get(1, "foo", s3.snapshot()));
      ASSERT_EQ("0v4", Get(0, "foo"));
      ASSERT_EQ("1v4", Get(1, "foo"));
1623 1624
    }

Y
Yi Wu 已提交
1625 1626 1627 1628 1629 1630 1631 1632
    ASSERT_EQ(2U, GetNumSnapshots());
    ASSERT_EQ(time_snap1, GetTimeOldestSnapshots());
    ASSERT_EQ("0v1", Get(0, "foo", s1));
    ASSERT_EQ("1v1", Get(1, "foo", s1));
    ASSERT_EQ("0v2", Get(0, "foo", s2));
    ASSERT_EQ("1v2", Get(1, "foo", s2));
    ASSERT_EQ("0v4", Get(0, "foo"));
    ASSERT_EQ("1v4", Get(1, "foo"));
1633

Y
Yi Wu 已提交
1634 1635 1636 1637 1638 1639 1640
    db_->ReleaseSnapshot(s1);
    ASSERT_EQ("0v2", Get(0, "foo", s2));
    ASSERT_EQ("1v2", Get(1, "foo", s2));
    ASSERT_EQ("0v4", Get(0, "foo"));
    ASSERT_EQ("1v4", Get(1, "foo"));
    ASSERT_EQ(1U, GetNumSnapshots());
    ASSERT_LT(time_snap1, GetTimeOldestSnapshots());
1641

Y
Yi Wu 已提交
1642 1643 1644 1645
    db_->ReleaseSnapshot(s2);
    ASSERT_EQ(0U, GetNumSnapshots());
    ASSERT_EQ("0v4", Get(0, "foo"));
    ASSERT_EQ("1v4", Get(1, "foo"));
1646
  } while (ChangeOptions());
Y
Yi Wu 已提交
1647
}
1648

Y
Yi Wu 已提交
1649 1650 1651 1652 1653 1654 1655 1656
TEST_F(DBTest, HiddenValuesAreRemoved) {
  anon::OptionsOverride options_override;
  options_override.skip_policy = kSkipNoSnapshot;
  do {
    Options options = CurrentOptions(options_override);
    CreateAndReopenWithCF({"pikachu"}, options);
    Random rnd(301);
    FillLevels("a", "z", 1);
1657

Y
Yi Wu 已提交
1658 1659 1660 1661 1662 1663
    std::string big = RandomString(&rnd, 50000);
    Put(1, "foo", big);
    Put(1, "pastfoo", "v");
    const Snapshot* snapshot = db_->GetSnapshot();
    Put(1, "foo", "tiny");
    Put(1, "pastfoo2", "v2");  // Advance sequence number one more
1664

Y
Yi Wu 已提交
1665 1666
    ASSERT_OK(Flush(1));
    ASSERT_GT(NumTableFilesAtLevel(0, 1), 0);
S
sdong 已提交
1667

Y
Yi Wu 已提交
1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678
    ASSERT_EQ(big, Get(1, "foo", snapshot));
    ASSERT_TRUE(Between(Size("", "pastfoo", 1), 50000, 60000));
    db_->ReleaseSnapshot(snapshot);
    ASSERT_EQ(AllEntriesFor("foo", 1), "[ tiny, " + big + " ]");
    Slice x("x");
    dbfull()->TEST_CompactRange(0, nullptr, &x, handles_[1]);
    ASSERT_EQ(AllEntriesFor("foo", 1), "[ tiny ]");
    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
    ASSERT_GE(NumTableFilesAtLevel(1, 1), 1);
    dbfull()->TEST_CompactRange(1, nullptr, &x, handles_[1]);
    ASSERT_EQ(AllEntriesFor("foo", 1), "[ tiny ]");
1679

Y
Yi Wu 已提交
1680 1681 1682 1683
    ASSERT_TRUE(Between(Size("", "pastfoo", 1), 0, 1000));
    // ApproximateOffsetOf() is not yet implemented in plain table format,
    // which is used by Size().
  } while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction |
1684
                         kSkipPlainTable));
Y
Yi Wu 已提交
1685 1686
}
#endif  // ROCKSDB_LITE
1687

Y
Yi Wu 已提交
1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704
TEST_F(DBTest, UnremovableSingleDelete) {
  // If we compact:
  //
  // Put(A, v1) Snapshot SingleDelete(A) Put(A, v2)
  //
  // We do not want to end up with:
  //
  // Put(A, v1) Snapshot Put(A, v2)
  //
  // Because a subsequent SingleDelete(A) would delete the Put(A, v2)
  // but not Put(A, v1), so Get(A) would return v1.
  anon::OptionsOverride options_override;
  options_override.skip_policy = kSkipNoSnapshot;
  do {
    Options options = CurrentOptions(options_override);
    options.disable_auto_compactions = true;
    CreateAndReopenWithCF({"pikachu"}, options);
K
kailiu 已提交
1705

Y
Yi Wu 已提交
1706 1707 1708 1709 1710
    Put(1, "foo", "first");
    const Snapshot* snapshot = db_->GetSnapshot();
    SingleDelete(1, "foo");
    Put(1, "foo", "second");
    ASSERT_OK(Flush(1));
1711

Y
Yi Wu 已提交
1712 1713
    ASSERT_EQ("first", Get(1, "foo", snapshot));
    ASSERT_EQ("second", Get(1, "foo"));
1714

Y
Yi Wu 已提交
1715 1716 1717
    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
                           nullptr);
    ASSERT_EQ("[ second, SDEL, first ]", AllEntriesFor("foo", 1));
1718

Y
Yi Wu 已提交
1719
    SingleDelete(1, "foo");
1720

Y
Yi Wu 已提交
1721 1722
    ASSERT_EQ("first", Get(1, "foo", snapshot));
    ASSERT_EQ("NOT_FOUND", Get(1, "foo"));
1723

Y
Yi Wu 已提交
1724 1725
    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
                           nullptr);
1726

Y
Yi Wu 已提交
1727 1728 1729
    ASSERT_EQ("first", Get(1, "foo", snapshot));
    ASSERT_EQ("NOT_FOUND", Get(1, "foo"));
    db_->ReleaseSnapshot(snapshot);
1730 1731 1732 1733 1734
    // Skip FIFO and universal compaction beccause they do not apply to the test
    // case. Skip MergePut because single delete does not get removed when it
    // encounters a merge.
  } while (ChangeOptions(kSkipFIFOCompaction | kSkipUniversalCompaction |
                         kSkipMergePut));
1735 1736
}

Y
Yi Wu 已提交
1737 1738 1739 1740 1741 1742 1743 1744 1745 1746
#ifndef ROCKSDB_LITE
TEST_F(DBTest, DeletionMarkers1) {
  Options options = CurrentOptions();
  CreateAndReopenWithCF({"pikachu"}, options);
  Put(1, "foo", "v1");
  ASSERT_OK(Flush(1));
  const int last = 2;
  MoveFilesToLevel(last, 1);
  // foo => v1 is now in last level
  ASSERT_EQ(NumTableFilesAtLevel(last, 1), 1);
1747

Y
Yi Wu 已提交
1748 1749 1750 1751 1752 1753 1754
  // Place a table at level last-1 to prevent merging with preceding mutation
  Put(1, "a", "begin");
  Put(1, "z", "end");
  Flush(1);
  MoveFilesToLevel(last - 1, 1);
  ASSERT_EQ(NumTableFilesAtLevel(last, 1), 1);
  ASSERT_EQ(NumTableFilesAtLevel(last - 1, 1), 1);
1755

Y
Yi Wu 已提交
1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769
  Delete(1, "foo");
  Put(1, "foo", "v2");
  ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2, DEL, v1 ]");
  ASSERT_OK(Flush(1));  // Moves to level last-2
  ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2, v1 ]");
  Slice z("z");
  dbfull()->TEST_CompactRange(last - 2, nullptr, &z, handles_[1]);
  // DEL eliminated, but v1 remains because we aren't compacting that level
  // (DEL can be eliminated because v2 hides v1).
  ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2, v1 ]");
  dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr, handles_[1]);
  // Merging last-1 w/ last, so we are the base level for "foo", so
  // DEL is removed.  (as is v1).
  ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2 ]");
1770
}
1771

Y
Yi Wu 已提交
1772
TEST_F(DBTest, DeletionMarkers2) {
1773
  Options options = CurrentOptions();
Y
Yi Wu 已提交
1774 1775 1776 1777 1778 1779 1780
  CreateAndReopenWithCF({"pikachu"}, options);
  Put(1, "foo", "v1");
  ASSERT_OK(Flush(1));
  const int last = 2;
  MoveFilesToLevel(last, 1);
  // foo => v1 is now in last level
  ASSERT_EQ(NumTableFilesAtLevel(last, 1), 1);
1781

Y
Yi Wu 已提交
1782 1783 1784 1785 1786 1787 1788
  // Place a table at level last-1 to prevent merging with preceding mutation
  Put(1, "a", "begin");
  Put(1, "z", "end");
  Flush(1);
  MoveFilesToLevel(last - 1, 1);
  ASSERT_EQ(NumTableFilesAtLevel(last, 1), 1);
  ASSERT_EQ(NumTableFilesAtLevel(last - 1, 1), 1);
1789

Y
Yi Wu 已提交
1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800
  Delete(1, "foo");
  ASSERT_EQ(AllEntriesFor("foo", 1), "[ DEL, v1 ]");
  ASSERT_OK(Flush(1));  // Moves to level last-2
  ASSERT_EQ(AllEntriesFor("foo", 1), "[ DEL, v1 ]");
  dbfull()->TEST_CompactRange(last - 2, nullptr, nullptr, handles_[1]);
  // DEL kept: "last" file overlaps
  ASSERT_EQ(AllEntriesFor("foo", 1), "[ DEL, v1 ]");
  dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr, handles_[1]);
  // Merging last-1 w/ last, so we are the base level for "foo", so
  // DEL is removed.  (as is v1).
  ASSERT_EQ(AllEntriesFor("foo", 1), "[ ]");
1801
}
1802

Y
Yi Wu 已提交
1803
TEST_F(DBTest, OverlapInLevel0) {
1804
  do {
S
sdong 已提交
1805
    Options options = CurrentOptions();
L
Lei Jin 已提交
1806
    CreateAndReopenWithCF({"pikachu"}, options);
1807

Y
Yi Wu 已提交
1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818
    // Fill levels 1 and 2 to disable the pushing of new memtables to levels >
    // 0.
    ASSERT_OK(Put(1, "100", "v100"));
    ASSERT_OK(Put(1, "999", "v999"));
    Flush(1);
    MoveFilesToLevel(2, 1);
    ASSERT_OK(Delete(1, "100"));
    ASSERT_OK(Delete(1, "999"));
    Flush(1);
    MoveFilesToLevel(1, 1);
    ASSERT_EQ("0,1,1", FilesPerLevel(1));
1819

Y
Yi Wu 已提交
1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831
    // Make files spanning the following ranges in level-0:
    //  files[0]  200 .. 900
    //  files[1]  300 .. 500
    // Note that files are sorted by smallest key.
    ASSERT_OK(Put(1, "300", "v300"));
    ASSERT_OK(Put(1, "500", "v500"));
    Flush(1);
    ASSERT_OK(Put(1, "200", "v200"));
    ASSERT_OK(Put(1, "600", "v600"));
    ASSERT_OK(Put(1, "900", "v900"));
    Flush(1);
    ASSERT_EQ("2,1,1", FilesPerLevel(1));
1832

Y
Yi Wu 已提交
1833 1834 1835 1836
    // Compact away the placeholder files we created initially
    dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
    dbfull()->TEST_CompactRange(2, nullptr, nullptr, handles_[1]);
    ASSERT_EQ("2", FilesPerLevel(1));
1837

Y
Yi Wu 已提交
1838 1839 1840 1841 1842 1843 1844 1845 1846 1847
    // Do a memtable compaction.  Before bug-fix, the compaction would
    // not detect the overlap with level-0 files and would incorrectly place
    // the deletion in a deeper level.
    ASSERT_OK(Delete(1, "600"));
    Flush(1);
    ASSERT_EQ("3", FilesPerLevel(1));
    ASSERT_EQ("NOT_FOUND", Get(1, "600"));
  } while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction));
}
#endif  // ROCKSDB_LITE
1848

Y
Yi Wu 已提交
1849 1850 1851
TEST_F(DBTest, ComparatorCheck) {
  class NewComparator : public Comparator {
   public:
1852 1853
    const char* Name() const override { return "rocksdb.NewComparator"; }
    int Compare(const Slice& a, const Slice& b) const override {
Y
Yi Wu 已提交
1854 1855
      return BytewiseComparator()->Compare(a, b);
    }
1856
    void FindShortestSeparator(std::string* s, const Slice& l) const override {
Y
Yi Wu 已提交
1857 1858
      BytewiseComparator()->FindShortestSeparator(s, l);
    }
1859
    void FindShortSuccessor(std::string* key) const override {
Y
Yi Wu 已提交
1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875
      BytewiseComparator()->FindShortSuccessor(key);
    }
  };
  Options new_options, options;
  NewComparator cmp;
  do {
    options = CurrentOptions();
    CreateAndReopenWithCF({"pikachu"}, options);
    new_options = CurrentOptions();
    new_options.comparator = &cmp;
    // only the non-default column family has non-matching comparator
    Status s = TryReopenWithColumnFamilies(
        {"default", "pikachu"}, std::vector<Options>({options, new_options}));
    ASSERT_TRUE(!s.ok());
    ASSERT_TRUE(s.ToString().find("comparator") != std::string::npos)
        << s.ToString();
1876
  } while (ChangeCompactOptions());
1877 1878
}

Y
Yi Wu 已提交
1879 1880 1881
TEST_F(DBTest, CustomComparator) {
  class NumberComparator : public Comparator {
   public:
1882 1883
    const char* Name() const override { return "test.NumberComparator"; }
    int Compare(const Slice& a, const Slice& b) const override {
Y
Yi Wu 已提交
1884 1885
      return ToNumber(a) - ToNumber(b);
    }
1886
    void FindShortestSeparator(std::string* s, const Slice& l) const override {
Y
Yi Wu 已提交
1887 1888 1889
      ToNumber(*s);  // Check format
      ToNumber(l);   // Check format
    }
1890
    void FindShortSuccessor(std::string* key) const override {
Y
Yi Wu 已提交
1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937
      ToNumber(*key);  // Check format
    }

   private:
    static int ToNumber(const Slice& x) {
      // Check that there are no extra characters.
      EXPECT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size() - 1] == ']')
          << EscapeString(x);
      int val;
      char ignored;
      EXPECT_TRUE(sscanf(x.ToString().c_str(), "[%i]%c", &val, &ignored) == 1)
          << EscapeString(x);
      return val;
    }
  };
  Options new_options;
  NumberComparator cmp;
  do {
    new_options = CurrentOptions();
    new_options.create_if_missing = true;
    new_options.comparator = &cmp;
    new_options.write_buffer_size = 4096;  // Compact more often
    new_options.arena_block_size = 4096;
    new_options = CurrentOptions(new_options);
    DestroyAndReopen(new_options);
    CreateAndReopenWithCF({"pikachu"}, new_options);
    ASSERT_OK(Put(1, "[10]", "ten"));
    ASSERT_OK(Put(1, "[0x14]", "twenty"));
    for (int i = 0; i < 2; i++) {
      ASSERT_EQ("ten", Get(1, "[10]"));
      ASSERT_EQ("ten", Get(1, "[0xa]"));
      ASSERT_EQ("twenty", Get(1, "[20]"));
      ASSERT_EQ("twenty", Get(1, "[0x14]"));
      ASSERT_EQ("NOT_FOUND", Get(1, "[15]"));
      ASSERT_EQ("NOT_FOUND", Get(1, "[0xf]"));
      Compact(1, "[0]", "[9999]");
    }

    for (int run = 0; run < 2; run++) {
      for (int i = 0; i < 1000; i++) {
        char buf[100];
        snprintf(buf, sizeof(buf), "[%d]", i * 10);
        ASSERT_OK(Put(1, buf, buf));
      }
      Compact(1, "[0]", "[1000000]");
    }
  } while (ChangeCompactOptions());
J
jorlow@chromium.org 已提交
1938 1939
}

Y
Yi Wu 已提交
1940
TEST_F(DBTest, DBOpen_Options) {
S
sdong 已提交
1941
  Options options = CurrentOptions();
1942
  std::string dbname = test::PerThreadDBPath("db_options_test");
Y
Yi Wu 已提交
1943
  ASSERT_OK(DestroyDB(dbname, options));
1944

Y
Yi Wu 已提交
1945 1946 1947 1948 1949 1950
  // Does not exist, and create_if_missing == false: error
  DB* db = nullptr;
  options.create_if_missing = false;
  Status s = DB::Open(options, dbname, &db);
  ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != nullptr);
  ASSERT_TRUE(db == nullptr);
1951

Y
Yi Wu 已提交
1952 1953 1954 1955 1956
  // Does not exist, and create_if_missing == true: OK
  options.create_if_missing = true;
  s = DB::Open(options, dbname, &db);
  ASSERT_OK(s);
  ASSERT_TRUE(db != nullptr);
1957

Y
Yi Wu 已提交
1958 1959
  delete db;
  db = nullptr;
1960

Y
Yi Wu 已提交
1961 1962 1963 1964 1965 1966
  // Does exist, and error_if_exists == true: error
  options.create_if_missing = false;
  options.error_if_exists = true;
  s = DB::Open(options, dbname, &db);
  ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != nullptr);
  ASSERT_TRUE(db == nullptr);
1967

Y
Yi Wu 已提交
1968 1969 1970 1971 1972 1973
  // Does exist, and error_if_exists == false: OK
  options.create_if_missing = true;
  options.error_if_exists = false;
  s = DB::Open(options, dbname, &db);
  ASSERT_OK(s);
  ASSERT_TRUE(db != nullptr);
1974

Y
Yi Wu 已提交
1975 1976 1977
  delete db;
  db = nullptr;
}
1978

Y
Yi Wu 已提交
1979 1980 1981
TEST_F(DBTest, DBOpen_Change_NumLevels) {
  Options options = CurrentOptions();
  options.create_if_missing = true;
1982
  DestroyAndReopen(options);
Y
Yi Wu 已提交
1983 1984
  ASSERT_TRUE(db_ != nullptr);
  CreateAndReopenWithCF({"pikachu"}, options);
1985

Y
Yi Wu 已提交
1986 1987 1988 1989 1990
  ASSERT_OK(Put(1, "a", "123"));
  ASSERT_OK(Put(1, "b", "234"));
  Flush(1);
  MoveFilesToLevel(3, 1);
  Close();
1991

Y
Yi Wu 已提交
1992 1993 1994 1995 1996 1997
  options.create_if_missing = false;
  options.num_levels = 2;
  Status s = TryReopenWithColumnFamilies({"default", "pikachu"}, options);
  ASSERT_TRUE(strstr(s.ToString().c_str(), "Invalid argument") != nullptr);
  ASSERT_TRUE(db_ == nullptr);
}
1998

Y
Yi Wu 已提交
1999
TEST_F(DBTest, DestroyDBMetaDatabase) {
2000
  std::string dbname = test::PerThreadDBPath("db_meta");
Y
Yi Wu 已提交
2001 2002 2003 2004 2005
  ASSERT_OK(env_->CreateDirIfMissing(dbname));
  std::string metadbname = MetaDatabaseName(dbname, 0);
  ASSERT_OK(env_->CreateDirIfMissing(metadbname));
  std::string metametadbname = MetaDatabaseName(metadbname, 0);
  ASSERT_OK(env_->CreateDirIfMissing(metametadbname));
2006

Y
Yi Wu 已提交
2007 2008 2009 2010 2011
  // Destroy previous versions if they exist. Using the long way.
  Options options = CurrentOptions();
  ASSERT_OK(DestroyDB(metametadbname, options));
  ASSERT_OK(DestroyDB(metadbname, options));
  ASSERT_OK(DestroyDB(dbname, options));
2012

Y
Yi Wu 已提交
2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023
  // Setup databases
  DB* db = nullptr;
  ASSERT_OK(DB::Open(options, dbname, &db));
  delete db;
  db = nullptr;
  ASSERT_OK(DB::Open(options, metadbname, &db));
  delete db;
  db = nullptr;
  ASSERT_OK(DB::Open(options, metametadbname, &db));
  delete db;
  db = nullptr;
2024

Y
Yi Wu 已提交
2025 2026
  // Delete databases
  ASSERT_OK(DestroyDB(dbname, options));
2027

Y
Yi Wu 已提交
2028 2029 2030 2031 2032
  // Check if deletion worked.
  options.create_if_missing = false;
  ASSERT_TRUE(!(DB::Open(options, dbname, &db)).ok());
  ASSERT_TRUE(!(DB::Open(options, metadbname, &db)).ok());
  ASSERT_TRUE(!(DB::Open(options, metametadbname, &db)).ok());
2033 2034
}

Y
Yi Wu 已提交
2035 2036
#ifndef ROCKSDB_LITE
TEST_F(DBTest, SnapshotFiles) {
A
Andres Noetzli 已提交
2037
  do {
Y
Yi Wu 已提交
2038 2039
    Options options = CurrentOptions();
    options.write_buffer_size = 100000000;  // Large write buffer
A
Andres Noetzli 已提交
2040 2041
    CreateAndReopenWithCF({"pikachu"}, options);

Y
Yi Wu 已提交
2042
    Random rnd(301);
A
Andres Noetzli 已提交
2043

Y
Yi Wu 已提交
2044 2045 2046 2047 2048 2049 2050
    // Write 8MB (80 values, each 100K)
    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
    std::vector<std::string> values;
    for (int i = 0; i < 80; i++) {
      values.push_back(RandomString(&rnd, 100000));
      ASSERT_OK(Put((i < 40), Key(i), values[i]));
    }
A
Andres Noetzli 已提交
2051

Y
Yi Wu 已提交
2052 2053
    // assert that nothing makes it to disk yet.
    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
A
Andres Noetzli 已提交
2054

Y
Yi Wu 已提交
2055 2056 2057 2058 2059 2060
    // get a file snapshot
    uint64_t manifest_number = 0;
    uint64_t manifest_size = 0;
    std::vector<std::string> files;
    dbfull()->DisableFileDeletions();
    dbfull()->GetLiveFiles(files, &manifest_size);
A
Andres Noetzli 已提交
2061

W
Wanning Jiang 已提交
2062 2063
    // CURRENT, MANIFEST, OPTIONS, *.sst files (one for each CF)
    ASSERT_EQ(files.size(), 5U);
A
Andres Noetzli 已提交
2064

Y
Yi Wu 已提交
2065 2066
    uint64_t number = 0;
    FileType type;
A
Andres Noetzli 已提交
2067

Y
Yi Wu 已提交
2068 2069 2070
    // copy these files to a new snapshot directory
    std::string snapdir = dbname_ + ".snapdir/";
    ASSERT_OK(env_->CreateDirIfMissing(snapdir));
A
Andres Noetzli 已提交
2071

Y
Yi Wu 已提交
2072 2073 2074 2075 2076 2077
    for (size_t i = 0; i < files.size(); i++) {
      // our clients require that GetLiveFiles returns
      // files with "/" as first character!
      ASSERT_EQ(files[i][0], '/');
      std::string src = dbname_ + files[i];
      std::string dest = snapdir + files[i];
2078

Y
Yi Wu 已提交
2079 2080
      uint64_t size;
      ASSERT_OK(env_->GetFileSize(src, &size));
2081

Y
Yi Wu 已提交
2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093
      // record the number and the size of the
      // latest manifest file
      if (ParseFileName(files[i].substr(1), &number, &type)) {
        if (type == kDescriptorFile) {
          if (number > manifest_number) {
            manifest_number = number;
            ASSERT_GE(size, manifest_size);
            size = manifest_size;  // copy only valid MANIFEST data
          }
        }
      }
      CopyFile(src, dest, size);
2094
    }
2095

Y
Yi Wu 已提交
2096 2097 2098 2099 2100 2101 2102 2103
    // release file snapshot
    dbfull()->DisableFileDeletions();
    // overwrite one key, this key should not appear in the snapshot
    std::vector<std::string> extras;
    for (unsigned int i = 0; i < 1; i++) {
      extras.push_back(RandomString(&rnd, 100000));
      ASSERT_OK(Put(0, Key(i), extras[i]));
    }
L
Lei Jin 已提交
2104

Y
Yi Wu 已提交
2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116
    // verify that data in the snapshot are correct
    std::vector<ColumnFamilyDescriptor> column_families;
    column_families.emplace_back("default", ColumnFamilyOptions());
    column_families.emplace_back("pikachu", ColumnFamilyOptions());
    std::vector<ColumnFamilyHandle*> cf_handles;
    DB* snapdb;
    DBOptions opts;
    opts.env = env_;
    opts.create_if_missing = false;
    Status stat =
        DB::Open(opts, snapdir, column_families, &cf_handles, &snapdb);
    ASSERT_OK(stat);
2117

Y
Yi Wu 已提交
2118 2119 2120 2121 2122
    ReadOptions roptions;
    std::string val;
    for (unsigned int i = 0; i < 80; i++) {
      stat = snapdb->Get(roptions, cf_handles[i < 40], Key(i), &val);
      ASSERT_EQ(values[i].compare(val), 0);
L
Lei Jin 已提交
2123
    }
Y
Yi Wu 已提交
2124 2125 2126 2127
    for (auto cfh : cf_handles) {
      delete cfh;
    }
    delete snapdb;
L
Lei Jin 已提交
2128

Y
Yi Wu 已提交
2129 2130 2131 2132 2133 2134 2135
    // look at the new live files after we added an 'extra' key
    // and after we took the first snapshot.
    uint64_t new_manifest_number = 0;
    uint64_t new_manifest_size = 0;
    std::vector<std::string> newfiles;
    dbfull()->DisableFileDeletions();
    dbfull()->GetLiveFiles(newfiles, &new_manifest_size);
L
Lei Jin 已提交
2136

Y
Yi Wu 已提交
2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157
    // find the new manifest file. assert that this manifest file is
    // the same one as in the previous snapshot. But its size should be
    // larger because we added an extra key after taking the
    // previous shapshot.
    for (size_t i = 0; i < newfiles.size(); i++) {
      std::string src = dbname_ + "/" + newfiles[i];
      // record the lognumber and the size of the
      // latest manifest file
      if (ParseFileName(newfiles[i].substr(1), &number, &type)) {
        if (type == kDescriptorFile) {
          if (number > new_manifest_number) {
            uint64_t size;
            new_manifest_number = number;
            ASSERT_OK(env_->GetFileSize(src, &size));
            ASSERT_GE(size, new_manifest_size);
          }
        }
      }
    }
    ASSERT_EQ(manifest_number, new_manifest_number);
    ASSERT_GT(new_manifest_size, manifest_size);
L
Lei Jin 已提交
2158

Y
Yi Wu 已提交
2159 2160 2161 2162 2163
    // release file snapshot
    dbfull()->DisableFileDeletions();
  } while (ChangeCompactOptions());
}
#endif
L
Lei Jin 已提交
2164

Y
Yi Wu 已提交
2165 2166 2167
TEST_F(DBTest, PurgeInfoLogs) {
  Options options = CurrentOptions();
  options.keep_log_file_num = 5;
2168
  options.create_if_missing = true;
Y
Yi Wu 已提交
2169 2170 2171 2172 2173 2174 2175 2176 2177 2178
  for (int mode = 0; mode <= 1; mode++) {
    if (mode == 1) {
      options.db_log_dir = dbname_ + "_logs";
      env_->CreateDirIfMissing(options.db_log_dir);
    } else {
      options.db_log_dir = "";
    }
    for (int i = 0; i < 8; i++) {
      Reopen(options);
    }
2179

Y
Yi Wu 已提交
2180 2181 2182 2183 2184 2185 2186
    std::vector<std::string> files;
    env_->GetChildren(options.db_log_dir.empty() ? dbname_ : options.db_log_dir,
                      &files);
    int info_log_count = 0;
    for (std::string file : files) {
      if (file.find("LOG") != std::string::npos) {
        info_log_count++;
2187
      }
2188
    }
Y
Yi Wu 已提交
2189
    ASSERT_EQ(5, info_log_count);
2190

Y
Yi Wu 已提交
2191 2192 2193 2194 2195 2196 2197
    Destroy(options);
    // For mode (1), test DestroyDB() to delete all the logs under DB dir.
    // For mode (2), no info log file should have been put under DB dir.
    std::vector<std::string> db_files;
    env_->GetChildren(dbname_, &db_files);
    for (std::string file : db_files) {
      ASSERT_TRUE(file.find("LOG") == std::string::npos);
2198 2199
    }

Y
Yi Wu 已提交
2200 2201 2202 2203 2204 2205 2206 2207
    if (mode == 1) {
      // Cleaning up
      env_->GetChildren(options.db_log_dir, &files);
      for (std::string file : files) {
        env_->DeleteFile(options.db_log_dir + "/" + file);
      }
      env_->DeleteDir(options.db_log_dir);
    }
2208 2209 2210
  }
}

Y
Yi Wu 已提交
2211 2212 2213
#ifndef ROCKSDB_LITE
// Multi-threaded test:
namespace {
2214

Y
Yi Wu 已提交
2215 2216 2217 2218
static const int kColumnFamilies = 10;
static const int kNumThreads = 10;
static const int kTestSeconds = 10;
static const int kNumKeys = 1000;
2219

Y
Yi Wu 已提交
2220 2221 2222 2223 2224 2225
struct MTState {
  DBTest* test;
  std::atomic<bool> stop;
  std::atomic<int> counter[kNumThreads];
  std::atomic<bool> thread_done[kNumThreads];
};
2226

Y
Yi Wu 已提交
2227 2228 2229
struct MTThread {
  MTState* state;
  int id;
2230
  bool multiget_batched;
Y
Yi Wu 已提交
2231
};
2232

Y
Yi Wu 已提交
2233 2234 2235 2236 2237 2238 2239 2240 2241 2242
static void MTThreadBody(void* arg) {
  MTThread* t = reinterpret_cast<MTThread*>(arg);
  int id = t->id;
  DB* db = t->state->test->db_;
  int counter = 0;
  fprintf(stderr, "... starting thread %d\n", id);
  Random rnd(1000 + id);
  char valbuf[1500];
  while (t->state->stop.load(std::memory_order_acquire) == false) {
    t->state->counter[id].store(counter, std::memory_order_release);
2243

Y
Yi Wu 已提交
2244 2245 2246
    int key = rnd.Uniform(kNumKeys);
    char keybuf[20];
    snprintf(keybuf, sizeof(keybuf), "%016d", key);
2247

Y
Yi Wu 已提交
2248 2249 2250 2251 2252
    if (rnd.OneIn(2)) {
      // Write values of the form <key, my id, counter, cf, unique_id>.
      // into each of the CFs
      // We add some padding for force compactions.
      int unique_id = rnd.Uniform(1000000);
2253

Y
Yi Wu 已提交
2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278
      // Half of the time directly use WriteBatch. Half of the time use
      // WriteBatchWithIndex.
      if (rnd.OneIn(2)) {
        WriteBatch batch;
        for (int cf = 0; cf < kColumnFamilies; ++cf) {
          snprintf(valbuf, sizeof(valbuf), "%d.%d.%d.%d.%-1000d", key, id,
                   static_cast<int>(counter), cf, unique_id);
          batch.Put(t->state->test->handles_[cf], Slice(keybuf), Slice(valbuf));
        }
        ASSERT_OK(db->Write(WriteOptions(), &batch));
      } else {
        WriteBatchWithIndex batch(db->GetOptions().comparator);
        for (int cf = 0; cf < kColumnFamilies; ++cf) {
          snprintf(valbuf, sizeof(valbuf), "%d.%d.%d.%d.%-1000d", key, id,
                   static_cast<int>(counter), cf, unique_id);
          batch.Put(t->state->test->handles_[cf], Slice(keybuf), Slice(valbuf));
        }
        ASSERT_OK(db->Write(WriteOptions(), batch.GetWriteBatch()));
      }
    } else {
      // Read a value and verify that it matches the pattern written above
      // and that writes to all column families were atomic (unique_id is the
      // same)
      std::vector<Slice> keys(kColumnFamilies, Slice(keybuf));
      std::vector<std::string> values;
2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300
      std::vector<Status> statuses;
      if (!t->multiget_batched) {
        statuses = db->MultiGet(ReadOptions(), t->state->test->handles_, keys,
                                &values);
      } else {
        std::vector<PinnableSlice> pin_values(keys.size());
        statuses.resize(keys.size());
        const Snapshot* snapshot = db->GetSnapshot();
        ReadOptions ro;
        ro.snapshot = snapshot;
        for (int cf = 0; cf < kColumnFamilies; ++cf) {
          db->MultiGet(ro, t->state->test->handles_[cf], 1, &keys[cf],
                       &pin_values[cf], &statuses[cf]);
        }
        db->ReleaseSnapshot(snapshot);
        values.resize(keys.size());
        for (int cf = 0; cf < kColumnFamilies; ++cf) {
          if (statuses[cf].ok()) {
            values[cf].assign(pin_values[cf].data(), pin_values[cf].size());
          }
        }
      }
Y
Yi Wu 已提交
2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338
      Status s = statuses[0];
      // all statuses have to be the same
      for (size_t i = 1; i < statuses.size(); ++i) {
        // they are either both ok or both not-found
        ASSERT_TRUE((s.ok() && statuses[i].ok()) ||
                    (s.IsNotFound() && statuses[i].IsNotFound()));
      }
      if (s.IsNotFound()) {
        // Key has not yet been written
      } else {
        // Check that the writer thread counter is >= the counter in the value
        ASSERT_OK(s);
        int unique_id = -1;
        for (int i = 0; i < kColumnFamilies; ++i) {
          int k, w, c, cf, u;
          ASSERT_EQ(5, sscanf(values[i].c_str(), "%d.%d.%d.%d.%d", &k, &w, &c,
                              &cf, &u))
              << values[i];
          ASSERT_EQ(k, key);
          ASSERT_GE(w, 0);
          ASSERT_LT(w, kNumThreads);
          ASSERT_LE(c, t->state->counter[w].load(std::memory_order_acquire));
          ASSERT_EQ(cf, i);
          if (i == 0) {
            unique_id = u;
          } else {
            // this checks that updates across column families happened
            // atomically -- all unique ids are the same
            ASSERT_EQ(u, unique_id);
          }
        }
      }
    }
    counter++;
  }
  t->state->thread_done[id].store(true, std::memory_order_release);
  fprintf(stderr, "... stopping thread %d after %d ops\n", id, int(counter));
}
2339

Y
Yi Wu 已提交
2340
}  // namespace
2341

2342 2343 2344
class MultiThreadedDBTest
    : public DBTest,
      public ::testing::WithParamInterface<std::tuple<int, bool>> {
Y
Yi Wu 已提交
2345
 public:
2346 2347 2348
  void SetUp() override {
    std::tie(option_config_, multiget_batched_) = GetParam();
  }
2349

Y
Yi Wu 已提交
2350 2351 2352
  static std::vector<int> GenerateOptionConfigs() {
    std::vector<int> optionConfigs;
    for (int optionConfig = kDefault; optionConfig < kEnd; ++optionConfig) {
2353
      optionConfigs.push_back(optionConfig);
2354
    }
Y
Yi Wu 已提交
2355 2356
    return optionConfigs;
  }
2357 2358

  bool multiget_batched_;
Y
Yi Wu 已提交
2359
};
2360

Y
Yi Wu 已提交
2361
TEST_P(MultiThreadedDBTest, MultiThreaded) {
2362
  if (option_config_ == kPipelinedWrite) return;
Y
Yi Wu 已提交
2363 2364
  anon::OptionsOverride options_override;
  options_override.skip_policy = kSkipNoSnapshot;
2365
  Options options = CurrentOptions(options_override);
Y
Yi Wu 已提交
2366 2367 2368 2369
  std::vector<std::string> cfs;
  for (int i = 1; i < kColumnFamilies; ++i) {
    cfs.push_back(ToString(i));
  }
2370 2371
  Reopen(options);
  CreateAndReopenWithCF(cfs, options);
Y
Yi Wu 已提交
2372 2373 2374 2375 2376 2377 2378
  // Initialize state
  MTState mt;
  mt.test = this;
  mt.stop.store(false, std::memory_order_release);
  for (int id = 0; id < kNumThreads; id++) {
    mt.counter[id].store(0, std::memory_order_release);
    mt.thread_done[id].store(false, std::memory_order_release);
2379 2380
  }

Y
Yi Wu 已提交
2381 2382 2383 2384 2385
  // Start threads
  MTThread thread[kNumThreads];
  for (int id = 0; id < kNumThreads; id++) {
    thread[id].state = &mt;
    thread[id].id = id;
2386
    thread[id].multiget_batched = multiget_batched_;
Y
Yi Wu 已提交
2387
    env_->StartThread(MTThreadBody, &thread[id]);
2388
  }
Y
Yi Wu 已提交
2389 2390 2391 2392 2393 2394 2395 2396 2397 2398

  // Let them run for a while
  env_->SleepForMicroseconds(kTestSeconds * 1000000);

  // Stop the threads and wait for them to finish
  mt.stop.store(true, std::memory_order_release);
  for (int id = 0; id < kNumThreads; id++) {
    while (mt.thread_done[id].load(std::memory_order_acquire) == false) {
      env_->SleepForMicroseconds(100000);
    }
2399 2400 2401
  }
}

Y
Yi Wu 已提交
2402 2403
INSTANTIATE_TEST_CASE_P(
    MultiThreaded, MultiThreadedDBTest,
2404 2405 2406
    ::testing::Combine(
        ::testing::ValuesIn(MultiThreadedDBTest::GenerateOptionConfigs()),
        ::testing::Bool()));
Y
Yi Wu 已提交
2407
#endif  // ROCKSDB_LITE
2408

Y
Yi Wu 已提交
2409
// Group commit test:
2410 2411 2412
#if !defined(TRAVIS) && !defined(OS_WIN)
// Disable this test temporarily on Travis and appveyor as it fails
// intermittently. Github issue: #4151
Y
Yi Wu 已提交
2413
namespace {
2414

Y
Yi Wu 已提交
2415 2416
static const int kGCNumThreads = 4;
static const int kGCNumKeys = 1000;
2417

Y
Yi Wu 已提交
2418 2419 2420 2421 2422
struct GCThread {
  DB* db;
  int id;
  std::atomic<bool> done;
};
2423

Y
Yi Wu 已提交
2424 2425 2426 2427 2428
static void GCThreadBody(void* arg) {
  GCThread* t = reinterpret_cast<GCThread*>(arg);
  int id = t->id;
  DB* db = t->db;
  WriteOptions wo;
2429

Y
Yi Wu 已提交
2430 2431 2432 2433 2434 2435
  for (int i = 0; i < kGCNumKeys; ++i) {
    std::string kv(ToString(i + id * kGCNumKeys));
    ASSERT_OK(db->Put(wo, kv, kv));
  }
  t->done = true;
}
2436

Y
Yi Wu 已提交
2437 2438 2439 2440 2441 2442 2443 2444 2445
}  // namespace

TEST_F(DBTest, GroupCommitTest) {
  do {
    Options options = CurrentOptions();
    options.env = env_;
    options.statistics = rocksdb::CreateDBStatistics();
    Reopen(options);

2446 2447
    rocksdb::SyncPoint::GetInstance()->LoadDependency(
        {{"WriteThread::JoinBatchGroup:BeganWaiting",
2448 2449 2450
          "DBImpl::WriteImpl:BeforeLeaderEnters"},
          {"WriteThread::AwaitState:BlockingWaiting",
          "WriteThread::EnterAsBatchGroupLeader:End"}});
2451 2452
    rocksdb::SyncPoint::GetInstance()->EnableProcessing();

Y
Yi Wu 已提交
2453 2454 2455 2456 2457 2458 2459
    // Start threads
    GCThread thread[kGCNumThreads];
    for (int id = 0; id < kGCNumThreads; id++) {
      thread[id].id = id;
      thread[id].db = db_;
      thread[id].done = false;
      env_->StartThread(GCThreadBody, &thread[id]);
2460
    }
2461
    env_->WaitForJoin();
2462

Y
Yi Wu 已提交
2463 2464 2465 2466 2467
    ASSERT_GT(TestGetTickerCount(options, WRITE_DONE_BY_OTHER), 0);

    std::vector<std::string> expected_db;
    for (int i = 0; i < kGCNumThreads * kGCNumKeys; ++i) {
      expected_db.push_back(ToString(i));
2468
    }
2469
    std::sort(expected_db.begin(), expected_db.end());
Y
Yi Wu 已提交
2470 2471 2472 2473 2474 2475 2476 2477

    Iterator* itr = db_->NewIterator(ReadOptions());
    itr->SeekToFirst();
    for (auto x : expected_db) {
      ASSERT_TRUE(itr->Valid());
      ASSERT_EQ(itr->key().ToString(), x);
      ASSERT_EQ(itr->value().ToString(), x);
      itr->Next();
2478
    }
Y
Yi Wu 已提交
2479 2480
    ASSERT_TRUE(!itr->Valid());
    delete itr;
2481

A
Andrew Kryczka 已提交
2482
    HistogramData hist_data;
Y
Yi Wu 已提交
2483 2484 2485
    options.statistics->histogramData(DB_WRITE, &hist_data);
    ASSERT_GT(hist_data.average, 0.0);
  } while (ChangeOptions(kSkipNoSeekToLast));
2486
}
2487
#endif  // TRAVIS
2488

Y
Yi Wu 已提交
2489 2490
namespace {
typedef std::map<std::string, std::string> KVMap;
2491 2492
}

Y
Yi Wu 已提交
2493
class ModelDB : public DB {
2494
 public:
Y
Yi Wu 已提交
2495 2496 2497
  class ModelSnapshot : public Snapshot {
   public:
    KVMap map_;
2498

2499
    SequenceNumber GetSequenceNumber() const override {
Y
Yi Wu 已提交
2500 2501 2502 2503 2504
      // no need to call this
      assert(false);
      return 0;
    }
  };
2505

Y
Yi Wu 已提交
2506 2507
  explicit ModelDB(const Options& options) : options_(options) {}
  using DB::Put;
2508 2509
  Status Put(const WriteOptions& o, ColumnFamilyHandle* cf, const Slice& k,
             const Slice& v) override {
Y
Yi Wu 已提交
2510 2511 2512
    WriteBatch batch;
    batch.Put(cf, k, v);
    return Write(o, &batch);
2513
  }
2514
  using DB::Close;
2515
  Status Close() override { return Status::OK(); }
Y
Yi Wu 已提交
2516
  using DB::Delete;
2517 2518
  Status Delete(const WriteOptions& o, ColumnFamilyHandle* cf,
                const Slice& key) override {
Y
Yi Wu 已提交
2519 2520 2521
    WriteBatch batch;
    batch.Delete(cf, key);
    return Write(o, &batch);
2522
  }
Y
Yi Wu 已提交
2523
  using DB::SingleDelete;
2524 2525
  Status SingleDelete(const WriteOptions& o, ColumnFamilyHandle* cf,
                      const Slice& key) override {
Y
Yi Wu 已提交
2526 2527 2528
    WriteBatch batch;
    batch.SingleDelete(cf, key);
    return Write(o, &batch);
2529
  }
Y
Yi Wu 已提交
2530
  using DB::Merge;
2531 2532
  Status Merge(const WriteOptions& o, ColumnFamilyHandle* cf, const Slice& k,
               const Slice& v) override {
Y
Yi Wu 已提交
2533 2534 2535 2536 2537
    WriteBatch batch;
    batch.Merge(cf, k, v);
    return Write(o, &batch);
  }
  using DB::Get;
2538 2539
  Status Get(const ReadOptions& /*options*/, ColumnFamilyHandle* /*cf*/,
             const Slice& key, PinnableSlice* /*value*/) override {
Y
Yi Wu 已提交
2540
    return Status::NotSupported(key);
2541 2542
  }

Y
Yi Wu 已提交
2543
  using DB::MultiGet;
2544
  std::vector<Status> MultiGet(
A
Andrew Kryczka 已提交
2545 2546
      const ReadOptions& /*options*/,
      const std::vector<ColumnFamilyHandle*>& /*column_family*/,
Y
Yi Wu 已提交
2547
      const std::vector<Slice>& keys,
A
Andrew Kryczka 已提交
2548
      std::vector<std::string>* /*values*/) override {
Y
Yi Wu 已提交
2549 2550 2551 2552
    std::vector<Status> s(keys.size(),
                          Status::NotSupported("Not implemented."));
    return s;
  }
2553

Y
Yi Wu 已提交
2554
#ifndef ROCKSDB_LITE
2555
  using DB::IngestExternalFile;
2556
  Status IngestExternalFile(
A
Andrew Kryczka 已提交
2557 2558 2559
      ColumnFamilyHandle* /*column_family*/,
      const std::vector<std::string>& /*external_files*/,
      const IngestExternalFileOptions& /*options*/) override {
Y
Yi Wu 已提交
2560 2561
    return Status::NotSupported("Not implemented.");
  }
2562

Y
Yanqin Jin 已提交
2563
  using DB::IngestExternalFiles;
2564
  Status IngestExternalFiles(
Y
Yanqin Jin 已提交
2565 2566 2567 2568
      const std::vector<IngestExternalFileArg>& /*args*/) override {
    return Status::NotSupported("Not implemented");
  }

2569 2570 2571 2572 2573 2574 2575 2576 2577 2578
  using DB::CreateColumnFamilyWithImport;
  virtual Status CreateColumnFamilyWithImport(
      const ColumnFamilyOptions& /*options*/,
      const std::string& /*column_family_name*/,
      const ImportColumnFamilyOptions& /*import_options*/,
      const ExportImportFilesMetaData& /*metadata*/,
      ColumnFamilyHandle** /*handle*/) override {
    return Status::NotSupported("Not implemented.");
  }

2579
  Status VerifyChecksum() override {
A
Aaron G 已提交
2580 2581 2582
    return Status::NotSupported("Not implemented.");
  }

Y
Yi Wu 已提交
2583
  using DB::GetPropertiesOfAllTables;
2584
  Status GetPropertiesOfAllTables(
A
Andrew Kryczka 已提交
2585 2586
      ColumnFamilyHandle* /*column_family*/,
      TablePropertiesCollection* /*props*/) override {
Y
Yi Wu 已提交
2587
    return Status();
2588 2589
  }

2590
  Status GetPropertiesOfTablesInRange(
A
Andrew Kryczka 已提交
2591 2592
      ColumnFamilyHandle* /*column_family*/, const Range* /*range*/,
      std::size_t /*n*/, TablePropertiesCollection* /*props*/) override {
Y
Yi Wu 已提交
2593 2594 2595
    return Status();
  }
#endif  // ROCKSDB_LITE
2596

Y
Yi Wu 已提交
2597
  using DB::KeyMayExist;
2598 2599 2600 2601
  bool KeyMayExist(const ReadOptions& /*options*/,
                   ColumnFamilyHandle* /*column_family*/, const Slice& /*key*/,
                   std::string* /*value*/,
                   bool* value_found = nullptr) override {
Y
Yi Wu 已提交
2602 2603 2604 2605
    if (value_found != nullptr) {
      *value_found = false;
    }
    return true;  // Not Supported directly
2606
  }
Y
Yi Wu 已提交
2607
  using DB::NewIterator;
2608 2609
  Iterator* NewIterator(const ReadOptions& options,
                        ColumnFamilyHandle* /*column_family*/) override {
Y
Yi Wu 已提交
2610 2611 2612 2613 2614 2615 2616 2617 2618
    if (options.snapshot == nullptr) {
      KVMap* saved = new KVMap;
      *saved = map_;
      return new ModelIter(saved, true);
    } else {
      const KVMap* snapshot_state =
          &(reinterpret_cast<const ModelSnapshot*>(options.snapshot)->map_);
      return new ModelIter(snapshot_state, false);
    }
2619
  }
2620 2621 2622
  Status NewIterators(const ReadOptions& /*options*/,
                      const std::vector<ColumnFamilyHandle*>& /*column_family*/,
                      std::vector<Iterator*>* /*iterators*/) override {
Y
Yi Wu 已提交
2623 2624
    return Status::NotSupported("Not supported yet");
  }
2625
  const Snapshot* GetSnapshot() override {
Y
Yi Wu 已提交
2626 2627 2628
    ModelSnapshot* snapshot = new ModelSnapshot;
    snapshot->map_ = map_;
    return snapshot;
2629 2630
  }

2631
  void ReleaseSnapshot(const Snapshot* snapshot) override {
Y
Yi Wu 已提交
2632
    delete reinterpret_cast<const ModelSnapshot*>(snapshot);
2633
  }
Y
Yi Wu 已提交
2634

2635
  Status Write(const WriteOptions& /*options*/, WriteBatch* batch) override {
Y
Yi Wu 已提交
2636 2637 2638
    class Handler : public WriteBatch::Handler {
     public:
      KVMap* map_;
2639
      void Put(const Slice& key, const Slice& value) override {
Y
Yi Wu 已提交
2640 2641
        (*map_)[key.ToString()] = value.ToString();
      }
2642
      void Merge(const Slice& /*key*/, const Slice& /*value*/) override {
Y
Yi Wu 已提交
2643 2644 2645
        // ignore merge for now
        // (*map_)[key.ToString()] = value.ToString();
      }
2646
      void Delete(const Slice& key) override { map_->erase(key.ToString()); }
Y
Yi Wu 已提交
2647 2648 2649 2650
    };
    Handler handler;
    handler.map_ = &map_;
    return batch->Iterate(&handler);
2651 2652
  }

Y
Yi Wu 已提交
2653
  using DB::GetProperty;
2654 2655
  bool GetProperty(ColumnFamilyHandle* /*column_family*/,
                   const Slice& /*property*/, std::string* /*value*/) override {
Y
Yi Wu 已提交
2656 2657 2658
    return false;
  }
  using DB::GetIntProperty;
2659 2660
  bool GetIntProperty(ColumnFamilyHandle* /*column_family*/,
                      const Slice& /*property*/, uint64_t* /*value*/) override {
2661 2662 2663
    return false;
  }
  using DB::GetMapProperty;
2664 2665 2666
  bool GetMapProperty(ColumnFamilyHandle* /*column_family*/,
                      const Slice& /*property*/,
                      std::map<std::string, std::string>* /*value*/) override {
Y
Yi Wu 已提交
2667 2668 2669
    return false;
  }
  using DB::GetAggregatedIntProperty;
2670 2671
  bool GetAggregatedIntProperty(const Slice& /*property*/,
                                uint64_t* /*value*/) override {
Y
Yi Wu 已提交
2672 2673 2674
    return false;
  }
  using DB::GetApproximateSizes;
2675 2676 2677 2678
  Status GetApproximateSizes(const SizeApproximationOptions& /*options*/,
                             ColumnFamilyHandle* /*column_family*/,
                             const Range* /*range*/, int n,
                             uint64_t* sizes) override {
Y
Yi Wu 已提交
2679 2680 2681
    for (int i = 0; i < n; i++) {
      sizes[i] = 0;
    }
2682
    return Status::OK();
Y
Yi Wu 已提交
2683
  }
2684
  using DB::GetApproximateMemTableStats;
2685 2686 2687 2688
  void GetApproximateMemTableStats(ColumnFamilyHandle* /*column_family*/,
                                   const Range& /*range*/,
                                   uint64_t* const count,
                                   uint64_t* const size) override {
2689 2690 2691
    *count = 0;
    *size = 0;
  }
Y
Yi Wu 已提交
2692
  using DB::CompactRange;
2693 2694 2695
  Status CompactRange(const CompactRangeOptions& /*options*/,
                      ColumnFamilyHandle* /*column_family*/,
                      const Slice* /*start*/, const Slice* /*end*/) override {
Y
Yi Wu 已提交
2696 2697
    return Status::NotSupported("Not supported operation.");
  }
2698

2699
  Status SetDBOptions(
A
Andrew Kryczka 已提交
2700
      const std::unordered_map<std::string, std::string>& /*new_options*/)
2701 2702 2703 2704
      override {
    return Status::NotSupported("Not supported operation.");
  }

Y
Yi Wu 已提交
2705
  using DB::CompactFiles;
2706
  Status CompactFiles(
A
Andrew Kryczka 已提交
2707 2708 2709
      const CompactionOptions& /*compact_options*/,
      ColumnFamilyHandle* /*column_family*/,
      const std::vector<std::string>& /*input_file_names*/,
2710
      const int /*output_level*/, const int /*output_path_id*/ = -1,
2711 2712
      std::vector<std::string>* const /*output_file_names*/ = nullptr,
      CompactionJobInfo* /*compaction_job_info*/ = nullptr) override {
Y
Yi Wu 已提交
2713 2714
    return Status::NotSupported("Not supported operation.");
  }
2715

Y
Yi Wu 已提交
2716 2717 2718
  Status PauseBackgroundWork() override {
    return Status::NotSupported("Not supported operation.");
  }
2719

Y
Yi Wu 已提交
2720 2721 2722
  Status ContinueBackgroundWork() override {
    return Status::NotSupported("Not supported operation.");
  }
2723

Y
Yi Wu 已提交
2724
  Status EnableAutoCompaction(
A
Andrew Kryczka 已提交
2725 2726
      const std::vector<ColumnFamilyHandle*>& /*column_family_handles*/)
      override {
Y
Yi Wu 已提交
2727 2728
    return Status::NotSupported("Not supported operation.");
  }
2729

Y
Yi Wu 已提交
2730
  using DB::NumberLevels;
2731
  int NumberLevels(ColumnFamilyHandle* /*column_family*/) override { return 1; }
2732

Y
Yi Wu 已提交
2733
  using DB::MaxMemCompactionLevel;
2734
  int MaxMemCompactionLevel(ColumnFamilyHandle* /*column_family*/) override {
Y
Yi Wu 已提交
2735
    return 1;
2736
  }
2737

Y
Yi Wu 已提交
2738
  using DB::Level0StopWriteTrigger;
2739
  int Level0StopWriteTrigger(ColumnFamilyHandle* /*column_family*/) override {
Y
Yi Wu 已提交
2740 2741
    return -1;
  }
2742

2743
  const std::string& GetName() const override { return name_; }
2744

2745
  Env* GetEnv() const override { return nullptr; }
2746

Y
Yi Wu 已提交
2747
  using DB::GetOptions;
2748
  Options GetOptions(ColumnFamilyHandle* /*column_family*/) const override {
Y
Yi Wu 已提交
2749
    return options_;
2750
  }
2751

Y
Yi Wu 已提交
2752
  using DB::GetDBOptions;
2753
  DBOptions GetDBOptions() const override { return options_; }
2754

Y
Yi Wu 已提交
2755
  using DB::Flush;
2756 2757
  Status Flush(const rocksdb::FlushOptions& /*options*/,
               ColumnFamilyHandle* /*column_family*/) override {
Y
Yi Wu 已提交
2758 2759 2760
    Status ret;
    return ret;
  }
2761
  Status Flush(
Y
Yanqin Jin 已提交
2762 2763 2764 2765
      const rocksdb::FlushOptions& /*options*/,
      const std::vector<ColumnFamilyHandle*>& /*column_families*/) override {
    return Status::OK();
  }
L
Lei Jin 已提交
2766

2767
  Status SyncWAL() override { return Status::OK(); }
2768

Y
Yi Wu 已提交
2769
#ifndef ROCKSDB_LITE
2770
  Status DisableFileDeletions() override { return Status::OK(); }
2771

2772 2773 2774
  Status EnableFileDeletions(bool /*force*/) override { return Status::OK(); }
  Status GetLiveFiles(std::vector<std::string>&, uint64_t* /*size*/,
                      bool /*flush_memtable*/ = true) override {
Y
Yi Wu 已提交
2775 2776
    return Status::OK();
  }
2777

2778
  Status GetSortedWalFiles(VectorLogPtr& /*files*/) override {
Y
Yi Wu 已提交
2779 2780
    return Status::OK();
  }
2781

2782
  Status DeleteFile(std::string /*name*/) override { return Status::OK(); }
2783

2784
  Status GetUpdatesSince(
2785 2786
      rocksdb::SequenceNumber,
      std::unique_ptr<rocksdb::TransactionLogIterator>*,
A
Andrew Kryczka 已提交
2787
      const TransactionLogIterator::ReadOptions& /*read_options*/ =
A
Aaron Gao 已提交
2788
          TransactionLogIterator::ReadOptions()) override {
Y
Yi Wu 已提交
2789 2790
    return Status::NotSupported("Not supported in Model DB");
  }
2791

2792 2793
  void GetColumnFamilyMetaData(ColumnFamilyHandle* /*column_family*/,
                               ColumnFamilyMetaData* /*metadata*/) override {}
Y
Yi Wu 已提交
2794 2795
#endif  // ROCKSDB_LITE

2796
  Status GetDbIdentity(std::string& /*identity*/) const override {
Y
Yi Wu 已提交
2797
    return Status::OK();
2798
  }
2799

2800
  SequenceNumber GetLatestSequenceNumber() const override { return 0; }
Y
Yi Wu 已提交
2801

2802
  bool SetPreserveDeletesSequenceNumber(SequenceNumber /*seqnum*/) override {
2803 2804 2805
    return true;
  }

2806
  ColumnFamilyHandle* DefaultColumnFamily() const override { return nullptr; }
Y
Yi Wu 已提交
2807 2808 2809 2810 2811 2812

 private:
  class ModelIter : public Iterator {
   public:
    ModelIter(const KVMap* map, bool owned)
        : map_(map), owned_(owned), iter_(map_->end()) {}
2813
    ~ModelIter() override {
Y
Yi Wu 已提交
2814 2815
      if (owned_) delete map_;
    }
2816 2817 2818
    bool Valid() const override { return iter_ != map_->end(); }
    void SeekToFirst() override { iter_ = map_->begin(); }
    void SeekToLast() override {
Y
Yi Wu 已提交
2819 2820 2821 2822 2823 2824
      if (map_->empty()) {
        iter_ = map_->end();
      } else {
        iter_ = map_->find(map_->rbegin()->first);
      }
    }
2825
    void Seek(const Slice& k) override {
Y
Yi Wu 已提交
2826 2827
      iter_ = map_->lower_bound(k.ToString());
    }
2828
    void SeekForPrev(const Slice& k) override {
A
Aaron Gao 已提交
2829 2830 2831
      iter_ = map_->upper_bound(k.ToString());
      Prev();
    }
2832 2833
    void Next() override { ++iter_; }
    void Prev() override {
Y
Yi Wu 已提交
2834 2835 2836 2837 2838 2839 2840
      if (iter_ == map_->begin()) {
        iter_ = map_->end();
        return;
      }
      --iter_;
    }

2841 2842 2843
    Slice key() const override { return iter_->first; }
    Slice value() const override { return iter_->second; }
    Status status() const override { return Status::OK(); }
2844

Y
Yi Wu 已提交
2845 2846 2847 2848 2849 2850 2851 2852 2853
   private:
    const KVMap* const map_;
    const bool owned_;  // Do we own map_
    KVMap::const_iterator iter_;
  };
  const Options options_;
  KVMap map_;
  std::string name_ = "";
};
2854

2855
#ifndef ROCKSDB_VALGRIND_RUN
Y
Yi Wu 已提交
2856 2857 2858 2859 2860 2861 2862 2863 2864
static std::string RandomKey(Random* rnd, int minimum = 0) {
  int len;
  do {
    len = (rnd->OneIn(3)
               ? 1  // Short sometimes to encourage collisions
               : (rnd->OneIn(100) ? rnd->Skewed(10) : rnd->Uniform(10)));
  } while (len < minimum);
  return test::RandomKey(rnd, len);
}
L
Lei Jin 已提交
2865

Y
Yi Wu 已提交
2866 2867 2868 2869 2870 2871 2872 2873 2874
static bool CompareIterators(int step, DB* model, DB* db,
                             const Snapshot* model_snap,
                             const Snapshot* db_snap) {
  ReadOptions options;
  options.snapshot = model_snap;
  Iterator* miter = model->NewIterator(options);
  options.snapshot = db_snap;
  Iterator* dbiter = db->NewIterator(options);
  bool ok = true;
L
Lei Jin 已提交
2875
  int count = 0;
Y
Yi Wu 已提交
2876 2877
  for (miter->SeekToFirst(), dbiter->SeekToFirst();
       ok && miter->Valid() && dbiter->Valid(); miter->Next(), dbiter->Next()) {
L
Lei Jin 已提交
2878
    count++;
Y
Yi Wu 已提交
2879
    if (miter->key().compare(dbiter->key()) != 0) {
A
Aaron Gao 已提交
2880
      fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n", step,
Y
Yi Wu 已提交
2881 2882 2883
              EscapeString(miter->key()).c_str(),
              EscapeString(dbiter->key()).c_str());
      ok = false;
2884 2885
      break;
    }
L
Lei Jin 已提交
2886

Y
Yi Wu 已提交
2887 2888 2889 2890 2891 2892
    if (miter->value().compare(dbiter->value()) != 0) {
      fprintf(stderr, "step %d: Value mismatch for key '%s': '%s' vs. '%s'\n",
              step, EscapeString(miter->key()).c_str(),
              EscapeString(miter->value()).c_str(),
              EscapeString(miter->value()).c_str());
      ok = false;
2893
    }
L
Lei Jin 已提交
2894 2895
  }

Y
Yi Wu 已提交
2896 2897 2898 2899 2900 2901
  if (ok) {
    if (miter->Valid() != dbiter->Valid()) {
      fprintf(stderr, "step %d: Mismatch at end of iterators: %d vs. %d\n",
              step, miter->Valid(), dbiter->Valid());
      ok = false;
    }
L
Lei Jin 已提交
2902
  }
Y
Yi Wu 已提交
2903 2904 2905 2906
  delete miter;
  delete dbiter;
  return ok;
}
L
Lei Jin 已提交
2907

Y
Yi Wu 已提交
2908 2909 2910
class DBTestRandomized : public DBTest,
                         public ::testing::WithParamInterface<int> {
 public:
2911
  void SetUp() override { option_config_ = GetParam(); }
L
Lei Jin 已提交
2912

Y
Yi Wu 已提交
2913 2914 2915 2916
  static std::vector<int> GenerateOptionConfigs() {
    std::vector<int> option_configs;
    // skip cuckoo hash as it does not support snapshot.
    for (int option_config = kDefault; option_config < kEnd; ++option_config) {
2917 2918
      if (!ShouldSkipOptions(option_config,
                             kSkipDeletesFilterFirst | kSkipNoSeekToLast)) {
Y
Yi Wu 已提交
2919 2920 2921 2922 2923
        option_configs.push_back(option_config);
      }
    }
    option_configs.push_back(kBlockBasedTableWithIndexRestartInterval);
    return option_configs;
L
Lei Jin 已提交
2924
  }
Y
Yi Wu 已提交
2925
};
2926

Y
Yi Wu 已提交
2927 2928 2929
INSTANTIATE_TEST_CASE_P(
    DBTestRandomized, DBTestRandomized,
    ::testing::ValuesIn(DBTestRandomized::GenerateOptionConfigs()));
2930

Y
Yi Wu 已提交
2931 2932 2933 2934
TEST_P(DBTestRandomized, Randomized) {
  anon::OptionsOverride options_override;
  options_override.skip_policy = kSkipNoSnapshot;
  Options options = CurrentOptions(options_override);
L
Lei Jin 已提交
2935
  DestroyAndReopen(options);
2936

Y
Yi Wu 已提交
2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978
  Random rnd(test::RandomSeed() + GetParam());
  ModelDB model(options);
  const int N = 10000;
  const Snapshot* model_snap = nullptr;
  const Snapshot* db_snap = nullptr;
  std::string k, v;
  for (int step = 0; step < N; step++) {
    // TODO(sanjay): Test Get() works
    int p = rnd.Uniform(100);
    int minimum = 0;
    if (option_config_ == kHashSkipList || option_config_ == kHashLinkList ||
        option_config_ == kPlainTableFirstBytePrefix ||
        option_config_ == kBlockBasedTableWithWholeKeyHashIndex ||
        option_config_ == kBlockBasedTableWithPrefixHashIndex) {
      minimum = 1;
    }
    if (p < 45) {  // Put
      k = RandomKey(&rnd, minimum);
      v = RandomString(&rnd,
                       rnd.OneIn(20) ? 100 + rnd.Uniform(100) : rnd.Uniform(8));
      ASSERT_OK(model.Put(WriteOptions(), k, v));
      ASSERT_OK(db_->Put(WriteOptions(), k, v));
    } else if (p < 90) {  // Delete
      k = RandomKey(&rnd, minimum);
      ASSERT_OK(model.Delete(WriteOptions(), k));
      ASSERT_OK(db_->Delete(WriteOptions(), k));
    } else {  // Multi-element batch
      WriteBatch b;
      const int num = rnd.Uniform(8);
      for (int i = 0; i < num; i++) {
        if (i == 0 || !rnd.OneIn(10)) {
          k = RandomKey(&rnd, minimum);
        } else {
          // Periodically re-use the same key from the previous iter, so
          // we have multiple entries in the write batch for the same key
        }
        if (rnd.OneIn(2)) {
          v = RandomString(&rnd, rnd.Uniform(10));
          b.Put(k, v);
        } else {
          b.Delete(k);
        }
2979
      }
Y
Yi Wu 已提交
2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991
      ASSERT_OK(model.Write(WriteOptions(), &b));
      ASSERT_OK(db_->Write(WriteOptions(), &b));
    }

    if ((step % 100) == 0) {
      // For DB instances that use the hash index + block-based table, the
      // iterator will be invalid right when seeking a non-existent key, right
      // than return a key that is close to it.
      if (option_config_ != kBlockBasedTableWithWholeKeyHashIndex &&
          option_config_ != kBlockBasedTableWithPrefixHashIndex) {
        ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));
        ASSERT_TRUE(CompareIterators(step, &model, db_, model_snap, db_snap));
2992
      }
Y
Yi Wu 已提交
2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004

      // Save a snapshot from each DB this time that we'll use next
      // time we compare things, to make sure the current state is
      // preserved with the snapshot
      if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
      if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);

      Reopen(options);
      ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));

      model_snap = model.GetSnapshot();
      db_snap = db_->GetSnapshot();
3005 3006
    }
  }
Y
Yi Wu 已提交
3007 3008 3009
  if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
  if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
}
3010
#endif  // ROCKSDB_VALGRIND_RUN
Y
Yi Wu 已提交
3011 3012 3013 3014 3015 3016 3017 3018

TEST_F(DBTest, BlockBasedTablePrefixIndexTest) {
  // create a DB with block prefix index
  BlockBasedTableOptions table_options;
  Options options = CurrentOptions();
  table_options.index_type = BlockBasedTableOptions::kHashSearch;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  options.prefix_extractor.reset(NewFixedPrefixTransform(1));
3019

Y
Yi Wu 已提交
3020 3021 3022 3023
  Reopen(options);
  ASSERT_OK(Put("k1", "v1"));
  Flush();
  ASSERT_OK(Put("k2", "v2"));
3024

Y
Yi Wu 已提交
3025 3026 3027 3028 3029
  // Reopen it without prefix extractor, make sure everything still works.
  // RocksDB should just fall back to the binary index.
  table_options.index_type = BlockBasedTableOptions::kBinarySearch;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  options.prefix_extractor.reset();
3030

Y
Yi Wu 已提交
3031 3032 3033
  Reopen(options);
  ASSERT_EQ("v1", Get("k1"));
  ASSERT_EQ("v2", Get("k2"));
3034 3035
}

Y
Yi Wu 已提交
3036 3037 3038
TEST_F(DBTest, ChecksumTest) {
  BlockBasedTableOptions table_options;
  Options options = CurrentOptions();
I
Igor Canadi 已提交
3039

Y
Yi Wu 已提交
3040 3041 3042 3043 3044 3045
  table_options.checksum = kCRC32c;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  Reopen(options);
  ASSERT_OK(Put("a", "b"));
  ASSERT_OK(Put("c", "d"));
  ASSERT_OK(Flush());  // table with crc checksum
I
Igor Canadi 已提交
3046

Y
Yi Wu 已提交
3047 3048 3049 3050 3051 3052
  table_options.checksum = kxxHash;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  Reopen(options);
  ASSERT_OK(Put("e", "f"));
  ASSERT_OK(Put("g", "h"));
  ASSERT_OK(Flush());  // table with xxhash checksum
I
Igor Canadi 已提交
3053

Y
Yi Wu 已提交
3054 3055 3056 3057 3058 3059 3060
  table_options.checksum = kCRC32c;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  Reopen(options);
  ASSERT_EQ("b", Get("a"));
  ASSERT_EQ("d", Get("c"));
  ASSERT_EQ("f", Get("e"));
  ASSERT_EQ("h", Get("g"));
I
Igor Canadi 已提交
3061

Y
Yi Wu 已提交
3062 3063 3064 3065 3066 3067 3068
  table_options.checksum = kCRC32c;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  Reopen(options);
  ASSERT_EQ("b", Get("a"));
  ASSERT_EQ("d", Get("c"));
  ASSERT_EQ("f", Get("e"));
  ASSERT_EQ("h", Get("g"));
I
Igor Canadi 已提交
3069 3070
}

I
Islam AbdelRahman 已提交
3071
#ifndef ROCKSDB_LITE
Y
Yi Wu 已提交
3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088
TEST_P(DBTestWithParam, FIFOCompactionTest) {
  for (int iter = 0; iter < 2; ++iter) {
    // first iteration -- auto compaction
    // second iteration -- manual compaction
    Options options;
    options.compaction_style = kCompactionStyleFIFO;
    options.write_buffer_size = 100 << 10;  // 100KB
    options.arena_block_size = 4096;
    options.compaction_options_fifo.max_table_files_size = 500 << 10;  // 500KB
    options.compression = kNoCompression;
    options.create_if_missing = true;
    options.max_subcompactions = max_subcompactions_;
    if (iter == 1) {
      options.disable_auto_compactions = true;
    }
    options = CurrentOptions(options);
    DestroyAndReopen(options);
I
Igor Canadi 已提交
3089

Y
Yi Wu 已提交
3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109
    Random rnd(301);
    for (int i = 0; i < 6; ++i) {
      for (int j = 0; j < 110; ++j) {
        ASSERT_OK(Put(ToString(i * 100 + j), RandomString(&rnd, 980)));
      }
      // flush should happen here
      ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
    }
    if (iter == 0) {
      ASSERT_OK(dbfull()->TEST_WaitForCompact());
    } else {
      CompactRangeOptions cro;
      cro.exclusive_manual_compaction = exclusive_manual_compaction_;
      ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
    }
    // only 5 files should survive
    ASSERT_EQ(NumTableFilesAtLevel(0), 5);
    for (int i = 0; i < 50; ++i) {
      // these keys should be deleted in previous compaction
      ASSERT_EQ("NOT_FOUND", Get(ToString(i)));
I
Igor Canadi 已提交
3110 3111
    }
  }
Y
Yi Wu 已提交
3112
}
3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139

TEST_F(DBTest, FIFOCompactionTestWithCompaction) {
  Options options;
  options.compaction_style = kCompactionStyleFIFO;
  options.write_buffer_size = 20 << 10;  // 20K
  options.arena_block_size = 4096;
  options.compaction_options_fifo.max_table_files_size = 1500 << 10;  // 1MB
  options.compaction_options_fifo.allow_compaction = true;
  options.level0_file_num_compaction_trigger = 6;
  options.compression = kNoCompression;
  options.create_if_missing = true;
  options = CurrentOptions(options);
  DestroyAndReopen(options);

  Random rnd(301);
  for (int i = 0; i < 60; i++) {
    // Generate and flush a file about 20KB.
    for (int j = 0; j < 20; j++) {
      ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
    }
    Flush();
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
  }
  // It should be compacted to 10 files.
  ASSERT_EQ(NumTableFilesAtLevel(0), 10);

  for (int i = 0; i < 60; i++) {
S
Sagar Vemuri 已提交
3140
    // Generate and flush a file about 20KB.
3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154
    for (int j = 0; j < 20; j++) {
      ASSERT_OK(Put(ToString(i * 20 + j + 2000), RandomString(&rnd, 980)));
    }
    Flush();
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
  }

  // It should be compacted to no more than 20 files.
  ASSERT_GT(NumTableFilesAtLevel(0), 10);
  ASSERT_LT(NumTableFilesAtLevel(0), 18);
  // Size limit is still guaranteed.
  ASSERT_LE(SizeAtLevel(0),
            options.compaction_options_fifo.max_table_files_size);
}
S
Sagar Vemuri 已提交
3155

3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195
TEST_F(DBTest, FIFOCompactionStyleWithCompactionAndDelete) {
  Options options;
  options.compaction_style = kCompactionStyleFIFO;
  options.write_buffer_size = 20 << 10;  // 20K
  options.arena_block_size = 4096;
  options.compaction_options_fifo.max_table_files_size = 1500 << 10;  // 1MB
  options.compaction_options_fifo.allow_compaction = true;
  options.level0_file_num_compaction_trigger = 3;
  options.compression = kNoCompression;
  options.create_if_missing = true;
  options = CurrentOptions(options);
  DestroyAndReopen(options);

  Random rnd(301);
  for (int i = 0; i < 3; i++) {
    // Each file contains a different key which will be dropped later.
    ASSERT_OK(Put("a" + ToString(i), RandomString(&rnd, 500)));
    ASSERT_OK(Put("key" + ToString(i), ""));
    ASSERT_OK(Put("z" + ToString(i), RandomString(&rnd, 500)));
    Flush();
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
  }
  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
  for (int i = 0; i < 3; i++) {
    ASSERT_EQ("", Get("key" + ToString(i)));
  }
  for (int i = 0; i < 3; i++) {
    // Each file contains a different key which will be dropped later.
    ASSERT_OK(Put("a" + ToString(i), RandomString(&rnd, 500)));
    ASSERT_OK(Delete("key" + ToString(i)));
    ASSERT_OK(Put("z" + ToString(i), RandomString(&rnd, 500)));
    Flush();
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
  }
  ASSERT_EQ(NumTableFilesAtLevel(0), 2);
  for (int i = 0; i < 3; i++) {
    ASSERT_EQ("NOT_FOUND", Get("key" + ToString(i)));
  }
}

S
Sagar Vemuri 已提交
3196 3197 3198 3199 3200
// Check that FIFO-with-TTL is not supported with max_open_files != -1.
TEST_F(DBTest, FIFOCompactionWithTTLAndMaxOpenFilesTest) {
  Options options;
  options.compaction_style = kCompactionStyleFIFO;
  options.create_if_missing = true;
3201
  options.ttl = 600;  // seconds
S
Sagar Vemuri 已提交
3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216

  // Check that it is not supported with max_open_files != -1.
  options.max_open_files = 100;
  options = CurrentOptions(options);
  ASSERT_TRUE(TryReopen(options).IsNotSupported());

  options.max_open_files = -1;
  ASSERT_OK(TryReopen(options));
}

// Check that FIFO-with-TTL is supported only with BlockBasedTableFactory.
TEST_F(DBTest, FIFOCompactionWithTTLAndVariousTableFormatsTest) {
  Options options;
  options.compaction_style = kCompactionStyleFIFO;
  options.create_if_missing = true;
3217
  options.ttl = 600;  // seconds
S
Sagar Vemuri 已提交
3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231

  options = CurrentOptions(options);
  options.table_factory.reset(NewBlockBasedTableFactory());
  ASSERT_OK(TryReopen(options));

  Destroy(options);
  options.table_factory.reset(NewPlainTableFactory());
  ASSERT_TRUE(TryReopen(options).IsNotSupported());

  Destroy(options);
  options.table_factory.reset(NewAdaptiveTableFactory());
  ASSERT_TRUE(TryReopen(options).IsNotSupported());
}

3232
TEST_F(DBTest, FIFOCompactionWithTTLTest) {
S
Sagar Vemuri 已提交
3233 3234 3235 3236 3237 3238
  Options options;
  options.compaction_style = kCompactionStyleFIFO;
  options.write_buffer_size = 10 << 10;  // 10KB
  options.arena_block_size = 4096;
  options.compression = kNoCompression;
  options.create_if_missing = true;
3239 3240
  env_->time_elapse_only_sleep_ = false;
  options.env = env_;
S
Sagar Vemuri 已提交
3241 3242 3243 3244

  // Test to make sure that all files with expired ttl are deleted on next
  // manual compaction.
  {
3245
    env_->addon_time_.store(0);
S
Sagar Vemuri 已提交
3246 3247
    options.compaction_options_fifo.max_table_files_size = 150 << 10;  // 150KB
    options.compaction_options_fifo.allow_compaction = false;
3248
    options.ttl = 1 * 60 * 60 ;  // 1 hour
S
Sagar Vemuri 已提交
3249 3250 3251 3252 3253 3254 3255 3256 3257 3258
    options = CurrentOptions(options);
    DestroyAndReopen(options);

    Random rnd(301);
    for (int i = 0; i < 10; i++) {
      // Generate and flush a file about 10KB.
      for (int j = 0; j < 10; j++) {
        ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
      }
      Flush();
3259
      ASSERT_OK(dbfull()->TEST_WaitForCompact());
S
Sagar Vemuri 已提交
3260 3261 3262
    }
    ASSERT_EQ(NumTableFilesAtLevel(0), 10);

3263 3264 3265 3266 3267 3268 3269 3270
    // Sleep for 2 hours -- which is much greater than TTL.
    // Note: Couldn't use SleepForMicroseconds because it takes an int instead
    // of uint64_t. Hence used addon_time_ directly.
    // env_->SleepForMicroseconds(2 * 60 * 60 * 1000 * 1000);
    env_->addon_time_.fetch_add(2 * 60 * 60);

    // Since no flushes and compactions have run, the db should still be in
    // the same state even after considerable time has passed.
S
Sagar Vemuri 已提交
3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
    ASSERT_EQ(NumTableFilesAtLevel(0), 10);

    dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  }

  // Test to make sure that all files with expired ttl are deleted on next
  // automatic compaction.
  {
    options.compaction_options_fifo.max_table_files_size = 150 << 10;  // 150KB
    options.compaction_options_fifo.allow_compaction = false;
3283
    options.ttl = 1 * 60 * 60;  // 1 hour
S
Sagar Vemuri 已提交
3284 3285 3286 3287 3288 3289 3290 3291 3292 3293
    options = CurrentOptions(options);
    DestroyAndReopen(options);

    Random rnd(301);
    for (int i = 0; i < 10; i++) {
      // Generate and flush a file about 10KB.
      for (int j = 0; j < 10; j++) {
        ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
      }
      Flush();
3294
      ASSERT_OK(dbfull()->TEST_WaitForCompact());
S
Sagar Vemuri 已提交
3295 3296 3297
    }
    ASSERT_EQ(NumTableFilesAtLevel(0), 10);

3298 3299 3300
    // Sleep for 2 hours -- which is much greater than TTL.
    env_->addon_time_.fetch_add(2 * 60 * 60);
    // Just to make sure that we are in the same state even after sleeping.
S
Sagar Vemuri 已提交
3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
    ASSERT_EQ(NumTableFilesAtLevel(0), 10);

    // Create 1 more file to trigger TTL compaction. The old files are dropped.
    for (int i = 0; i < 1; i++) {
      for (int j = 0; j < 10; j++) {
        ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
      }
      Flush();
    }

    ASSERT_OK(dbfull()->TEST_WaitForCompact());
    // Only the new 10 files remain.
    ASSERT_EQ(NumTableFilesAtLevel(0), 1);
    ASSERT_LE(SizeAtLevel(0),
              options.compaction_options_fifo.max_table_files_size);
  }

  // Test that shows the fall back to size-based FIFO compaction if TTL-based
  // deletion doesn't move the total size to be less than max_table_files_size.
  {
3322
    options.write_buffer_size = 10 << 10;                              // 10KB
S
Sagar Vemuri 已提交
3323 3324
    options.compaction_options_fifo.max_table_files_size = 150 << 10;  // 150KB
    options.compaction_options_fifo.allow_compaction = false;
3325
    options.ttl =  1 * 60 * 60;  // 1 hour
S
Sagar Vemuri 已提交
3326 3327 3328 3329 3330 3331 3332 3333 3334 3335
    options = CurrentOptions(options);
    DestroyAndReopen(options);

    Random rnd(301);
    for (int i = 0; i < 3; i++) {
      // Generate and flush a file about 10KB.
      for (int j = 0; j < 10; j++) {
        ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
      }
      Flush();
3336
      ASSERT_OK(dbfull()->TEST_WaitForCompact());
S
Sagar Vemuri 已提交
3337 3338 3339
    }
    ASSERT_EQ(NumTableFilesAtLevel(0), 3);

3340 3341 3342
    // Sleep for 2 hours -- which is much greater than TTL.
    env_->addon_time_.fetch_add(2 * 60 * 60);
    // Just to make sure that we are in the same state even after sleeping.
S
Sagar Vemuri 已提交
3343 3344 3345 3346 3347 3348 3349 3350
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
    ASSERT_EQ(NumTableFilesAtLevel(0), 3);

    for (int i = 0; i < 5; i++) {
      for (int j = 0; j < 140; j++) {
        ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
      }
      Flush();
3351
      ASSERT_OK(dbfull()->TEST_WaitForCompact());
S
Sagar Vemuri 已提交
3352 3353 3354 3355 3356 3357 3358 3359 3360 3361
    }
    // Size limit is still guaranteed.
    ASSERT_LE(SizeAtLevel(0),
              options.compaction_options_fifo.max_table_files_size);
  }

  // Test with TTL + Intra-L0 compactions.
  {
    options.compaction_options_fifo.max_table_files_size = 150 << 10;  // 150KB
    options.compaction_options_fifo.allow_compaction = true;
3362
    options.ttl = 1 * 60 * 60;  // 1 hour
S
Sagar Vemuri 已提交
3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373
    options.level0_file_num_compaction_trigger = 6;
    options = CurrentOptions(options);
    DestroyAndReopen(options);

    Random rnd(301);
    for (int i = 0; i < 10; i++) {
      // Generate and flush a file about 10KB.
      for (int j = 0; j < 10; j++) {
        ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
      }
      Flush();
3374
      ASSERT_OK(dbfull()->TEST_WaitForCompact());
S
Sagar Vemuri 已提交
3375 3376 3377 3378 3379 3380
    }
    // With Intra-L0 compaction, out of 10 files, 6 files will be compacted to 1
    // (due to level0_file_num_compaction_trigger = 6).
    // So total files = 1 + remaining 4 = 5.
    ASSERT_EQ(NumTableFilesAtLevel(0), 5);

3381 3382 3383
    // Sleep for 2 hours -- which is much greater than TTL.
    env_->addon_time_.fetch_add(2 * 60 * 60);
    // Just to make sure that we are in the same state even after sleeping.
S
Sagar Vemuri 已提交
3384 3385 3386 3387 3388 3389 3390 3391 3392
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
    ASSERT_EQ(NumTableFilesAtLevel(0), 5);

    // Create 10 more files. The old 5 files are dropped as their ttl expired.
    for (int i = 0; i < 10; i++) {
      for (int j = 0; j < 10; j++) {
        ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
      }
      Flush();
3393
      ASSERT_OK(dbfull()->TEST_WaitForCompact());
S
Sagar Vemuri 已提交
3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405
    }
    ASSERT_EQ(NumTableFilesAtLevel(0), 5);
    ASSERT_LE(SizeAtLevel(0),
              options.compaction_options_fifo.max_table_files_size);
  }

  // Test with large TTL + Intra-L0 compactions.
  // Files dropped based on size, as ttl doesn't kick in.
  {
    options.write_buffer_size = 20 << 10;                               // 20K
    options.compaction_options_fifo.max_table_files_size = 1500 << 10;  // 1.5MB
    options.compaction_options_fifo.allow_compaction = true;
3406
    options.ttl = 1 * 60 * 60;  // 1 hour
S
Sagar Vemuri 已提交
3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417
    options.level0_file_num_compaction_trigger = 6;
    options = CurrentOptions(options);
    DestroyAndReopen(options);

    Random rnd(301);
    for (int i = 0; i < 60; i++) {
      // Generate and flush a file about 20KB.
      for (int j = 0; j < 20; j++) {
        ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
      }
      Flush();
3418
      ASSERT_OK(dbfull()->TEST_WaitForCompact());
S
Sagar Vemuri 已提交
3419 3420 3421 3422 3423 3424 3425 3426 3427 3428
    }
    // It should be compacted to 10 files.
    ASSERT_EQ(NumTableFilesAtLevel(0), 10);

    for (int i = 0; i < 60; i++) {
      // Generate and flush a file about 20KB.
      for (int j = 0; j < 20; j++) {
        ASSERT_OK(Put(ToString(i * 20 + j + 2000), RandomString(&rnd, 980)));
      }
      Flush();
3429
      ASSERT_OK(dbfull()->TEST_WaitForCompact());
S
Sagar Vemuri 已提交
3430 3431 3432 3433 3434 3435 3436 3437 3438 3439
    }

    // It should be compacted to no more than 20 files.
    ASSERT_GT(NumTableFilesAtLevel(0), 10);
    ASSERT_LT(NumTableFilesAtLevel(0), 18);
    // Size limit is still guaranteed.
    ASSERT_LE(SizeAtLevel(0),
              options.compaction_options_fifo.max_table_files_size);
  }
}
Y
Yi Wu 已提交
3440
#endif  // ROCKSDB_LITE
I
Igor Canadi 已提交
3441

Y
Yi Wu 已提交
3442 3443 3444
#ifndef ROCKSDB_LITE
/*
 * This test is not reliable enough as it heavily depends on disk behavior.
S
Siying Dong 已提交
3445
 * Disable as it is flaky.
Y
Yi Wu 已提交
3446
 */
S
Siying Dong 已提交
3447
TEST_F(DBTest, DISABLED_RateLimitingTest) {
3448
  Options options = CurrentOptions();
Y
Yi Wu 已提交
3449
  options.write_buffer_size = 1 << 20;  // 1MB
3450
  options.level0_file_num_compaction_trigger = 2;
Y
Yi Wu 已提交
3451 3452 3453
  options.target_file_size_base = 1 << 20;     // 1MB
  options.max_bytes_for_level_base = 4 << 20;  // 4MB
  options.max_bytes_for_level_multiplier = 4;
3454
  options.compression = kNoCompression;
Y
Yi Wu 已提交
3455 3456
  options.create_if_missing = true;
  options.env = env_;
3457
  options.statistics = rocksdb::CreateDBStatistics();
Y
Yi Wu 已提交
3458 3459
  options.IncreaseParallelism(4);
  DestroyAndReopen(options);
3460

Y
Yi Wu 已提交
3461 3462
  WriteOptions wo;
  wo.disableWAL = true;
3463

Y
Yi Wu 已提交
3464 3465 3466 3467 3468 3469 3470
  // # no rate limiting
  Random rnd(301);
  uint64_t start = env_->NowMicros();
  // Write ~96M data
  for (int64_t i = 0; i < (96 << 10); ++i) {
    ASSERT_OK(
        Put(RandomString(&rnd, 32), RandomString(&rnd, (1 << 10) + 1), wo));
3471
  }
Y
Yi Wu 已提交
3472 3473
  uint64_t elapsed = env_->NowMicros() - start;
  double raw_rate = env_->bytes_written_ * 1000000.0 / elapsed;
3474 3475 3476
  uint64_t rate_limiter_drains =
      TestGetTickerCount(options, NUMBER_RATE_LIMITER_DRAINS);
  ASSERT_EQ(0, rate_limiter_drains);
Y
Yi Wu 已提交
3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490
  Close();

  // # rate limiting with 0.7 x threshold
  options.rate_limiter.reset(
      NewGenericRateLimiter(static_cast<int64_t>(0.7 * raw_rate)));
  env_->bytes_written_ = 0;
  DestroyAndReopen(options);

  start = env_->NowMicros();
  // Write ~96M data
  for (int64_t i = 0; i < (96 << 10); ++i) {
    ASSERT_OK(
        Put(RandomString(&rnd, 32), RandomString(&rnd, (1 << 10) + 1), wo));
  }
3491 3492 3493
  rate_limiter_drains =
      TestGetTickerCount(options, NUMBER_RATE_LIMITER_DRAINS) -
      rate_limiter_drains;
3494
  elapsed = env_->NowMicros() - start;
Y
Yi Wu 已提交
3495 3496
  Close();
  ASSERT_EQ(options.rate_limiter->GetTotalBytesThrough(), env_->bytes_written_);
3497 3498
  // Most intervals should've been drained (interval time is 100ms, elapsed is
  // micros)
3499
  ASSERT_GT(rate_limiter_drains, 0);
A
Andrew Kryczka 已提交
3500
  ASSERT_LE(rate_limiter_drains, elapsed / 100000 + 1);
Y
Yi Wu 已提交
3501 3502 3503 3504 3505 3506 3507 3508 3509
  double ratio = env_->bytes_written_ * 1000000 / elapsed / raw_rate;
  fprintf(stderr, "write rate ratio = %.2lf, expected 0.7\n", ratio);
  ASSERT_TRUE(ratio < 0.8);

  // # rate limiting with half of the raw_rate
  options.rate_limiter.reset(
      NewGenericRateLimiter(static_cast<int64_t>(raw_rate / 2)));
  env_->bytes_written_ = 0;
  DestroyAndReopen(options);
3510

Y
Yi Wu 已提交
3511 3512 3513 3514 3515
  start = env_->NowMicros();
  // Write ~96M data
  for (int64_t i = 0; i < (96 << 10); ++i) {
    ASSERT_OK(
        Put(RandomString(&rnd, 32), RandomString(&rnd, (1 << 10) + 1), wo));
3516
  }
Y
Yi Wu 已提交
3517
  elapsed = env_->NowMicros() - start;
3518 3519 3520
  rate_limiter_drains =
      TestGetTickerCount(options, NUMBER_RATE_LIMITER_DRAINS) -
      rate_limiter_drains;
Y
Yi Wu 已提交
3521 3522
  Close();
  ASSERT_EQ(options.rate_limiter->GetTotalBytesThrough(), env_->bytes_written_);
3523 3524 3525
  // Most intervals should've been drained (interval time is 100ms, elapsed is
  // micros)
  ASSERT_GT(rate_limiter_drains, elapsed / 100000 / 2);
A
Andrew Kryczka 已提交
3526
  ASSERT_LE(rate_limiter_drains, elapsed / 100000 + 1);
Y
Yi Wu 已提交
3527 3528 3529 3530
  ratio = env_->bytes_written_ * 1000000 / elapsed / raw_rate;
  fprintf(stderr, "write rate ratio = %.2lf, expected 0.5\n", ratio);
  ASSERT_LT(ratio, 0.6);
}
3531

Y
Yi Wu 已提交
3532 3533 3534 3535 3536
TEST_F(DBTest, TableOptionsSanitizeTest) {
  Options options = CurrentOptions();
  options.create_if_missing = true;
  DestroyAndReopen(options);
  ASSERT_EQ(db_->GetOptions().allow_mmap_reads, false);
3537

Y
Yi Wu 已提交
3538 3539 3540 3541
  options.table_factory.reset(new PlainTableFactory());
  options.prefix_extractor.reset(NewNoopTransform());
  Destroy(options);
  ASSERT_TRUE(!TryReopen(options).IsNotSupported());
3542

Y
Yi Wu 已提交
3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553
  // Test for check of prefix_extractor when hash index is used for
  // block-based table
  BlockBasedTableOptions to;
  to.index_type = BlockBasedTableOptions::kHashSearch;
  options = CurrentOptions();
  options.create_if_missing = true;
  options.table_factory.reset(NewBlockBasedTableFactory(to));
  ASSERT_TRUE(TryReopen(options).IsInvalidArgument());
  options.prefix_extractor.reset(NewFixedPrefixTransform(1));
  ASSERT_OK(TryReopen(options));
}
3554

Y
Yi Wu 已提交
3555
TEST_F(DBTest, ConcurrentMemtableNotSupported) {
3556
  Options options = CurrentOptions();
Y
Yi Wu 已提交
3557 3558 3559 3560
  options.allow_concurrent_memtable_write = true;
  options.soft_pending_compaction_bytes_limit = 0;
  options.hard_pending_compaction_bytes_limit = 100;
  options.create_if_missing = true;
3561

Y
Yi Wu 已提交
3562 3563 3564
  DestroyDB(dbname_, options);
  options.memtable_factory.reset(NewHashLinkListRepFactory(4, 0, 3, true, 4));
  ASSERT_NOK(TryReopen(options));
3565

Y
Yi Wu 已提交
3566 3567
  options.memtable_factory.reset(new SkipListFactory);
  ASSERT_OK(TryReopen(options));
3568

Y
Yi Wu 已提交
3569 3570 3571 3572 3573 3574
  ColumnFamilyOptions cf_options(options);
  cf_options.memtable_factory.reset(
      NewHashLinkListRepFactory(4, 0, 3, true, 4));
  ColumnFamilyHandle* handle;
  ASSERT_NOK(db_->CreateColumnFamily(cf_options, "name", &handle));
}
3575

Y
Yi Wu 已提交
3576
#endif  // ROCKSDB_LITE
3577

Y
Yi Wu 已提交
3578 3579 3580 3581
TEST_F(DBTest, SanitizeNumThreads) {
  for (int attempt = 0; attempt < 2; attempt++) {
    const size_t kTotalTasks = 8;
    test::SleepingBackgroundTask sleeping_tasks[kTotalTasks];
3582

Y
Yi Wu 已提交
3583 3584 3585 3586
    Options options = CurrentOptions();
    if (attempt == 0) {
      options.max_background_compactions = 3;
      options.max_background_flushes = 2;
3587
    }
Y
Yi Wu 已提交
3588 3589
    options.create_if_missing = true;
    DestroyAndReopen(options);
3590

Y
Yi Wu 已提交
3591 3592 3593 3594 3595 3596
    for (size_t i = 0; i < kTotalTasks; i++) {
      // Insert 5 tasks to low priority queue and 5 tasks to high priority queue
      env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
                     &sleeping_tasks[i],
                     (i < 4) ? Env::Priority::LOW : Env::Priority::HIGH);
    }
3597

3598 3599 3600 3601 3602 3603 3604 3605
    // Wait until 10s for they are scheduled.
    for (int i = 0; i < 10000; i++) {
      if (options.env->GetThreadPoolQueueLen(Env::Priority::LOW) <= 1 &&
          options.env->GetThreadPoolQueueLen(Env::Priority::HIGH) <= 2) {
        break;
      }
      env_->SleepForMicroseconds(1000);
    }
3606

Y
Yi Wu 已提交
3607 3608 3609 3610 3611 3612 3613 3614
    // pool size 3, total task 4. Queue size should be 1.
    ASSERT_EQ(1U, options.env->GetThreadPoolQueueLen(Env::Priority::LOW));
    // pool size 2, total task 4. Queue size should be 2.
    ASSERT_EQ(2U, options.env->GetThreadPoolQueueLen(Env::Priority::HIGH));

    for (size_t i = 0; i < kTotalTasks; i++) {
      sleeping_tasks[i].WakeUp();
      sleeping_tasks[i].WaitUntilDone();
3615
    }
Y
Yi Wu 已提交
3616 3617 3618 3619 3620

    ASSERT_OK(Put("abc", "def"));
    ASSERT_EQ("def", Get("abc"));
    Flush();
    ASSERT_EQ("def", Get("abc"));
3621 3622 3623
  }
}

Y
Yi Wu 已提交
3624
TEST_F(DBTest, WriteSingleThreadEntry) {
D
Dmitri Smirnov 已提交
3625
  std::vector<port::Thread> threads;
Y
Yi Wu 已提交
3626 3627 3628 3629 3630 3631 3632 3633 3634 3635
  dbfull()->TEST_LockMutex();
  auto w = dbfull()->TEST_BeginWrite();
  threads.emplace_back([&] { Put("a", "b"); });
  env_->SleepForMicroseconds(10000);
  threads.emplace_back([&] { Flush(); });
  env_->SleepForMicroseconds(10000);
  dbfull()->TEST_UnlockMutex();
  dbfull()->TEST_LockMutex();
  dbfull()->TEST_EndWrite(w);
  dbfull()->TEST_UnlockMutex();
3636

Y
Yi Wu 已提交
3637 3638 3639
  for (auto& t : threads) {
    t.join();
  }
3640 3641
}

3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691
TEST_F(DBTest, ConcurrentFlushWAL) {
  const size_t cnt = 100;
  Options options;
  WriteOptions wopt;
  ReadOptions ropt;
  for (bool two_write_queues : {false, true}) {
    for (bool manual_wal_flush : {false, true}) {
      options.two_write_queues = two_write_queues;
      options.manual_wal_flush = manual_wal_flush;
      options.create_if_missing = true;
      DestroyAndReopen(options);
      std::vector<port::Thread> threads;
      threads.emplace_back([&] {
        for (size_t i = 0; i < cnt; i++) {
          auto istr = ToString(i);
          db_->Put(wopt, db_->DefaultColumnFamily(), "a" + istr, "b" + istr);
        }
      });
      if (two_write_queues) {
        threads.emplace_back([&] {
          for (size_t i = cnt; i < 2 * cnt; i++) {
            auto istr = ToString(i);
            WriteBatch batch;
            batch.Put("a" + istr, "b" + istr);
            dbfull()->WriteImpl(wopt, &batch, nullptr, nullptr, 0, true);
          }
        });
      }
      threads.emplace_back([&] {
        for (size_t i = 0; i < cnt * 100; i++) {  // FlushWAL is faster than Put
          db_->FlushWAL(false);
        }
      });
      for (auto& t : threads) {
        t.join();
      }
      options.create_if_missing = false;
      // Recover from the wal and make sure that it is not corrupted
      Reopen(options);
      for (size_t i = 0; i < cnt; i++) {
        PinnableSlice pval;
        auto istr = ToString(i);
        ASSERT_OK(
            db_->Get(ropt, db_->DefaultColumnFamily(), "a" + istr, &pval));
        ASSERT_TRUE(pval == ("b" + istr));
      }
    }
  }
}

Y
Yi Wu 已提交
3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709
#ifndef ROCKSDB_LITE
TEST_F(DBTest, DynamicMemtableOptions) {
  const uint64_t k64KB = 1 << 16;
  const uint64_t k128KB = 1 << 17;
  const uint64_t k5KB = 5 * 1024;
  Options options;
  options.env = env_;
  options.create_if_missing = true;
  options.compression = kNoCompression;
  options.max_background_compactions = 1;
  options.write_buffer_size = k64KB;
  options.arena_block_size = 16 * 1024;
  options.max_write_buffer_number = 2;
  // Don't trigger compact/slowdown/stop
  options.level0_file_num_compaction_trigger = 1024;
  options.level0_slowdown_writes_trigger = 1024;
  options.level0_stop_writes_trigger = 1024;
  DestroyAndReopen(options);
I
Igor Canadi 已提交
3710

3711
  auto gen_l0_kb = [this](int size) {
3712
    const int kNumPutsBeforeWaitForFlush = 64;
Y
Yi Wu 已提交
3713 3714 3715
    Random rnd(301);
    for (int i = 0; i < size; i++) {
      ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024)));
I
Igor Canadi 已提交
3716

Y
Yi Wu 已提交
3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727
      // The following condition prevents a race condition between flush jobs
      // acquiring work and this thread filling up multiple memtables. Without
      // this, the flush might produce less files than expected because
      // multiple memtables are flushed into a single L0 file. This race
      // condition affects assertion (A).
      if (i % kNumPutsBeforeWaitForFlush == kNumPutsBeforeWaitForFlush - 1) {
        dbfull()->TEST_WaitForFlushMemTable();
      }
    }
    dbfull()->TEST_WaitForFlushMemTable();
  };
I
Igor Canadi 已提交
3728

Y
Yi Wu 已提交
3729 3730 3731 3732 3733
  // Test write_buffer_size
  gen_l0_kb(64);
  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
  ASSERT_LT(SizeAtLevel(0), k64KB + k5KB);
  ASSERT_GT(SizeAtLevel(0), k64KB - k5KB * 2);
I
Igor Canadi 已提交
3734

Y
Yi Wu 已提交
3735 3736 3737
  // Clean up L0
  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
I
Igor Canadi 已提交
3738

Y
Yi Wu 已提交
3739 3740 3741 3742
  // Increase buffer size
  ASSERT_OK(dbfull()->SetOptions({
      {"write_buffer_size", "131072"},
  }));
I
Igor Canadi 已提交
3743

3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760
  // The existing memtable inflated 64KB->128KB when we invoked SetOptions().
  // Write 192KB, we should have a 128KB L0 file and a memtable with 64KB data.
  gen_l0_kb(192);
  ASSERT_EQ(NumTableFilesAtLevel(0), 1);  // (A)
  ASSERT_LT(SizeAtLevel(0), k128KB + 2 * k5KB);
  ASSERT_GT(SizeAtLevel(0), k128KB - 4 * k5KB);

  // Decrease buffer size below current usage
  ASSERT_OK(dbfull()->SetOptions({
      {"write_buffer_size", "65536"},
  }));
  // The existing memtable became eligible for flush when we reduced its
  // capacity to 64KB. Two keys need to be added to trigger flush: first causes
  // memtable to be marked full, second schedules the flush. Then we should have
  // a 128KB L0 file, a 64KB L0 file, and a memtable with just one key.
  gen_l0_kb(2);
  ASSERT_EQ(NumTableFilesAtLevel(0), 2);
Y
Yi Wu 已提交
3761 3762
  ASSERT_LT(SizeAtLevel(0), k128KB + k64KB + 2 * k5KB);
  ASSERT_GT(SizeAtLevel(0), k128KB + k64KB - 4 * k5KB);
3763

Y
Yi Wu 已提交
3764 3765 3766 3767
  // Test max_write_buffer_number
  // Block compaction thread, which will also block the flushes because
  // max_background_flushes == 0, so flushes are getting executed by the
  // compaction thread
3768
  env_->SetBackgroundThreads(1, Env::LOW);
3769 3770
  test::SleepingBackgroundTask sleeping_task_low;
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
3771
                 Env::Priority::LOW);
Y
Yi Wu 已提交
3772 3773 3774
  // Start from scratch and disable compaction/flush. Flush can only happen
  // during compaction but trigger is pretty high
  options.disable_auto_compactions = true;
3775
  DestroyAndReopen(options);
3776
  env_->SetBackgroundThreads(0, Env::HIGH);
3777

Y
Yi Wu 已提交
3778 3779 3780
  // Put until writes are stopped, bounded by 256 puts. We should see stop at
  // ~128KB
  int count = 0;
3781 3782
  Random rnd(301);

Y
Yi Wu 已提交
3783 3784
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "DBImpl::DelayWrite:Wait",
3785
      [&](void* /*arg*/) { sleeping_task_low.WakeUp(); });
Y
Yi Wu 已提交
3786 3787 3788 3789 3790
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();

  while (!sleeping_task_low.WokenUp() && count < 256) {
    ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), WriteOptions()));
    count++;
3791
  }
Y
Yi Wu 已提交
3792 3793
  ASSERT_GT(static_cast<double>(count), 128 * 0.8);
  ASSERT_LT(static_cast<double>(count), 128 * 1.2);
3794

Y
Yi Wu 已提交
3795
  sleeping_task_low.WaitUntilDone();
3796

Y
Yi Wu 已提交
3797 3798 3799 3800 3801 3802
  // Increase
  ASSERT_OK(dbfull()->SetOptions({
      {"max_write_buffer_number", "8"},
  }));
  // Clean up memtable and L0
  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
3803

Y
Yi Wu 已提交
3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817
  sleeping_task_low.Reset();
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
                 Env::Priority::LOW);
  count = 0;
  while (!sleeping_task_low.WokenUp() && count < 1024) {
    ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), WriteOptions()));
    count++;
  }
// Windows fails this test. Will tune in the future and figure out
// approp number
#ifndef OS_WIN
  ASSERT_GT(static_cast<double>(count), 512 * 0.8);
  ASSERT_LT(static_cast<double>(count), 512 * 1.2);
#endif
3818 3819
  sleeping_task_low.WaitUntilDone();

Y
Yi Wu 已提交
3820 3821 3822 3823 3824 3825
  // Decrease
  ASSERT_OK(dbfull()->SetOptions({
      {"max_write_buffer_number", "4"},
  }));
  // Clean up memtable and L0
  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
3826

Y
Yi Wu 已提交
3827 3828 3829
  sleeping_task_low.Reset();
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
                 Env::Priority::LOW);
3830

Y
Yi Wu 已提交
3831 3832 3833 3834
  count = 0;
  while (!sleeping_task_low.WokenUp() && count < 1024) {
    ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), WriteOptions()));
    count++;
3835
  }
Y
Yi Wu 已提交
3836 3837 3838 3839 3840 3841 3842
// Windows fails this test. Will tune in the future and figure out
// approp number
#ifndef OS_WIN
  ASSERT_GT(static_cast<double>(count), 256 * 0.8);
  ASSERT_LT(static_cast<double>(count), 266 * 1.2);
#endif
  sleeping_task_low.WaitUntilDone();
3843

Y
Yi Wu 已提交
3844 3845 3846
  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
}
#endif  // ROCKSDB_LITE
3847

D
Daniel Black 已提交
3848
#ifdef ROCKSDB_USING_THREAD_STATUS
Y
Yi Wu 已提交
3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862
namespace {
void VerifyOperationCount(Env* env, ThreadStatus::OperationType op_type,
                          int expected_count) {
  int op_count = 0;
  std::vector<ThreadStatus> thread_list;
  ASSERT_OK(env->GetThreadList(&thread_list));
  for (auto thread : thread_list) {
    if (thread.operation_type == op_type) {
      op_count++;
    }
  }
  ASSERT_EQ(op_count, expected_count);
}
}  // namespace
3863

Y
Yi Wu 已提交
3864 3865 3866 3867 3868
TEST_F(DBTest, GetThreadStatus) {
  Options options;
  options.env = env_;
  options.enable_thread_tracking = true;
  TryReopen(options);
3869

Y
Yi Wu 已提交
3870 3871 3872 3873 3874 3875 3876 3877
  std::vector<ThreadStatus> thread_list;
  Status s = env_->GetThreadList(&thread_list);

  for (int i = 0; i < 2; ++i) {
    // repeat the test with differet number of high / low priority threads
    const int kTestCount = 3;
    const unsigned int kHighPriCounts[kTestCount] = {3, 2, 5};
    const unsigned int kLowPriCounts[kTestCount] = {10, 15, 3};
3878
    const unsigned int kBottomPriCounts[kTestCount] = {2, 1, 4};
Y
Yi Wu 已提交
3879 3880 3881 3882
    for (int test = 0; test < kTestCount; ++test) {
      // Change the number of threads in high / low priority pool.
      env_->SetBackgroundThreads(kHighPriCounts[test], Env::HIGH);
      env_->SetBackgroundThreads(kLowPriCounts[test], Env::LOW);
3883
      env_->SetBackgroundThreads(kBottomPriCounts[test], Env::BOTTOM);
Y
Yi Wu 已提交
3884 3885
      // Wait to ensure the all threads has been registered
      unsigned int thread_type_counts[ThreadStatus::NUM_THREAD_TYPES];
3886 3887
      // TODO(ajkr): it'd be better if SetBackgroundThreads returned only after
      // all threads have been registered.
3888 3889 3890 3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 3901
      // Try up to 60 seconds.
      for (int num_try = 0; num_try < 60000; num_try++) {
        env_->SleepForMicroseconds(1000);
        thread_list.clear();
        s = env_->GetThreadList(&thread_list);
        ASSERT_OK(s);
        memset(thread_type_counts, 0, sizeof(thread_type_counts));
        for (auto thread : thread_list) {
          ASSERT_LT(thread.thread_type, ThreadStatus::NUM_THREAD_TYPES);
          thread_type_counts[thread.thread_type]++;
        }
        if (thread_type_counts[ThreadStatus::HIGH_PRIORITY] ==
                kHighPriCounts[test] &&
            thread_type_counts[ThreadStatus::LOW_PRIORITY] ==
3902 3903 3904
                kLowPriCounts[test] &&
            thread_type_counts[ThreadStatus::BOTTOM_PRIORITY] ==
                kBottomPriCounts[test]) {
3905 3906
          break;
        }
Y
Yi Wu 已提交
3907 3908 3909 3910 3911 3912 3913
      }
      // Verify the number of high-priority threads
      ASSERT_EQ(thread_type_counts[ThreadStatus::HIGH_PRIORITY],
                kHighPriCounts[test]);
      // Verify the number of low-priority threads
      ASSERT_EQ(thread_type_counts[ThreadStatus::LOW_PRIORITY],
                kLowPriCounts[test]);
3914 3915 3916
      // Verify the number of bottom-priority threads
      ASSERT_EQ(thread_type_counts[ThreadStatus::BOTTOM_PRIORITY],
                kBottomPriCounts[test]);
Y
Yi Wu 已提交
3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944
    }
    if (i == 0) {
      // repeat the test with multiple column families
      CreateAndReopenWithCF({"pikachu", "about-to-remove"}, options);
      env_->GetThreadStatusUpdater()->TEST_VerifyColumnFamilyInfoMap(handles_,
                                                                     true);
    }
  }
  db_->DropColumnFamily(handles_[2]);
  delete handles_[2];
  handles_.erase(handles_.begin() + 2);
  env_->GetThreadStatusUpdater()->TEST_VerifyColumnFamilyInfoMap(handles_,
                                                                 true);
  Close();
  env_->GetThreadStatusUpdater()->TEST_VerifyColumnFamilyInfoMap(handles_,
                                                                 true);
}

TEST_F(DBTest, DisableThreadStatus) {
  Options options;
  options.env = env_;
  options.enable_thread_tracking = false;
  TryReopen(options);
  CreateAndReopenWithCF({"pikachu", "about-to-remove"}, options);
  // Verify non of the column family info exists
  env_->GetThreadStatusUpdater()->TEST_VerifyColumnFamilyInfoMap(handles_,
                                                                 false);
}
3945

Y
Yi Wu 已提交
3946 3947 3948 3949 3950 3951
TEST_F(DBTest, ThreadStatusFlush) {
  Options options;
  options.env = env_;
  options.write_buffer_size = 100000;  // Small write buffer
  options.enable_thread_tracking = true;
  options = CurrentOptions(options);
3952

Y
Yi Wu 已提交
3953 3954
  rocksdb::SyncPoint::GetInstance()->LoadDependency({
      {"FlushJob::FlushJob()", "DBTest::ThreadStatusFlush:1"},
3955
      {"DBTest::ThreadStatusFlush:2", "FlushJob::WriteLevel0Table"},
Y
Yi Wu 已提交
3956 3957
  });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
3958

Y
Yi Wu 已提交
3959 3960
  CreateAndReopenWithCF({"pikachu"}, options);
  VerifyOperationCount(env_, ThreadStatus::OP_FLUSH, 0);
3961

Y
Yi Wu 已提交
3962 3963 3964
  ASSERT_OK(Put(1, "foo", "v1"));
  ASSERT_EQ("v1", Get(1, "foo"));
  VerifyOperationCount(env_, ThreadStatus::OP_FLUSH, 0);
3965

Y
Yi Wu 已提交
3966 3967 3968
  uint64_t num_running_flushes = 0;
  db_->GetIntProperty(DB::Properties::kNumRunningFlushes, &num_running_flushes);
  ASSERT_EQ(num_running_flushes, 0);
3969

Y
Yi Wu 已提交
3970 3971
  Put(1, "k1", std::string(100000, 'x'));  // Fill memtable
  Put(1, "k2", std::string(100000, 'y'));  // Trigger flush
3972

Y
Yi Wu 已提交
3973 3974 3975 3976 3977 3978 3979 3980 3981 3982
  // The first sync point is to make sure there's one flush job
  // running when we perform VerifyOperationCount().
  TEST_SYNC_POINT("DBTest::ThreadStatusFlush:1");
  VerifyOperationCount(env_, ThreadStatus::OP_FLUSH, 1);
  db_->GetIntProperty(DB::Properties::kNumRunningFlushes, &num_running_flushes);
  ASSERT_EQ(num_running_flushes, 1);
  // This second sync point is to ensure the flush job will not
  // be completed until we already perform VerifyOperationCount().
  TEST_SYNC_POINT("DBTest::ThreadStatusFlush:2");
  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
3983
}
3984

Y
Yi Wu 已提交
3985 3986 3987 3988 3989 3990
TEST_P(DBTestWithParam, ThreadStatusSingleCompaction) {
  const int kTestKeySize = 16;
  const int kTestValueSize = 984;
  const int kEntrySize = kTestKeySize + kTestValueSize;
  const int kEntriesPerBuffer = 100;
  Options options;
3991
  options.create_if_missing = true;
Y
Yi Wu 已提交
3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002
  options.write_buffer_size = kEntrySize * kEntriesPerBuffer;
  options.compaction_style = kCompactionStyleLevel;
  options.target_file_size_base = options.write_buffer_size;
  options.max_bytes_for_level_base = options.target_file_size_base * 2;
  options.max_bytes_for_level_multiplier = 2;
  options.compression = kNoCompression;
  options = CurrentOptions(options);
  options.env = env_;
  options.enable_thread_tracking = true;
  const int kNumL0Files = 4;
  options.level0_file_num_compaction_trigger = kNumL0Files;
4003
  options.max_subcompactions = max_subcompactions_;
4004

Y
Yi Wu 已提交
4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022
  rocksdb::SyncPoint::GetInstance()->LoadDependency({
      {"DBTest::ThreadStatusSingleCompaction:0", "DBImpl::BGWorkCompaction"},
      {"CompactionJob::Run():Start", "DBTest::ThreadStatusSingleCompaction:1"},
      {"DBTest::ThreadStatusSingleCompaction:2", "CompactionJob::Run():End"},
  });
  for (int tests = 0; tests < 2; ++tests) {
    DestroyAndReopen(options);
    rocksdb::SyncPoint::GetInstance()->ClearTrace();
    rocksdb::SyncPoint::GetInstance()->EnableProcessing();

    Random rnd(301);
    // The Put Phase.
    for (int file = 0; file < kNumL0Files; ++file) {
      for (int key = 0; key < kEntriesPerBuffer; ++key) {
        ASSERT_OK(Put(ToString(key + file * kEntriesPerBuffer),
                      RandomString(&rnd, kTestValueSize)));
      }
      Flush();
4023
    }
Y
Yi Wu 已提交
4024 4025 4026 4027 4028 4029 4030 4031 4032
    // This makes sure a compaction won't be scheduled until
    // we have done with the above Put Phase.
    uint64_t num_running_compactions = 0;
    db_->GetIntProperty(DB::Properties::kNumRunningCompactions,
                        &num_running_compactions);
    ASSERT_EQ(num_running_compactions, 0);
    TEST_SYNC_POINT("DBTest::ThreadStatusSingleCompaction:0");
    ASSERT_GE(NumTableFilesAtLevel(0),
              options.level0_file_num_compaction_trigger);
4033

Y
Yi Wu 已提交
4034 4035
    // This makes sure at least one compaction is running.
    TEST_SYNC_POINT("DBTest::ThreadStatusSingleCompaction:1");
4036

Y
Yi Wu 已提交
4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048
    if (options.enable_thread_tracking) {
      // expecting one single L0 to L1 compaction
      VerifyOperationCount(env_, ThreadStatus::OP_COMPACTION, 1);
    } else {
      // If thread tracking is not enabled, compaction count should be 0.
      VerifyOperationCount(env_, ThreadStatus::OP_COMPACTION, 0);
    }
    db_->GetIntProperty(DB::Properties::kNumRunningCompactions,
                        &num_running_compactions);
    ASSERT_EQ(num_running_compactions, 1);
    // TODO(yhchiang): adding assert to verify each compaction stage.
    TEST_SYNC_POINT("DBTest::ThreadStatusSingleCompaction:2");
4049

Y
Yi Wu 已提交
4050 4051 4052 4053
    // repeat the test with disabling thread tracking.
    options.enable_thread_tracking = false;
    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
  }
4054 4055
}

Y
Yi Wu 已提交
4056
TEST_P(DBTestWithParam, PreShutdownManualCompaction) {
4057
  Options options = CurrentOptions();
Y
Yi Wu 已提交
4058 4059
  options.max_subcompactions = max_subcompactions_;
  CreateAndReopenWithCF({"pikachu"}, options);
4060

Y
Yi Wu 已提交
4061 4062 4063 4064 4065
  // iter - 0 with 7 levels
  // iter - 1 with 3 levels
  for (int iter = 0; iter < 2; ++iter) {
    MakeTables(3, "p", "q", 1);
    ASSERT_EQ("1,1,1", FilesPerLevel(1));
4066

Y
Yi Wu 已提交
4067 4068 4069
    // Compaction range falls before files
    Compact(1, "", "c");
    ASSERT_EQ("1,1,1", FilesPerLevel(1));
4070

Y
Yi Wu 已提交
4071 4072 4073
    // Compaction range falls after files
    Compact(1, "r", "z");
    ASSERT_EQ("1,1,1", FilesPerLevel(1));
4074

Y
Yi Wu 已提交
4075 4076 4077
    // Compaction range overlaps files
    Compact(1, "p1", "p9");
    ASSERT_EQ("0,0,1", FilesPerLevel(1));
4078

Y
Yi Wu 已提交
4079 4080 4081
    // Populate a different range
    MakeTables(3, "c", "e", 1);
    ASSERT_EQ("1,1,2", FilesPerLevel(1));
4082

Y
Yi Wu 已提交
4083 4084 4085
    // Compact just the new range
    Compact(1, "b", "f");
    ASSERT_EQ("0,0,2", FilesPerLevel(1));
4086

Y
Yi Wu 已提交
4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100
    // Compact all
    MakeTables(1, "a", "z", 1);
    ASSERT_EQ("1,0,2", FilesPerLevel(1));
    CancelAllBackgroundWork(db_);
    db_->CompactRange(CompactRangeOptions(), handles_[1], nullptr, nullptr);
    ASSERT_EQ("1,0,2", FilesPerLevel(1));

    if (iter == 0) {
      options = CurrentOptions();
      options.num_levels = 3;
      options.create_if_missing = true;
      DestroyAndReopen(options);
      CreateAndReopenWithCF({"pikachu"}, options);
    }
4101
  }
Y
Yi Wu 已提交
4102
}
4103

Y
Yi Wu 已提交
4104 4105 4106 4107 4108 4109 4110 4111 4112
TEST_F(DBTest, PreShutdownFlush) {
  Options options = CurrentOptions();
  CreateAndReopenWithCF({"pikachu"}, options);
  ASSERT_OK(Put(1, "key", "value"));
  CancelAllBackgroundWork(db_);
  Status s =
      db_->CompactRange(CompactRangeOptions(), handles_[1], nullptr, nullptr);
  ASSERT_TRUE(s.IsShutdownInProgress());
}
4113

Y
Yi Wu 已提交
4114 4115 4116 4117 4118 4119
TEST_P(DBTestWithParam, PreShutdownMultipleCompaction) {
  const int kTestKeySize = 16;
  const int kTestValueSize = 984;
  const int kEntrySize = kTestKeySize + kTestValueSize;
  const int kEntriesPerBuffer = 40;
  const int kNumL0Files = 4;
4120

Y
Yi Wu 已提交
4121 4122 4123 4124
  const int kHighPriCount = 3;
  const int kLowPriCount = 5;
  env_->SetBackgroundThreads(kHighPriCount, Env::HIGH);
  env_->SetBackgroundThreads(kLowPriCount, Env::LOW);
4125

Y
Yi Wu 已提交
4126 4127 4128 4129 4130 4131 4132 4133 4134 4135 4136 4137 4138 4139 4140 4141 4142
  Options options;
  options.create_if_missing = true;
  options.write_buffer_size = kEntrySize * kEntriesPerBuffer;
  options.compaction_style = kCompactionStyleLevel;
  options.target_file_size_base = options.write_buffer_size;
  options.max_bytes_for_level_base =
      options.target_file_size_base * kNumL0Files;
  options.compression = kNoCompression;
  options = CurrentOptions(options);
  options.env = env_;
  options.enable_thread_tracking = true;
  options.level0_file_num_compaction_trigger = kNumL0Files;
  options.max_bytes_for_level_multiplier = 2;
  options.max_background_compactions = kLowPriCount;
  options.level0_stop_writes_trigger = 1 << 10;
  options.level0_slowdown_writes_trigger = 1 << 10;
  options.max_subcompactions = max_subcompactions_;
4143

Y
Yi Wu 已提交
4144 4145
  TryReopen(options);
  Random rnd(301);
4146

Y
Yi Wu 已提交
4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158
  std::vector<ThreadStatus> thread_list;
  // Delay both flush and compaction
  rocksdb::SyncPoint::GetInstance()->LoadDependency(
      {{"FlushJob::FlushJob()", "CompactionJob::Run():Start"},
       {"CompactionJob::Run():Start",
        "DBTest::PreShutdownMultipleCompaction:Preshutdown"},
       {"CompactionJob::Run():Start",
        "DBTest::PreShutdownMultipleCompaction:VerifyCompaction"},
       {"DBTest::PreShutdownMultipleCompaction:Preshutdown",
        "CompactionJob::Run():End"},
       {"CompactionJob::Run():End",
        "DBTest::PreShutdownMultipleCompaction:VerifyPreshutdown"}});
4159

Y
Yi Wu 已提交
4160
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
4161

Y
Yi Wu 已提交
4162 4163 4164 4165 4166 4167 4168 4169
  // Make rocksdb busy
  int key = 0;
  // check how many threads are doing compaction using GetThreadList
  int operation_count[ThreadStatus::NUM_OP_TYPES] = {0};
  for (int file = 0; file < 16 * kNumL0Files; ++file) {
    for (int k = 0; k < kEntriesPerBuffer; ++k) {
      ASSERT_OK(Put(ToString(key++), RandomString(&rnd, kTestValueSize)));
    }
4170

Y
Yi Wu 已提交
4171 4172 4173 4174
    Status s = env_->GetThreadList(&thread_list);
    for (auto thread : thread_list) {
      operation_count[thread.operation_type]++;
    }
4175

Y
Yi Wu 已提交
4176 4177 4178 4179 4180 4181 4182 4183 4184
    // Speed up the test
    if (operation_count[ThreadStatus::OP_FLUSH] > 1 &&
        operation_count[ThreadStatus::OP_COMPACTION] >
            0.6 * options.max_background_compactions) {
      break;
    }
    if (file == 15 * kNumL0Files) {
      TEST_SYNC_POINT("DBTest::PreShutdownMultipleCompaction:Preshutdown");
    }
4185 4186
  }

Y
Yi Wu 已提交
4187 4188 4189 4190
  TEST_SYNC_POINT("DBTest::PreShutdownMultipleCompaction:Preshutdown");
  ASSERT_GE(operation_count[ThreadStatus::OP_COMPACTION], 1);
  CancelAllBackgroundWork(db_);
  TEST_SYNC_POINT("DBTest::PreShutdownMultipleCompaction:VerifyPreshutdown");
4191
  dbfull()->TEST_WaitForCompact();
Y
Yi Wu 已提交
4192 4193 4194 4195 4196 4197 4198 4199 4200
  // Record the number of compactions at a time.
  for (int i = 0; i < ThreadStatus::NUM_OP_TYPES; ++i) {
    operation_count[i] = 0;
  }
  Status s = env_->GetThreadList(&thread_list);
  for (auto thread : thread_list) {
    operation_count[thread.operation_type]++;
  }
  ASSERT_EQ(operation_count[ThreadStatus::OP_COMPACTION], 0);
4201 4202
}

Y
Yi Wu 已提交
4203 4204 4205 4206 4207 4208
TEST_P(DBTestWithParam, PreShutdownCompactionMiddle) {
  const int kTestKeySize = 16;
  const int kTestValueSize = 984;
  const int kEntrySize = kTestKeySize + kTestValueSize;
  const int kEntriesPerBuffer = 40;
  const int kNumL0Files = 4;
4209

Y
Yi Wu 已提交
4210 4211 4212 4213
  const int kHighPriCount = 3;
  const int kLowPriCount = 5;
  env_->SetBackgroundThreads(kHighPriCount, Env::HIGH);
  env_->SetBackgroundThreads(kLowPriCount, Env::LOW);
4214

Y
Yi Wu 已提交
4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231
  Options options;
  options.create_if_missing = true;
  options.write_buffer_size = kEntrySize * kEntriesPerBuffer;
  options.compaction_style = kCompactionStyleLevel;
  options.target_file_size_base = options.write_buffer_size;
  options.max_bytes_for_level_base =
      options.target_file_size_base * kNumL0Files;
  options.compression = kNoCompression;
  options = CurrentOptions(options);
  options.env = env_;
  options.enable_thread_tracking = true;
  options.level0_file_num_compaction_trigger = kNumL0Files;
  options.max_bytes_for_level_multiplier = 2;
  options.max_background_compactions = kLowPriCount;
  options.level0_stop_writes_trigger = 1 << 10;
  options.level0_slowdown_writes_trigger = 1 << 10;
  options.max_subcompactions = max_subcompactions_;
4232

Y
Yi Wu 已提交
4233
  TryReopen(options);
4234 4235
  Random rnd(301);

Y
Yi Wu 已提交
4236 4237 4238 4239 4240 4241 4242 4243 4244 4245
  std::vector<ThreadStatus> thread_list;
  // Delay both flush and compaction
  rocksdb::SyncPoint::GetInstance()->LoadDependency(
      {{"DBTest::PreShutdownCompactionMiddle:Preshutdown",
        "CompactionJob::Run():Inprogress"},
       {"CompactionJob::Run():Start",
        "DBTest::PreShutdownCompactionMiddle:VerifyCompaction"},
       {"CompactionJob::Run():Inprogress", "CompactionJob::Run():End"},
       {"CompactionJob::Run():End",
        "DBTest::PreShutdownCompactionMiddle:VerifyPreshutdown"}});
4246

Y
Yi Wu 已提交
4247
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
4248

Y
Yi Wu 已提交
4249 4250 4251 4252 4253 4254 4255 4256
  // Make rocksdb busy
  int key = 0;
  // check how many threads are doing compaction using GetThreadList
  int operation_count[ThreadStatus::NUM_OP_TYPES] = {0};
  for (int file = 0; file < 16 * kNumL0Files; ++file) {
    for (int k = 0; k < kEntriesPerBuffer; ++k) {
      ASSERT_OK(Put(ToString(key++), RandomString(&rnd, kTestValueSize)));
    }
4257

Y
Yi Wu 已提交
4258 4259 4260 4261
    Status s = env_->GetThreadList(&thread_list);
    for (auto thread : thread_list) {
      operation_count[thread.operation_type]++;
    }
4262

Y
Yi Wu 已提交
4263 4264 4265 4266 4267 4268 4269 4270 4271 4272
    // Speed up the test
    if (operation_count[ThreadStatus::OP_FLUSH] > 1 &&
        operation_count[ThreadStatus::OP_COMPACTION] >
            0.6 * options.max_background_compactions) {
      break;
    }
    if (file == 15 * kNumL0Files) {
      TEST_SYNC_POINT("DBTest::PreShutdownCompactionMiddle:VerifyCompaction");
    }
  }
4273

Y
Yi Wu 已提交
4274 4275 4276 4277 4278 4279 4280 4281 4282 4283 4284 4285 4286 4287 4288
  ASSERT_GE(operation_count[ThreadStatus::OP_COMPACTION], 1);
  CancelAllBackgroundWork(db_);
  TEST_SYNC_POINT("DBTest::PreShutdownCompactionMiddle:Preshutdown");
  TEST_SYNC_POINT("DBTest::PreShutdownCompactionMiddle:VerifyPreshutdown");
  dbfull()->TEST_WaitForCompact();
  // Record the number of compactions at a time.
  for (int i = 0; i < ThreadStatus::NUM_OP_TYPES; ++i) {
    operation_count[i] = 0;
  }
  Status s = env_->GetThreadList(&thread_list);
  for (auto thread : thread_list) {
    operation_count[thread.operation_type]++;
  }
  ASSERT_EQ(operation_count[ThreadStatus::OP_COMPACTION], 0);
}
4289

Y
Yi Wu 已提交
4290
#endif  // ROCKSDB_USING_THREAD_STATUS
4291

Y
Yi Wu 已提交
4292 4293 4294 4295 4296 4297
#ifndef ROCKSDB_LITE
TEST_F(DBTest, FlushOnDestroy) {
  WriteOptions wo;
  wo.disableWAL = true;
  ASSERT_OK(Put("foo", "v1", wo));
  CancelAllBackgroundWork(db_);
4298 4299
}

Y
Yi Wu 已提交
4300 4301 4302 4303 4304 4305 4306 4307 4308 4309
TEST_F(DBTest, DynamicLevelCompressionPerLevel) {
  if (!Snappy_Supported()) {
    return;
  }
  const int kNKeys = 120;
  int keys[kNKeys];
  for (int i = 0; i < kNKeys; i++) {
    keys[i] = i;
  }
  std::random_shuffle(std::begin(keys), std::end(keys));
4310 4311

  Random rnd(301);
Y
Yi Wu 已提交
4312 4313 4314 4315 4316 4317 4318 4319
  Options options;
  options.create_if_missing = true;
  options.db_write_buffer_size = 20480;
  options.write_buffer_size = 20480;
  options.max_write_buffer_number = 2;
  options.level0_file_num_compaction_trigger = 2;
  options.level0_slowdown_writes_trigger = 2;
  options.level0_stop_writes_trigger = 2;
4320
  options.target_file_size_base = 20480;
Y
Yi Wu 已提交
4321 4322 4323 4324 4325
  options.level_compaction_dynamic_level_bytes = true;
  options.max_bytes_for_level_base = 102400;
  options.max_bytes_for_level_multiplier = 4;
  options.max_background_compactions = 1;
  options.num_levels = 5;
4326

Y
Yi Wu 已提交
4327 4328 4329 4330 4331 4332 4333
  options.compression_per_level.resize(3);
  options.compression_per_level[0] = kNoCompression;
  options.compression_per_level[1] = kNoCompression;
  options.compression_per_level[2] = kSnappyCompression;

  OnFileDeletionListener* listener = new OnFileDeletionListener();
  options.listeners.emplace_back(listener);
4334

4335 4336
  DestroyAndReopen(options);

Y
Yi Wu 已提交
4337 4338 4339 4340
  // Insert more than 80K. L4 should be base level. Neither L0 nor L4 should
  // be compressed, so total data size should be more than 80K.
  for (int i = 0; i < 20; i++) {
    ASSERT_OK(Put(Key(keys[i]), CompressibleString(&rnd, 4000)));
4341
  }
Y
Yi Wu 已提交
4342 4343
  Flush();
  dbfull()->TEST_WaitForCompact();
4344

Y
Yi Wu 已提交
4345 4346 4347
  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
  ASSERT_EQ(NumTableFilesAtLevel(2), 0);
  ASSERT_EQ(NumTableFilesAtLevel(3), 0);
4348 4349
  // Assuming each files' metadata is at least 50 bytes/
  ASSERT_GT(SizeAtLevel(0) + SizeAtLevel(4), 20U * 4000U + 50U * 4);
Y
Yi Wu 已提交
4350 4351 4352 4353

  // Insert 400KB. Some data will be compressed
  for (int i = 21; i < 120; i++) {
    ASSERT_OK(Put(Key(keys[i]), CompressibleString(&rnd, 4000)));
4354
  }
Y
Yi Wu 已提交
4355 4356 4357 4358
  Flush();
  dbfull()->TEST_WaitForCompact();
  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
  ASSERT_EQ(NumTableFilesAtLevel(2), 0);
4359 4360
  ASSERT_LT(SizeAtLevel(0) + SizeAtLevel(3) + SizeAtLevel(4),
            120U * 4000U + 50U * 24);
Y
Yi Wu 已提交
4361 4362 4363 4364 4365 4366 4367 4368 4369 4370 4371 4372
  // Make sure data in files in L3 is not compacted by removing all files
  // in L4 and calculate number of rows
  ASSERT_OK(dbfull()->SetOptions({
      {"disable_auto_compactions", "true"},
  }));
  ColumnFamilyMetaData cf_meta;
  db_->GetColumnFamilyMetaData(&cf_meta);
  for (auto file : cf_meta.levels[4].files) {
    listener->SetExpectedFileName(dbname_ + file.name);
    ASSERT_OK(dbfull()->DeleteFile(file.name));
  }
  listener->VerifyMatchedCount(cf_meta.levels[4].files.size());
4373

Y
Yi Wu 已提交
4374 4375 4376 4377
  int num_keys = 0;
  std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    num_keys++;
4378
  }
Y
Yi Wu 已提交
4379
  ASSERT_OK(iter->status());
4380
  ASSERT_GT(SizeAtLevel(0) + SizeAtLevel(3), num_keys * 4000U + num_keys * 10U);
4381 4382
}

Y
Yi Wu 已提交
4383 4384 4385 4386 4387 4388 4389 4390 4391 4392
TEST_F(DBTest, DynamicLevelCompressionPerLevel2) {
  if (!Snappy_Supported() || !LZ4_Supported() || !Zlib_Supported()) {
    return;
  }
  const int kNKeys = 500;
  int keys[kNKeys];
  for (int i = 0; i < kNKeys; i++) {
    keys[i] = i;
  }
  std::random_shuffle(std::begin(keys), std::end(keys));
4393

Y
Yi Wu 已提交
4394 4395 4396
  Random rnd(301);
  Options options;
  options.create_if_missing = true;
4397 4398
  options.db_write_buffer_size = 6000000;
  options.write_buffer_size = 600000;
Y
Yi Wu 已提交
4399 4400 4401 4402 4403
  options.max_write_buffer_number = 2;
  options.level0_file_num_compaction_trigger = 2;
  options.level0_slowdown_writes_trigger = 2;
  options.level0_stop_writes_trigger = 2;
  options.soft_pending_compaction_bytes_limit = 1024 * 1024;
4404
  options.target_file_size_base = 20;
4405

Y
Yi Wu 已提交
4406 4407 4408 4409 4410 4411 4412
  options.level_compaction_dynamic_level_bytes = true;
  options.max_bytes_for_level_base = 200;
  options.max_bytes_for_level_multiplier = 8;
  options.max_background_compactions = 1;
  options.num_levels = 5;
  std::shared_ptr<mock::MockTableFactory> mtf(new mock::MockTableFactory);
  options.table_factory = mtf;
4413

Y
Yi Wu 已提交
4414 4415 4416 4417
  options.compression_per_level.resize(3);
  options.compression_per_level[0] = kNoCompression;
  options.compression_per_level[1] = kLZ4Compression;
  options.compression_per_level[2] = kZlibCompression;
4418

Y
Yi Wu 已提交
4419 4420 4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438
  DestroyAndReopen(options);
  // When base level is L4, L4 is LZ4.
  std::atomic<int> num_zlib(0);
  std::atomic<int> num_lz4(0);
  std::atomic<int> num_no(0);
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "LevelCompactionPicker::PickCompaction:Return", [&](void* arg) {
        Compaction* compaction = reinterpret_cast<Compaction*>(arg);
        if (compaction->output_level() == 4) {
          ASSERT_TRUE(compaction->output_compression() == kLZ4Compression);
          num_lz4.fetch_add(1);
        }
      });
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "FlushJob::WriteLevel0Table:output_compression", [&](void* arg) {
        auto* compression = reinterpret_cast<CompressionType*>(arg);
        ASSERT_TRUE(*compression == kNoCompression);
        num_no.fetch_add(1);
      });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
4439

Y
Yi Wu 已提交
4440
  for (int i = 0; i < 100; i++) {
4441 4442 4443 4444 4445
    std::string value = RandomString(&rnd, 200);
    ASSERT_OK(Put(Key(keys[i]), value));
    if (i % 25 == 24) {
      Flush();
      dbfull()->TEST_WaitForCompact();
4446 4447
    }
  }
S
sdong 已提交
4448

Y
Yi Wu 已提交
4449 4450 4451 4452 4453
  Flush();
  dbfull()->TEST_WaitForFlushMemTable();
  dbfull()->TEST_WaitForCompact();
  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
  rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
S
sdong 已提交
4454

Y
Yi Wu 已提交
4455 4456 4457 4458 4459 4460 4461
  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
  ASSERT_EQ(NumTableFilesAtLevel(2), 0);
  ASSERT_EQ(NumTableFilesAtLevel(3), 0);
  ASSERT_GT(NumTableFilesAtLevel(4), 0);
  ASSERT_GT(num_no.load(), 2);
  ASSERT_GT(num_lz4.load(), 0);
  int prev_num_files_l4 = NumTableFilesAtLevel(4);
4462

Y
Yi Wu 已提交
4463 4464 4465 4466 4467 4468 4469 4470 4471 4472 4473 4474 4475 4476 4477 4478 4479 4480 4481 4482 4483
  // After base level turn L4->L3, L3 becomes LZ4 and L4 becomes Zlib
  num_lz4.store(0);
  num_no.store(0);
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "LevelCompactionPicker::PickCompaction:Return", [&](void* arg) {
        Compaction* compaction = reinterpret_cast<Compaction*>(arg);
        if (compaction->output_level() == 4 && compaction->start_level() == 3) {
          ASSERT_TRUE(compaction->output_compression() == kZlibCompression);
          num_zlib.fetch_add(1);
        } else {
          ASSERT_TRUE(compaction->output_compression() == kLZ4Compression);
          num_lz4.fetch_add(1);
        }
      });
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "FlushJob::WriteLevel0Table:output_compression", [&](void* arg) {
        auto* compression = reinterpret_cast<CompressionType*>(arg);
        ASSERT_TRUE(*compression == kNoCompression);
        num_no.fetch_add(1);
      });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
S
sdong 已提交
4484

Y
Yi Wu 已提交
4485
  for (int i = 101; i < 500; i++) {
4486 4487
    std::string value = RandomString(&rnd, 200);
    ASSERT_OK(Put(Key(keys[i]), value));
Y
Yi Wu 已提交
4488 4489 4490
    if (i % 100 == 99) {
      Flush();
      dbfull()->TEST_WaitForCompact();
S
sdong 已提交
4491 4492 4493
    }
  }

Y
Yi Wu 已提交
4494
  rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
S
sdong 已提交
4495
  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
Y
Yi Wu 已提交
4496 4497 4498 4499 4500 4501 4502
  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
  ASSERT_EQ(NumTableFilesAtLevel(2), 0);
  ASSERT_GT(NumTableFilesAtLevel(3), 0);
  ASSERT_GT(NumTableFilesAtLevel(4), prev_num_files_l4);
  ASSERT_GT(num_no.load(), 2);
  ASSERT_GT(num_lz4.load(), 0);
  ASSERT_GT(num_zlib.load(), 0);
S
sdong 已提交
4503 4504
}

Y
Yi Wu 已提交
4505 4506 4507 4508 4509 4510 4511 4512
TEST_F(DBTest, DynamicCompactionOptions) {
  // minimum write buffer size is enforced at 64KB
  const uint64_t k32KB = 1 << 15;
  const uint64_t k64KB = 1 << 16;
  const uint64_t k128KB = 1 << 17;
  const uint64_t k1MB = 1 << 20;
  const uint64_t k4KB = 1 << 12;
  Options options;
4513
  options.env = env_;
Y
Yi Wu 已提交
4514 4515 4516 4517 4518 4519 4520 4521 4522 4523 4524
  options.create_if_missing = true;
  options.compression = kNoCompression;
  options.soft_pending_compaction_bytes_limit = 1024 * 1024;
  options.write_buffer_size = k64KB;
  options.arena_block_size = 4 * k4KB;
  options.max_write_buffer_number = 2;
  // Compaction related options
  options.level0_file_num_compaction_trigger = 3;
  options.level0_slowdown_writes_trigger = 4;
  options.level0_stop_writes_trigger = 8;
  options.target_file_size_base = k64KB;
4525
  options.max_compaction_bytes = options.target_file_size_base * 10;
Y
Yi Wu 已提交
4526 4527 4528
  options.target_file_size_multiplier = 1;
  options.max_bytes_for_level_base = k128KB;
  options.max_bytes_for_level_multiplier = 4;
4529

Y
Yi Wu 已提交
4530
  // Block flush thread and disable compaction thread
4531
  env_->SetBackgroundThreads(1, Env::LOW);
Y
Yi Wu 已提交
4532 4533
  env_->SetBackgroundThreads(1, Env::HIGH);
  DestroyAndReopen(options);
4534

Y
Yi Wu 已提交
4535 4536 4537 4538 4539 4540 4541
  auto gen_l0_kb = [this](int start, int size, int stride) {
    Random rnd(301);
    for (int i = 0; i < size; i++) {
      ASSERT_OK(Put(Key(start + stride * i), RandomString(&rnd, 1024)));
    }
    dbfull()->TEST_WaitForFlushMemTable();
  };
4542

Y
Yi Wu 已提交
4543 4544 4545 4546 4547 4548 4549 4550 4551 4552 4553 4554 4555 4556 4557
  // Write 3 files that have the same key range.
  // Since level0_file_num_compaction_trigger is 3, compaction should be
  // triggered. The compaction should result in one L1 file
  gen_l0_kb(0, 64, 1);
  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
  gen_l0_kb(0, 64, 1);
  ASSERT_EQ(NumTableFilesAtLevel(0), 2);
  gen_l0_kb(0, 64, 1);
  dbfull()->TEST_WaitForCompact();
  ASSERT_EQ("0,1", FilesPerLevel());
  std::vector<LiveFileMetaData> metadata;
  db_->GetLiveFilesMetaData(&metadata);
  ASSERT_EQ(1U, metadata.size());
  ASSERT_LE(metadata[0].size, k64KB + k4KB);
  ASSERT_GE(metadata[0].size, k64KB - k4KB);
4558

Y
Yi Wu 已提交
4559 4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574 4575 4576 4577 4578 4579 4580 4581 4582 4583 4584 4585 4586 4587 4588 4589 4590
  // Test compaction trigger and target_file_size_base
  // Reduce compaction trigger to 2, and reduce L1 file size to 32KB.
  // Writing to 64KB L0 files should trigger a compaction. Since these
  // 2 L0 files have the same key range, compaction merge them and should
  // result in 2 32KB L1 files.
  ASSERT_OK(dbfull()->SetOptions({{"level0_file_num_compaction_trigger", "2"},
                                  {"target_file_size_base", ToString(k32KB)}}));

  gen_l0_kb(0, 64, 1);
  ASSERT_EQ("1,1", FilesPerLevel());
  gen_l0_kb(0, 64, 1);
  dbfull()->TEST_WaitForCompact();
  ASSERT_EQ("0,2", FilesPerLevel());
  metadata.clear();
  db_->GetLiveFilesMetaData(&metadata);
  ASSERT_EQ(2U, metadata.size());
  ASSERT_LE(metadata[0].size, k32KB + k4KB);
  ASSERT_GE(metadata[0].size, k32KB - k4KB);
  ASSERT_LE(metadata[1].size, k32KB + k4KB);
  ASSERT_GE(metadata[1].size, k32KB - k4KB);

  // Test max_bytes_for_level_base
  // Increase level base size to 256KB and write enough data that will
  // fill L1 and L2. L1 size should be around 256KB while L2 size should be
  // around 256KB x 4.
  ASSERT_OK(
      dbfull()->SetOptions({{"max_bytes_for_level_base", ToString(k1MB)}}));

  // writing 96 x 64KB => 6 * 1024KB
  // (L1 + L2) = (1 + 4) * 1024KB
  for (int i = 0; i < 96; ++i) {
    gen_l0_kb(i, 64, 96);
4591
  }
Y
Yi Wu 已提交
4592 4593 4594
  dbfull()->TEST_WaitForCompact();
  ASSERT_GT(SizeAtLevel(1), k1MB / 2);
  ASSERT_LT(SizeAtLevel(1), k1MB + k1MB / 2);
4595

Y
Yi Wu 已提交
4596 4597 4598
  // Within (0.5, 1.5) of 4MB.
  ASSERT_GT(SizeAtLevel(2), 2 * k1MB);
  ASSERT_LT(SizeAtLevel(2), 6 * k1MB);
4599

Y
Yi Wu 已提交
4600 4601 4602 4603 4604 4605 4606 4607 4608 4609 4610 4611 4612 4613 4614 4615 4616 4617 4618 4619 4620
  // Test max_bytes_for_level_multiplier and
  // max_bytes_for_level_base. Now, reduce both mulitplier and level base,
  // After filling enough data that can fit in L1 - L3, we should see L1 size
  // reduces to 128KB from 256KB which was asserted previously. Same for L2.
  ASSERT_OK(
      dbfull()->SetOptions({{"max_bytes_for_level_multiplier", "2"},
                            {"max_bytes_for_level_base", ToString(k128KB)}}));

  // writing 20 x 64KB = 10 x 128KB
  // (L1 + L2 + L3) = (1 + 2 + 4) * 128KB
  for (int i = 0; i < 20; ++i) {
    gen_l0_kb(i, 64, 32);
  }
  dbfull()->TEST_WaitForCompact();
  uint64_t total_size = SizeAtLevel(1) + SizeAtLevel(2) + SizeAtLevel(3);
  ASSERT_TRUE(total_size < k128KB * 7 * 1.5);

  // Test level0_stop_writes_trigger.
  // Clean up memtable and L0. Block compaction threads. If continue to write
  // and flush memtables. We should see put stop after 8 memtable flushes
  // since level0_stop_writes_trigger = 8
4621
  dbfull()->TEST_FlushMemTable(true, true);
Y
Yi Wu 已提交
4622 4623 4624 4625 4626 4627 4628 4629 4630 4631 4632 4633
  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
  // Block compaction
  test::SleepingBackgroundTask sleeping_task_low;
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
                 Env::Priority::LOW);
  sleeping_task_low.WaitUntilSleeping();
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  int count = 0;
  Random rnd(301);
  WriteOptions wo;
  while (count < 64) {
    ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), wo));
4634
    dbfull()->TEST_FlushMemTable(true, true);
Y
Yi Wu 已提交
4635 4636 4637 4638 4639
    count++;
    if (dbfull()->TEST_write_controler().IsStopped()) {
      sleeping_task_low.WakeUp();
      break;
    }
4640
  }
Y
Yi Wu 已提交
4641 4642 4643
  // Stop trigger = 8
  ASSERT_EQ(count, 8);
  // Unblock
4644
  sleeping_task_low.WaitUntilDone();
S
sdong 已提交
4645

Y
Yi Wu 已提交
4646 4647 4648 4649 4650 4651 4652
  // Now reduce level0_stop_writes_trigger to 6. Clear up memtables and L0.
  // Block compaction thread again. Perform the put and memtable flushes
  // until we see the stop after 6 memtable flushes.
  ASSERT_OK(dbfull()->SetOptions({{"level0_stop_writes_trigger", "6"}}));
  dbfull()->TEST_FlushMemTable(true);
  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
S
sdong 已提交
4653

Y
Yi Wu 已提交
4654 4655
  // Block compaction again
  sleeping_task_low.Reset();
4656 4657 4658
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
                 Env::Priority::LOW);
  sleeping_task_low.WaitUntilSleeping();
Y
Yi Wu 已提交
4659 4660 4661
  count = 0;
  while (count < 64) {
    ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), wo));
4662
    dbfull()->TEST_FlushMemTable(true, true);
Y
Yi Wu 已提交
4663 4664 4665 4666 4667
    count++;
    if (dbfull()->TEST_write_controler().IsStopped()) {
      sleeping_task_low.WakeUp();
      break;
    }
S
sdong 已提交
4668
  }
Y
Yi Wu 已提交
4669 4670
  ASSERT_EQ(count, 6);
  // Unblock
4671 4672
  sleeping_task_low.WaitUntilDone();

Y
Yi Wu 已提交
4673 4674 4675 4676 4677 4678 4679 4680
  // Test disable_auto_compactions
  // Compaction thread is unblocked but auto compaction is disabled. Write
  // 4 L0 files and compaction should be triggered. If auto compaction is
  // disabled, then TEST_WaitForCompact will be waiting for nothing. Number of
  // L0 files do not change after the call.
  ASSERT_OK(dbfull()->SetOptions({{"disable_auto_compactions", "true"}}));
  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
S
sdong 已提交
4681

Y
Yi Wu 已提交
4682 4683 4684 4685 4686 4687 4688
  for (int i = 0; i < 4; ++i) {
    ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024)));
    // Wait for compaction so that put won't stop
    dbfull()->TEST_FlushMemTable(true);
  }
  dbfull()->TEST_WaitForCompact();
  ASSERT_EQ(NumTableFilesAtLevel(0), 4);
4689

Y
Yi Wu 已提交
4690 4691 4692 4693 4694
  // Enable auto compaction and perform the same test, # of L0 files should be
  // reduced after compaction.
  ASSERT_OK(dbfull()->SetOptions({{"disable_auto_compactions", "false"}}));
  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
4695

Y
Yi Wu 已提交
4696 4697 4698 4699
  for (int i = 0; i < 4; ++i) {
    ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024)));
    // Wait for compaction so that put won't stop
    dbfull()->TEST_FlushMemTable(true);
S
sdong 已提交
4700
  }
Y
Yi Wu 已提交
4701 4702 4703
  dbfull()->TEST_WaitForCompact();
  ASSERT_LT(NumTableFilesAtLevel(0), 4);
}
4704

4705
// Test dynamic FIFO compaction options.
4706 4707 4708 4709 4710 4711 4712 4713 4714 4715 4716 4717 4718
// This test covers just option parsing and makes sure that the options are
// correctly assigned. Also look at DBOptionsTest.SetFIFOCompactionOptions
// test which makes sure that the FIFO compaction funcionality is working
// as expected on dynamically changing the options.
// Even more FIFOCompactionTests are at DBTest.FIFOCompaction* .
TEST_F(DBTest, DynamicFIFOCompactionOptions) {
  Options options;
  options.create_if_missing = true;
  DestroyAndReopen(options);

  // Initial defaults
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.max_table_files_size,
            1024 * 1024 * 1024);
4719
  ASSERT_EQ(dbfull()->GetOptions().ttl, 0);
4720 4721 4722 4723 4724 4725 4726
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.allow_compaction,
            false);

  ASSERT_OK(dbfull()->SetOptions(
      {{"compaction_options_fifo", "{max_table_files_size=23;}"}}));
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.max_table_files_size,
            23);
4727
  ASSERT_EQ(dbfull()->GetOptions().ttl, 0);
4728 4729 4730
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.allow_compaction,
            false);

4731
  ASSERT_OK(dbfull()->SetOptions({{"ttl", "97"}}));
4732 4733
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.max_table_files_size,
            23);
4734
  ASSERT_EQ(dbfull()->GetOptions().ttl, 97);
4735 4736 4737
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.allow_compaction,
            false);

4738
  ASSERT_OK(dbfull()->SetOptions({{"ttl", "203"}}));
4739 4740
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.max_table_files_size,
            23);
4741
  ASSERT_EQ(dbfull()->GetOptions().ttl, 203);
4742 4743 4744 4745 4746 4747 4748
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.allow_compaction,
            false);

  ASSERT_OK(dbfull()->SetOptions(
      {{"compaction_options_fifo", "{allow_compaction=true;}"}}));
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.max_table_files_size,
            23);
4749
  ASSERT_EQ(dbfull()->GetOptions().ttl, 203);
4750 4751 4752 4753
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.allow_compaction,
            true);

  ASSERT_OK(dbfull()->SetOptions(
4754
      {{"compaction_options_fifo", "{max_table_files_size=31;}"}}));
4755 4756
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.max_table_files_size,
            31);
4757
  ASSERT_EQ(dbfull()->GetOptions().ttl, 203);
4758 4759 4760 4761 4762
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.allow_compaction,
            true);

  ASSERT_OK(dbfull()->SetOptions(
      {{"compaction_options_fifo",
4763 4764
        "{max_table_files_size=51;allow_compaction=true;}"}}));
  ASSERT_OK(dbfull()->SetOptions({{"ttl", "49"}}));
4765 4766
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.max_table_files_size,
            51);
4767
  ASSERT_EQ(dbfull()->GetOptions().ttl, 49);
4768 4769 4770 4771
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.allow_compaction,
            true);
}

4772 4773 4774 4775 4776 4777 4778 4779 4780 4781 4782 4783 4784 4785 4786 4787 4788 4789 4790 4791 4792 4793 4794 4795 4796 4797 4798 4799 4800 4801 4802 4803 4804 4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833 4834 4835 4836 4837 4838
TEST_F(DBTest, DynamicUniversalCompactionOptions) {
  Options options;
  options.create_if_missing = true;
  DestroyAndReopen(options);

  // Initial defaults
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.size_ratio, 1);
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.min_merge_width,
            2);
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.max_merge_width,
            UINT_MAX);
  ASSERT_EQ(dbfull()
                ->GetOptions()
                .compaction_options_universal.max_size_amplification_percent,
            200);
  ASSERT_EQ(dbfull()
                ->GetOptions()
                .compaction_options_universal.compression_size_percent,
            -1);
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.stop_style,
            kCompactionStopStyleTotalSize);
  ASSERT_EQ(
      dbfull()->GetOptions().compaction_options_universal.allow_trivial_move,
      false);

  ASSERT_OK(dbfull()->SetOptions(
      {{"compaction_options_universal", "{size_ratio=7;}"}}));
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.size_ratio, 7);
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.min_merge_width,
            2);
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.max_merge_width,
            UINT_MAX);
  ASSERT_EQ(dbfull()
                ->GetOptions()
                .compaction_options_universal.max_size_amplification_percent,
            200);
  ASSERT_EQ(dbfull()
                ->GetOptions()
                .compaction_options_universal.compression_size_percent,
            -1);
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.stop_style,
            kCompactionStopStyleTotalSize);
  ASSERT_EQ(
      dbfull()->GetOptions().compaction_options_universal.allow_trivial_move,
      false);

  ASSERT_OK(dbfull()->SetOptions(
      {{"compaction_options_universal", "{min_merge_width=11;}"}}));
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.size_ratio, 7);
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.min_merge_width,
            11);
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.max_merge_width,
            UINT_MAX);
  ASSERT_EQ(dbfull()
                ->GetOptions()
                .compaction_options_universal.max_size_amplification_percent,
            200);
  ASSERT_EQ(dbfull()
                ->GetOptions()
                .compaction_options_universal.compression_size_percent,
            -1);
  ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.stop_style,
            kCompactionStopStyleTotalSize);
  ASSERT_EQ(
      dbfull()->GetOptions().compaction_options_universal.allow_trivial_move,
      false);
}
4839
#endif  // ROCKSDB_LITE
4840

Y
Yi Wu 已提交
4841 4842 4843 4844 4845 4846 4847 4848
TEST_F(DBTest, FileCreationRandomFailure) {
  Options options;
  options.env = env_;
  options.create_if_missing = true;
  options.write_buffer_size = 100000;  // Small write buffer
  options.target_file_size_base = 200000;
  options.max_bytes_for_level_base = 1000000;
  options.max_bytes_for_level_multiplier = 2;
4849

Y
Yi Wu 已提交
4850 4851
  DestroyAndReopen(options);
  Random rnd(301);
S
sdong 已提交
4852

Y
Yi Wu 已提交
4853 4854 4855 4856 4857 4858 4859 4860 4861 4862 4863 4864 4865 4866 4867 4868 4869 4870 4871 4872 4873 4874 4875 4876 4877 4878 4879
  const int kCDTKeysPerBuffer = 4;
  const int kTestSize = kCDTKeysPerBuffer * 4096;
  const int kTotalIteration = 100;
  // the second half of the test involves in random failure
  // of file creation.
  const int kRandomFailureTest = kTotalIteration / 2;
  std::vector<std::string> values;
  for (int i = 0; i < kTestSize; ++i) {
    values.push_back("NOT_FOUND");
  }
  for (int j = 0; j < kTotalIteration; ++j) {
    if (j == kRandomFailureTest) {
      env_->non_writeable_rate_.store(90);
    }
    for (int k = 0; k < kTestSize; ++k) {
      // here we expect some of the Put fails.
      std::string value = RandomString(&rnd, 100);
      Status s = Put(Key(k), Slice(value));
      if (s.ok()) {
        // update the latest successful put
        values[k] = value;
      }
      // But everything before we simulate the failure-test should succeed.
      if (j < kRandomFailureTest) {
        ASSERT_OK(s);
      }
    }
S
sdong 已提交
4880 4881
  }

Y
Yi Wu 已提交
4882 4883 4884
  // If rocksdb does not do the correct job, internal assert will fail here.
  dbfull()->TEST_WaitForFlushMemTable();
  dbfull()->TEST_WaitForCompact();
S
sdong 已提交
4885

Y
Yi Wu 已提交
4886 4887 4888 4889 4890
  // verify we have the latest successful update
  for (int k = 0; k < kTestSize; ++k) {
    auto v = Get(Key(k));
    ASSERT_EQ(v, values[k]);
  }
4891

Y
Yi Wu 已提交
4892 4893 4894 4895 4896 4897 4898 4899
  // reopen and reverify we have the latest successful update
  env_->non_writeable_rate_.store(0);
  Reopen(options);
  for (int k = 0; k < kTestSize; ++k) {
    auto v = Get(Key(k));
    ASSERT_EQ(v, values[k]);
  }
}
S
sdong 已提交
4900

Y
Yi Wu 已提交
4901
#ifndef ROCKSDB_LITE
4902

Y
Yi Wu 已提交
4903 4904 4905 4906 4907 4908 4909 4910 4911
TEST_F(DBTest, DynamicMiscOptions) {
  // Test max_sequential_skip_in_iterations
  Options options;
  options.env = env_;
  options.create_if_missing = true;
  options.max_sequential_skip_in_iterations = 16;
  options.compression = kNoCompression;
  options.statistics = rocksdb::CreateDBStatistics();
  DestroyAndReopen(options);
S
sdong 已提交
4912

Y
Yi Wu 已提交
4913 4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4924 4925 4926 4927 4928 4929 4930 4931 4932 4933 4934
  auto assert_reseek_count = [this, &options](int key_start, int num_reseek) {
    int key0 = key_start;
    int key1 = key_start + 1;
    int key2 = key_start + 2;
    Random rnd(301);
    ASSERT_OK(Put(Key(key0), RandomString(&rnd, 8)));
    for (int i = 0; i < 10; ++i) {
      ASSERT_OK(Put(Key(key1), RandomString(&rnd, 8)));
    }
    ASSERT_OK(Put(Key(key2), RandomString(&rnd, 8)));
    std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
    iter->Seek(Key(key1));
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ(iter->key().compare(Key(key1)), 0);
    iter->Next();
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ(iter->key().compare(Key(key2)), 0);
    ASSERT_EQ(num_reseek,
              TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION));
  };
  // No reseek
  assert_reseek_count(100, 0);
S
sdong 已提交
4935

Y
Yi Wu 已提交
4936 4937 4938 4939 4940 4941 4942 4943 4944 4945 4946 4947
  ASSERT_OK(dbfull()->SetOptions({{"max_sequential_skip_in_iterations", "4"}}));
  // Clear memtable and make new option effective
  dbfull()->TEST_FlushMemTable(true);
  // Trigger reseek
  assert_reseek_count(200, 1);

  ASSERT_OK(
      dbfull()->SetOptions({{"max_sequential_skip_in_iterations", "16"}}));
  // Clear memtable and make new option effective
  dbfull()->TEST_FlushMemTable(true);
  // No reseek
  assert_reseek_count(300, 1);
A
Aaron Gao 已提交
4948 4949 4950 4951 4952 4953 4954 4955 4956 4957 4958 4959 4960 4961 4962 4963 4964 4965

  MutableCFOptions mutable_cf_options;
  CreateAndReopenWithCF({"pikachu"}, options);
  // Test soft_pending_compaction_bytes_limit,
  // hard_pending_compaction_bytes_limit
  ASSERT_OK(dbfull()->SetOptions(
      handles_[1], {{"soft_pending_compaction_bytes_limit", "200"},
                    {"hard_pending_compaction_bytes_limit", "300"}}));
  ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[1],
                                                     &mutable_cf_options));
  ASSERT_EQ(200, mutable_cf_options.soft_pending_compaction_bytes_limit);
  ASSERT_EQ(300, mutable_cf_options.hard_pending_compaction_bytes_limit);
  // Test report_bg_io_stats
  ASSERT_OK(
      dbfull()->SetOptions(handles_[1], {{"report_bg_io_stats", "true"}}));
  // sanity check
  ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[1],
                                                     &mutable_cf_options));
S
Siying Dong 已提交
4966
  ASSERT_TRUE(mutable_cf_options.report_bg_io_stats);
A
Aaron Gao 已提交
4967 4968 4969 4970 4971 4972
  // Test compression
  // sanity check
  ASSERT_OK(dbfull()->SetOptions({{"compression", "kNoCompression"}}));
  ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[0],
                                                     &mutable_cf_options));
  ASSERT_EQ(CompressionType::kNoCompression, mutable_cf_options.compression);
4973 4974 4975 4976 4977 4978 4979 4980 4981

  if (Snappy_Supported()) {
    ASSERT_OK(dbfull()->SetOptions({{"compression", "kSnappyCompression"}}));
    ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[0],
                                                       &mutable_cf_options));
    ASSERT_EQ(CompressionType::kSnappyCompression,
              mutable_cf_options.compression);
  }

A
Aaron Gao 已提交
4982 4983 4984 4985 4986
  // Test paranoid_file_checks already done in db_block_cache_test
  ASSERT_OK(
      dbfull()->SetOptions(handles_[1], {{"paranoid_file_checks", "true"}}));
  ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[1],
                                                     &mutable_cf_options));
S
Siying Dong 已提交
4987
  ASSERT_TRUE(mutable_cf_options.report_bg_io_stats);
S
sdong 已提交
4988
}
Y
Yi Wu 已提交
4989 4990 4991 4992 4993 4994 4995 4996 4997 4998 4999 5000 5001 5002 5003
#endif  // ROCKSDB_LITE

TEST_F(DBTest, L0L1L2AndUpHitCounter) {
  Options options = CurrentOptions();
  options.write_buffer_size = 32 * 1024;
  options.target_file_size_base = 32 * 1024;
  options.level0_file_num_compaction_trigger = 2;
  options.level0_slowdown_writes_trigger = 2;
  options.level0_stop_writes_trigger = 4;
  options.max_bytes_for_level_base = 64 * 1024;
  options.max_write_buffer_number = 2;
  options.max_background_compactions = 8;
  options.max_background_flushes = 8;
  options.statistics = rocksdb::CreateDBStatistics();
  CreateAndReopenWithCF({"mypikachu"}, options);
5004

Y
Yi Wu 已提交
5005 5006 5007 5008 5009 5010 5011
  int numkeys = 20000;
  for (int i = 0; i < numkeys; i++) {
    ASSERT_OK(Put(1, Key(i), "val"));
  }
  ASSERT_EQ(0, TestGetTickerCount(options, GET_HIT_L0));
  ASSERT_EQ(0, TestGetTickerCount(options, GET_HIT_L1));
  ASSERT_EQ(0, TestGetTickerCount(options, GET_HIT_L2_AND_UP));
5012

Y
Yi Wu 已提交
5013 5014
  ASSERT_OK(Flush(1));
  dbfull()->TEST_WaitForCompact();
5015

Y
Yi Wu 已提交
5016 5017
  for (int i = 0; i < numkeys; i++) {
    ASSERT_EQ(Get(1, Key(i)), "val");
5018 5019
  }

Y
Yi Wu 已提交
5020 5021 5022 5023 5024 5025 5026
  ASSERT_GT(TestGetTickerCount(options, GET_HIT_L0), 100);
  ASSERT_GT(TestGetTickerCount(options, GET_HIT_L1), 100);
  ASSERT_GT(TestGetTickerCount(options, GET_HIT_L2_AND_UP), 100);

  ASSERT_EQ(numkeys, TestGetTickerCount(options, GET_HIT_L0) +
                         TestGetTickerCount(options, GET_HIT_L1) +
                         TestGetTickerCount(options, GET_HIT_L2_AND_UP));
5027
}
S
sdong 已提交
5028

Y
Yi Wu 已提交
5029 5030 5031 5032 5033
TEST_F(DBTest, EncodeDecompressedBlockSizeTest) {
  // iter 0 -- zlib
  // iter 1 -- bzip2
  // iter 2 -- lz4
  // iter 3 -- lz4HC
5034
  // iter 4 -- xpress
5035
  CompressionType compressions[] = {kZlibCompression, kBZip2Compression,
A
Aaron Gao 已提交
5036
                                    kLZ4Compression, kLZ4HCCompression,
5037 5038 5039
                                    kXpressCompression};
  for (auto comp : compressions) {
    if (!CompressionTypeSupported(comp)) {
Y
Yi Wu 已提交
5040 5041 5042 5043 5044 5045 5046 5047 5048 5049 5050
      continue;
    }
    // first_table_version 1 -- generate with table_version == 1, read with
    // table_version == 2
    // first_table_version 2 -- generate with table_version == 2, read with
    // table_version == 1
    for (int first_table_version = 1; first_table_version <= 2;
         ++first_table_version) {
      BlockBasedTableOptions table_options;
      table_options.format_version = first_table_version;
      table_options.filter_policy.reset(NewBloomFilterPolicy(10));
5051
      Options options = CurrentOptions();
Y
Yi Wu 已提交
5052 5053
      options.table_factory.reset(NewBlockBasedTableFactory(table_options));
      options.create_if_missing = true;
5054
      options.compression = comp;
Y
Yi Wu 已提交
5055 5056
      DestroyAndReopen(options);

5057
      int kNumKeysWritten = 1000;
Y
Yi Wu 已提交
5058 5059 5060 5061 5062 5063 5064 5065 5066 5067 5068 5069 5070 5071

      Random rnd(301);
      for (int i = 0; i < kNumKeysWritten; ++i) {
        // compressible string
        ASSERT_OK(Put(Key(i), RandomString(&rnd, 128) + std::string(128, 'a')));
      }

      table_options.format_version = first_table_version == 1 ? 2 : 1;
      options.table_factory.reset(NewBlockBasedTableFactory(table_options));
      Reopen(options);
      for (int i = 0; i < kNumKeysWritten; ++i) {
        auto r = Get(Key(i));
        ASSERT_EQ(r.substr(128), std::string(128, 'a'));
      }
5072 5073 5074 5075
    }
  }
}

Y
Yi Wu 已提交
5076
TEST_F(DBTest, CloseSpeedup) {
5077
  Options options = CurrentOptions();
Y
Yi Wu 已提交
5078 5079 5080 5081 5082 5083 5084
  options.compaction_style = kCompactionStyleLevel;
  options.write_buffer_size = 110 << 10;  // 110KB
  options.arena_block_size = 4 << 10;
  options.level0_file_num_compaction_trigger = 2;
  options.num_levels = 4;
  options.max_bytes_for_level_base = 400 * 1024;
  options.max_write_buffer_number = 16;
5085

Y
Yi Wu 已提交
5086 5087 5088 5089 5090 5091 5092 5093 5094
  // Block background threads
  env_->SetBackgroundThreads(1, Env::LOW);
  env_->SetBackgroundThreads(1, Env::HIGH);
  test::SleepingBackgroundTask sleeping_task_low;
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
                 Env::Priority::LOW);
  test::SleepingBackgroundTask sleeping_task_high;
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
                 &sleeping_task_high, Env::Priority::HIGH);
5095

Y
Yi Wu 已提交
5096 5097 5098 5099 5100 5101 5102
  std::vector<std::string> filenames;
  env_->GetChildren(dbname_, &filenames);
  // Delete archival files.
  for (size_t i = 0; i < filenames.size(); ++i) {
    env_->DeleteFile(dbname_ + "/" + filenames[i]);
  }
  env_->DeleteDir(dbname_);
5103 5104
  DestroyAndReopen(options);

Y
Yi Wu 已提交
5105 5106 5107
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
  env_->SetBackgroundThreads(1, Env::LOW);
  env_->SetBackgroundThreads(1, Env::HIGH);
5108
  Random rnd(301);
Y
Yi Wu 已提交
5109
  int key_idx = 0;
5110

Y
Yi Wu 已提交
5111 5112 5113 5114 5115
  // First three 110KB files are not going to level 2
  // After that, (100K, 200K)
  for (int num = 0; num < 5; num++) {
    GenerateNewFile(&rnd, &key_idx, true);
  }
5116

Y
Yi Wu 已提交
5117
  ASSERT_EQ(0, GetSstFileCount(dbname_));
5118 5119

  Close();
Y
Yi Wu 已提交
5120
  ASSERT_EQ(0, GetSstFileCount(dbname_));
5121

Y
Yi Wu 已提交
5122 5123 5124 5125 5126
  // Unblock background threads
  sleeping_task_high.WakeUp();
  sleeping_task_high.WaitUntilDone();
  sleeping_task_low.WakeUp();
  sleeping_task_low.WaitUntilDone();
5127

Y
Yi Wu 已提交
5128
  Destroy(options);
5129 5130
}

Y
Yi Wu 已提交
5131 5132 5133
class DelayedMergeOperator : public MergeOperator {
 private:
  DBTest* db_test_;
I
Islam AbdelRahman 已提交
5134

Y
Yi Wu 已提交
5135 5136
 public:
  explicit DelayedMergeOperator(DBTest* d) : db_test_(d) {}
5137

5138 5139
  bool FullMergeV2(const MergeOperationInput& /*merge_in*/,
                   MergeOperationOutput* merge_out) const override {
Y
Yi Wu 已提交
5140
    db_test_->env_->addon_time_.fetch_add(1000);
5141
    merge_out->new_value = "";
Y
Yi Wu 已提交
5142
    return true;
5143 5144
  }

5145
  const char* Name() const override { return "DelayedMergeOperator"; }
Y
Yi Wu 已提交
5146
};
I
Islam AbdelRahman 已提交
5147

Y
Yi Wu 已提交
5148 5149 5150 5151 5152
TEST_F(DBTest, MergeTestTime) {
  std::string one, two, three;
  PutFixed64(&one, 1);
  PutFixed64(&two, 2);
  PutFixed64(&three, 3);
I
Islam AbdelRahman 已提交
5153

Y
Yi Wu 已提交
5154 5155 5156 5157
  // Enable time profiling
  SetPerfLevel(kEnableTime);
  this->env_->addon_time_.store(0);
  this->env_->time_elapse_only_sleep_ = true;
M
Maysam Yabandeh 已提交
5158
  this->env_->no_slowdown_ = true;
I
Islam AbdelRahman 已提交
5159
  Options options = CurrentOptions();
Y
Yi Wu 已提交
5160 5161
  options.statistics = rocksdb::CreateDBStatistics();
  options.merge_operator.reset(new DelayedMergeOperator(this));
I
Islam AbdelRahman 已提交
5162 5163
  DestroyAndReopen(options);

Y
Yi Wu 已提交
5164 5165 5166 5167 5168 5169 5170
  ASSERT_EQ(TestGetTickerCount(options, MERGE_OPERATION_TOTAL_TIME), 0);
  db_->Put(WriteOptions(), "foo", one);
  ASSERT_OK(Flush());
  ASSERT_OK(db_->Merge(WriteOptions(), "foo", two));
  ASSERT_OK(Flush());
  ASSERT_OK(db_->Merge(WriteOptions(), "foo", three));
  ASSERT_OK(Flush());
I
Islam AbdelRahman 已提交
5171

Y
Yi Wu 已提交
5172 5173 5174 5175 5176
  ReadOptions opt;
  opt.verify_checksums = true;
  opt.snapshot = nullptr;
  std::string result;
  db_->Get(opt, "foo", &result);
I
Islam AbdelRahman 已提交
5177

Y
Yi Wu 已提交
5178
  ASSERT_EQ(1000000, TestGetTickerCount(options, MERGE_OPERATION_TOTAL_TIME));
I
Islam AbdelRahman 已提交
5179

Y
Yi Wu 已提交
5180 5181 5182 5183 5184 5185 5186
  ReadOptions read_options;
  std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
  int count = 0;
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    ASSERT_OK(iter->status());
    ++count;
  }
I
Islam AbdelRahman 已提交
5187

Y
Yi Wu 已提交
5188 5189
  ASSERT_EQ(1, count);
  ASSERT_EQ(2000000, TestGetTickerCount(options, MERGE_OPERATION_TOTAL_TIME));
D
Daniel Black 已提交
5190
#ifdef ROCKSDB_USING_THREAD_STATUS
Y
Yi Wu 已提交
5191 5192 5193
  ASSERT_GT(TestGetTickerCount(options, FLUSH_WRITE_BYTES), 0);
#endif  // ROCKSDB_USING_THREAD_STATUS
  this->env_->time_elapse_only_sleep_ = false;
I
Islam AbdelRahman 已提交
5194 5195
}

Y
Yi Wu 已提交
5196 5197 5198
#ifndef ROCKSDB_LITE
TEST_P(DBTestWithParam, MergeCompactionTimeTest) {
  SetPerfLevel(kEnableTime);
5199
  Options options = CurrentOptions();
Y
Yi Wu 已提交
5200 5201 5202 5203 5204
  options.compaction_filter_factory = std::make_shared<KeepFilterFactory>();
  options.statistics = rocksdb::CreateDBStatistics();
  options.merge_operator.reset(new DelayedMergeOperator(this));
  options.compaction_style = kCompactionStyleUniversal;
  options.max_subcompactions = max_subcompactions_;
5205 5206
  DestroyAndReopen(options);

Y
Yi Wu 已提交
5207 5208
  for (int i = 0; i < 1000; i++) {
    ASSERT_OK(db_->Merge(WriteOptions(), "foo", "TEST"));
5209 5210
    ASSERT_OK(Flush());
  }
Y
Yi Wu 已提交
5211 5212
  dbfull()->TEST_WaitForFlushMemTable();
  dbfull()->TEST_WaitForCompact();
5213

Y
Yi Wu 已提交
5214
  ASSERT_NE(TestGetTickerCount(options, MERGE_OPERATION_TOTAL_TIME), 0);
5215
}
5216

Y
Yi Wu 已提交
5217
TEST_P(DBTestWithParam, FilterCompactionTimeTest) {
5218
  Options options = CurrentOptions();
Y
Yi Wu 已提交
5219 5220
  options.compaction_filter_factory =
      std::make_shared<DelayFilterFactory>(this);
5221
  options.disable_auto_compactions = true;
Y
Yi Wu 已提交
5222 5223
  options.create_if_missing = true;
  options.statistics = rocksdb::CreateDBStatistics();
5224
  options.statistics->set_stats_level(kExceptTimeForMutex);
Y
Yi Wu 已提交
5225
  options.max_subcompactions = max_subcompactions_;
5226 5227
  DestroyAndReopen(options);

Y
Yi Wu 已提交
5228 5229 5230 5231 5232 5233
  // put some data
  for (int table = 0; table < 4; ++table) {
    for (int i = 0; i < 10 + table; ++i) {
      Put(ToString(table * 100 + i), "val");
    }
    Flush();
5234 5235
  }

Y
Yi Wu 已提交
5236 5237 5238 5239
  CompactRangeOptions cro;
  cro.exclusive_manual_compaction = exclusive_manual_compaction_;
  ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
  ASSERT_EQ(0U, CountLiveFiles());
5240

Y
Yi Wu 已提交
5241
  Reopen(options);
5242

Y
Yi Wu 已提交
5243 5244 5245 5246
  Iterator* itr = db_->NewIterator(ReadOptions());
  itr->SeekToFirst();
  ASSERT_NE(TestGetTickerCount(options, FILTER_OPERATION_TOTAL_TIME), 0);
  delete itr;
5247
}
Y
Yi Wu 已提交
5248
#endif  // ROCKSDB_LITE
5249

Y
Yi Wu 已提交
5250 5251 5252 5253 5254 5255
TEST_F(DBTest, TestLogCleanup) {
  Options options = CurrentOptions();
  options.write_buffer_size = 64 * 1024;  // very small
  // only two memtables allowed ==> only two log files
  options.max_write_buffer_number = 2;
  Reopen(options);
5256

Y
Yi Wu 已提交
5257 5258 5259 5260 5261
  for (int i = 0; i < 100000; ++i) {
    Put(Key(i), "val");
    // only 2 memtables will be alive, so logs_to_free needs to always be below
    // 2
    ASSERT_LT(dbfull()->TEST_LogsToFreeSize(), static_cast<size_t>(3));
5262 5263 5264
  }
}

Y
Yi Wu 已提交
5265 5266 5267 5268 5269 5270 5271
#ifndef ROCKSDB_LITE
TEST_F(DBTest, EmptyCompactedDB) {
  Options options = CurrentOptions();
  options.max_open_files = -1;
  Close();
  ASSERT_OK(ReadOnlyReopen(options));
  Status s = Put("new", "value");
5272
  ASSERT_TRUE(s.IsNotSupported());
Y
Yi Wu 已提交
5273
  Close();
5274
}
Y
Yi Wu 已提交
5275
#endif  // ROCKSDB_LITE
5276

I
Islam AbdelRahman 已提交
5277
#ifndef ROCKSDB_LITE
Y
Yi Wu 已提交
5278 5279 5280
TEST_F(DBTest, SuggestCompactRangeTest) {
  class CompactionFilterFactoryGetContext : public CompactionFilterFactory {
   public:
5281
    std::unique_ptr<CompactionFilter> CreateCompactionFilter(
Y
Yi Wu 已提交
5282 5283 5284 5285
        const CompactionFilter::Context& context) override {
      saved_context = context;
      std::unique_ptr<CompactionFilter> empty_filter;
      return empty_filter;
5286
    }
Y
Yi Wu 已提交
5287 5288
    const char* Name() const override {
      return "CompactionFilterFactoryGetContext";
5289
    }
Y
Yi Wu 已提交
5290 5291 5292 5293
    static bool IsManual(CompactionFilterFactory* compaction_filter_factory) {
      return reinterpret_cast<CompactionFilterFactoryGetContext*>(
                 compaction_filter_factory)
          ->saved_context.is_manual_compaction;
5294
    }
Y
Yi Wu 已提交
5295 5296
    CompactionFilter::Context saved_context;
  };
5297

5298
  Options options = CurrentOptions();
Y
Yi Wu 已提交
5299 5300 5301 5302 5303 5304 5305 5306 5307
  options.memtable_factory.reset(
      new SpecialSkipListFactory(DBTestBase::kNumKeysByGenerateNewRandomFile));
  options.compaction_style = kCompactionStyleLevel;
  options.compaction_filter_factory.reset(
      new CompactionFilterFactoryGetContext());
  options.write_buffer_size = 200 << 10;
  options.arena_block_size = 4 << 10;
  options.level0_file_num_compaction_trigger = 4;
  options.num_levels = 4;
5308
  options.compression = kNoCompression;
Y
Yi Wu 已提交
5309 5310
  options.max_bytes_for_level_base = 450 << 10;
  options.target_file_size_base = 98 << 10;
5311
  options.max_compaction_bytes = static_cast<uint64_t>(1) << 60;  // inf
5312

Y
Yi Wu 已提交
5313
  Reopen(options);
5314

Y
Yi Wu 已提交
5315
  Random rnd(301);
5316

Y
Yi Wu 已提交
5317 5318
  for (int num = 0; num < 3; num++) {
    GenerateNewRandomFile(&rnd);
5319 5320
  }

Y
Yi Wu 已提交
5321 5322 5323 5324
  GenerateNewRandomFile(&rnd);
  ASSERT_EQ("0,4", FilesPerLevel(0));
  ASSERT_TRUE(!CompactionFilterFactoryGetContext::IsManual(
      options.compaction_filter_factory.get()));
5325

Y
Yi Wu 已提交
5326 5327
  GenerateNewRandomFile(&rnd);
  ASSERT_EQ("1,4", FilesPerLevel(0));
5328

Y
Yi Wu 已提交
5329 5330
  GenerateNewRandomFile(&rnd);
  ASSERT_EQ("2,4", FilesPerLevel(0));
5331

Y
Yi Wu 已提交
5332 5333 5334 5335 5336
  GenerateNewRandomFile(&rnd);
  ASSERT_EQ("3,4", FilesPerLevel(0));

  GenerateNewRandomFile(&rnd);
  ASSERT_EQ("0,4,4", FilesPerLevel(0));
5337

Y
Yi Wu 已提交
5338 5339
  GenerateNewRandomFile(&rnd);
  ASSERT_EQ("1,4,4", FilesPerLevel(0));
5340

Y
Yi Wu 已提交
5341 5342
  GenerateNewRandomFile(&rnd);
  ASSERT_EQ("2,4,4", FilesPerLevel(0));
5343

Y
Yi Wu 已提交
5344 5345
  GenerateNewRandomFile(&rnd);
  ASSERT_EQ("3,4,4", FilesPerLevel(0));
5346

Y
Yi Wu 已提交
5347 5348
  GenerateNewRandomFile(&rnd);
  ASSERT_EQ("0,4,8", FilesPerLevel(0));
5349

Y
Yi Wu 已提交
5350 5351
  GenerateNewRandomFile(&rnd);
  ASSERT_EQ("1,4,8", FilesPerLevel(0));
5352

Y
Yi Wu 已提交
5353 5354 5355 5356
  // compact it three times
  for (int i = 0; i < 3; ++i) {
    ASSERT_OK(experimental::SuggestCompactRange(db_, nullptr, nullptr));
    dbfull()->TEST_WaitForCompact();
5357 5358
  }

Y
Yi Wu 已提交
5359 5360 5361
  // All files are compacted
  ASSERT_EQ(0, NumTableFilesAtLevel(0));
  ASSERT_EQ(0, NumTableFilesAtLevel(1));
5362

Y
Yi Wu 已提交
5363 5364
  GenerateNewRandomFile(&rnd);
  ASSERT_EQ(1, NumTableFilesAtLevel(0));
5365

Y
Yi Wu 已提交
5366 5367 5368 5369
  // nonoverlapping with the file on level 0
  Slice start("a"), end("b");
  ASSERT_OK(experimental::SuggestCompactRange(db_, &start, &end));
  dbfull()->TEST_WaitForCompact();
5370

Y
Yi Wu 已提交
5371 5372
  // should not compact the level 0 file
  ASSERT_EQ(1, NumTableFilesAtLevel(0));
5373

Y
Yi Wu 已提交
5374 5375 5376 5377 5378 5379
  start = Slice("j");
  end = Slice("m");
  ASSERT_OK(experimental::SuggestCompactRange(db_, &start, &end));
  dbfull()->TEST_WaitForCompact();
  ASSERT_TRUE(CompactionFilterFactoryGetContext::IsManual(
      options.compaction_filter_factory.get()));
5380

Y
Yi Wu 已提交
5381 5382 5383
  // now it should compact the level 0 file
  ASSERT_EQ(0, NumTableFilesAtLevel(0));
  ASSERT_EQ(1, NumTableFilesAtLevel(1));
5384 5385
}

Y
Yi Wu 已提交
5386 5387 5388 5389 5390
TEST_F(DBTest, PromoteL0) {
  Options options = CurrentOptions();
  options.disable_auto_compactions = true;
  options.write_buffer_size = 10 * 1024 * 1024;
  DestroyAndReopen(options);
5391

Y
Yi Wu 已提交
5392 5393 5394
  // non overlapping ranges
  std::vector<std::pair<int32_t, int32_t>> ranges = {
      {81, 160}, {0, 80}, {161, 240}, {241, 320}};
5395

Y
Yi Wu 已提交
5396
  int32_t value_size = 10 * 1024;  // 10 KB
5397

Y
Yi Wu 已提交
5398 5399 5400 5401 5402 5403
  Random rnd(301);
  std::map<int32_t, std::string> values;
  for (const auto& range : ranges) {
    for (int32_t j = range.first; j < range.second; j++) {
      values[j] = RandomString(&rnd, value_size);
      ASSERT_OK(Put(Key(j), values[j]));
5404
    }
Y
Yi Wu 已提交
5405 5406
    ASSERT_OK(Flush());
  }
5407

Y
Yi Wu 已提交
5408 5409 5410
  int32_t level0_files = NumTableFilesAtLevel(0, 0);
  ASSERT_EQ(level0_files, ranges.size());
  ASSERT_EQ(NumTableFilesAtLevel(1, 0), 0);  // No files in L1
5411

Y
Yi Wu 已提交
5412 5413 5414 5415 5416
  // Promote L0 level to L2.
  ASSERT_OK(experimental::PromoteL0(db_, db_->DefaultColumnFamily(), 2));
  // We expect that all the files were trivially moved from L0 to L2
  ASSERT_EQ(NumTableFilesAtLevel(0, 0), 0);
  ASSERT_EQ(NumTableFilesAtLevel(2, 0), level0_files);
5417

Y
Yi Wu 已提交
5418 5419 5420 5421
  for (const auto& kv : values) {
    ASSERT_EQ(Get(Key(kv.first)), kv.second);
  }
}
5422

Y
Yi Wu 已提交
5423 5424 5425 5426 5427
TEST_F(DBTest, PromoteL0Failure) {
  Options options = CurrentOptions();
  options.disable_auto_compactions = true;
  options.write_buffer_size = 10 * 1024 * 1024;
  DestroyAndReopen(options);
5428

Y
Yi Wu 已提交
5429 5430 5431 5432 5433 5434
  // Produce two L0 files with overlapping ranges.
  ASSERT_OK(Put(Key(0), ""));
  ASSERT_OK(Put(Key(3), ""));
  ASSERT_OK(Flush());
  ASSERT_OK(Put(Key(1), ""));
  ASSERT_OK(Flush());
5435

Y
Yi Wu 已提交
5436 5437 5438 5439
  Status status;
  // Fails because L0 has overlapping files.
  status = experimental::PromoteL0(db_, db_->DefaultColumnFamily());
  ASSERT_TRUE(status.IsInvalidArgument());
5440

Y
Yi Wu 已提交
5441 5442 5443
  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
  // Now there is a file in L1.
  ASSERT_GE(NumTableFilesAtLevel(1, 0), 1);
5444

Y
Yi Wu 已提交
5445 5446 5447 5448 5449
  ASSERT_OK(Put(Key(5), ""));
  ASSERT_OK(Flush());
  // Fails because L1 is non-empty.
  status = experimental::PromoteL0(db_, db_->DefaultColumnFamily());
  ASSERT_TRUE(status.IsInvalidArgument());
5450 5451
}

Y
Yi Wu 已提交
5452
// Github issue #596
5453 5454 5455
TEST_F(DBTest, CompactRangeWithEmptyBottomLevel) {
  const int kNumLevels = 2;
  const int kNumL0Files = 2;
5456
  Options options = CurrentOptions();
5457 5458
  options.disable_auto_compactions = true;
  options.num_levels = kNumLevels;
Y
Yi Wu 已提交
5459
  DestroyAndReopen(options);
5460

Y
Yi Wu 已提交
5461
  Random rnd(301);
5462 5463 5464
  for (int i = 0; i < kNumL0Files; ++i) {
    ASSERT_OK(Put(Key(0), RandomString(&rnd, 1024)));
    Flush();
5465
  }
5466 5467
  ASSERT_EQ(NumTableFilesAtLevel(0), kNumL0Files);
  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
5468

Y
Yi Wu 已提交
5469
  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
5470 5471
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  ASSERT_EQ(NumTableFilesAtLevel(1), kNumL0Files);
Y
Yi Wu 已提交
5472
}
Y
Yi Wu 已提交
5473
#endif  // ROCKSDB_LITE
5474

Y
Yi Wu 已提交
5475
TEST_F(DBTest, AutomaticConflictsWithManualCompaction) {
5476
  const int kNumL0Files = 50;
Y
Yi Wu 已提交
5477
  Options options = CurrentOptions();
5478 5479 5480 5481
  options.level0_file_num_compaction_trigger = 4;
  // never slowdown / stop
  options.level0_slowdown_writes_trigger = 999999;
  options.level0_stop_writes_trigger = 999999;
Y
Yi Wu 已提交
5482
  options.max_background_compactions = 10;
5483 5484
  DestroyAndReopen(options);

5485 5486 5487 5488 5489 5490 5491
  // schedule automatic compactions after the manual one starts, but before it
  // finishes to ensure conflict.
  rocksdb::SyncPoint::GetInstance()->LoadDependency(
      {{"DBImpl::BackgroundCompaction:Start",
        "DBTest::AutomaticConflictsWithManualCompaction:PrePuts"},
       {"DBTest::AutomaticConflictsWithManualCompaction:PostPuts",
        "DBImpl::BackgroundCompaction:NonTrivial:AfterRun"}});
Y
Yi Wu 已提交
5492 5493
  std::atomic<int> callback_count(0);
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
5494
      "DBImpl::MaybeScheduleFlushOrCompaction:Conflict",
5495
      [&](void* /*arg*/) { callback_count.fetch_add(1); });
Y
Yi Wu 已提交
5496
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
5497 5498 5499 5500 5501 5502 5503 5504 5505

  Random rnd(301);
  for (int i = 0; i < 2; ++i) {
    // put two keys to ensure no trivial move
    for (int j = 0; j < 2; ++j) {
      ASSERT_OK(Put(Key(j), RandomString(&rnd, 1024)));
    }
    ASSERT_OK(Flush());
  }
D
Dmitri Smirnov 已提交
5506
  port::Thread manual_compaction_thread([this]() {
5507 5508 5509 5510
    CompactRangeOptions croptions;
    croptions.exclusive_manual_compaction = true;
    ASSERT_OK(db_->CompactRange(croptions, nullptr, nullptr));
  });
5511 5512 5513 5514 5515 5516 5517 5518 5519 5520 5521

  TEST_SYNC_POINT("DBTest::AutomaticConflictsWithManualCompaction:PrePuts");
  for (int i = 0; i < kNumL0Files; ++i) {
    // put two keys to ensure no trivial move
    for (int j = 0; j < 2; ++j) {
      ASSERT_OK(Put(Key(j), RandomString(&rnd, 1024)));
    }
    ASSERT_OK(Flush());
  }
  TEST_SYNC_POINT("DBTest::AutomaticConflictsWithManualCompaction:PostPuts");

Y
Yi Wu 已提交
5522
  ASSERT_GE(callback_count.load(), 1);
5523
  for (int i = 0; i < 2; ++i) {
Y
Yi Wu 已提交
5524 5525
    ASSERT_NE("NOT_FOUND", Get(Key(i)));
  }
5526 5527 5528
  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
  manual_compaction_thread.join();
  dbfull()->TEST_WaitForCompact();
5529 5530
}

5531
#ifndef ROCKSDB_LITE
5532 5533 5534 5535 5536 5537 5538 5539 5540 5541 5542 5543 5544 5545 5546 5547 5548 5549 5550 5551 5552 5553 5554 5555 5556 5557 5558 5559 5560 5561 5562 5563 5564 5565 5566 5567 5568 5569 5570 5571 5572 5573 5574 5575 5576 5577 5578 5579 5580 5581 5582 5583 5584 5585 5586 5587 5588 5589 5590 5591
TEST_F(DBTest, CompactFilesShouldTriggerAutoCompaction) {
  Options options = CurrentOptions();
  options.max_background_compactions = 1;
  options.level0_file_num_compaction_trigger = 4;
  options.level0_slowdown_writes_trigger = 36;
  options.level0_stop_writes_trigger = 36;
  DestroyAndReopen(options);

  // generate files for manual compaction
  Random rnd(301);
  for (int i = 0; i < 2; ++i) {
    // put two keys to ensure no trivial move
    for (int j = 0; j < 2; ++j) {
      ASSERT_OK(Put(Key(j), RandomString(&rnd, 1024)));
    }
    ASSERT_OK(Flush());
  }

  rocksdb::ColumnFamilyMetaData cf_meta_data;
  db_->GetColumnFamilyMetaData(db_->DefaultColumnFamily(), &cf_meta_data);

  std::vector<std::string> input_files;
  input_files.push_back(cf_meta_data.levels[0].files[0].name);

  SyncPoint::GetInstance()->LoadDependency({
      {"CompactFilesImpl:0",
       "DBTest::CompactFilesShouldTriggerAutoCompaction:Begin"},
      {"DBTest::CompactFilesShouldTriggerAutoCompaction:End",
       "CompactFilesImpl:1"},
  });

  SyncPoint::GetInstance()->EnableProcessing();

  port::Thread manual_compaction_thread([&]() {
      auto s = db_->CompactFiles(CompactionOptions(),
          db_->DefaultColumnFamily(), input_files, 0);
  });

  TEST_SYNC_POINT(
          "DBTest::CompactFilesShouldTriggerAutoCompaction:Begin");
  // generate enough files to trigger compaction
  for (int i = 0; i < 20; ++i) {
    for (int j = 0; j < 2; ++j) {
      ASSERT_OK(Put(Key(j), RandomString(&rnd, 1024)));
    }
    ASSERT_OK(Flush());
  }
  db_->GetColumnFamilyMetaData(db_->DefaultColumnFamily(), &cf_meta_data);
  ASSERT_GT(cf_meta_data.levels[0].files.size(),
      options.level0_file_num_compaction_trigger);
  TEST_SYNC_POINT(
          "DBTest::CompactFilesShouldTriggerAutoCompaction:End");

  manual_compaction_thread.join();
  dbfull()->TEST_WaitForCompact();

  db_->GetColumnFamilyMetaData(db_->DefaultColumnFamily(), &cf_meta_data);
  ASSERT_LE(cf_meta_data.levels[0].files.size(),
      options.level0_file_num_compaction_trigger);
}
5592
#endif  // ROCKSDB_LITE
5593

Y
Yi Wu 已提交
5594 5595 5596
// Github issue #595
// Large write batch with column families
TEST_F(DBTest, LargeBatchWithColumnFamilies) {
5597 5598
  Options options = CurrentOptions();
  options.env = env_;
Y
Yi Wu 已提交
5599 5600 5601 5602 5603 5604 5605 5606 5607 5608 5609 5610 5611 5612 5613 5614
  options.write_buffer_size = 100000;  // Small write buffer
  CreateAndReopenWithCF({"pikachu"}, options);
  int64_t j = 0;
  for (int i = 0; i < 5; i++) {
    for (int pass = 1; pass <= 3; pass++) {
      WriteBatch batch;
      size_t write_size = 1024 * 1024 * (5 + i);
      fprintf(stderr, "prepare: %" ROCKSDB_PRIszt " MB, pass:%d\n",
              (write_size / 1024 / 1024), pass);
      for (;;) {
        std::string data(3000, j++ % 127 + 20);
        data += ToString(j);
        batch.Put(handles_[0], Slice(data), Slice(data));
        if (batch.GetDataSize() > write_size) {
          break;
        }
5615
      }
Y
Yi Wu 已提交
5616 5617 5618 5619
      fprintf(stderr, "write: %" ROCKSDB_PRIszt " MB\n",
              (batch.GetDataSize() / 1024 / 1024));
      ASSERT_OK(dbfull()->Write(WriteOptions(), &batch));
      fprintf(stderr, "done\n");
5620
    }
Y
Yi Wu 已提交
5621 5622 5623 5624
  }
  // make sure we can re-open it.
  ASSERT_OK(TryReopenWithColumnFamilies({"default", "pikachu"}, options));
}
5625

Y
Yi Wu 已提交
5626 5627 5628 5629 5630 5631 5632 5633 5634 5635 5636
// Make sure that Flushes can proceed in parallel with CompactRange()
TEST_F(DBTest, FlushesInParallelWithCompactRange) {
  // iter == 0 -- leveled
  // iter == 1 -- leveled, but throw in a flush between two levels compacting
  // iter == 2 -- universal
  for (int iter = 0; iter < 3; ++iter) {
    Options options = CurrentOptions();
    if (iter < 2) {
      options.compaction_style = kCompactionStyleLevel;
    } else {
      options.compaction_style = kCompactionStyleUniversal;
5637
    }
Y
Yi Wu 已提交
5638 5639 5640 5641 5642 5643 5644
    options.write_buffer_size = 110 << 10;
    options.level0_file_num_compaction_trigger = 4;
    options.num_levels = 4;
    options.compression = kNoCompression;
    options.max_bytes_for_level_base = 450 << 10;
    options.target_file_size_base = 98 << 10;
    options.max_write_buffer_number = 2;
5645 5646 5647

    DestroyAndReopen(options);

Y
Yi Wu 已提交
5648 5649 5650
    Random rnd(301);
    for (int num = 0; num < 14; num++) {
      GenerateNewRandomFile(&rnd);
5651 5652
    }

Y
Yi Wu 已提交
5653 5654 5655 5656 5657 5658 5659 5660 5661 5662 5663 5664
    if (iter == 1) {
      rocksdb::SyncPoint::GetInstance()->LoadDependency(
          {{"DBImpl::RunManualCompaction()::1",
            "DBTest::FlushesInParallelWithCompactRange:1"},
           {"DBTest::FlushesInParallelWithCompactRange:2",
            "DBImpl::RunManualCompaction()::2"}});
    } else {
      rocksdb::SyncPoint::GetInstance()->LoadDependency(
          {{"CompactionJob::Run():Start",
            "DBTest::FlushesInParallelWithCompactRange:1"},
           {"DBTest::FlushesInParallelWithCompactRange:2",
            "CompactionJob::Run():End"}});
5665
    }
Y
Yi Wu 已提交
5666
    rocksdb::SyncPoint::GetInstance()->EnableProcessing();
5667

D
Dmitri Smirnov 已提交
5668
    std::vector<port::Thread> threads;
Y
Yi Wu 已提交
5669
    threads.emplace_back([&]() { Compact("a", "z"); });
5670

Y
Yi Wu 已提交
5671 5672 5673 5674 5675 5676 5677
    TEST_SYNC_POINT("DBTest::FlushesInParallelWithCompactRange:1");

    // this has to start a flush. if flushes are blocked, this will try to
    // create
    // 3 memtables, and that will fail because max_write_buffer_number is 2
    for (int num = 0; num < 3; num++) {
      GenerateNewRandomFile(&rnd, /* nowait */ true);
5678 5679
    }

Y
Yi Wu 已提交
5680
    TEST_SYNC_POINT("DBTest::FlushesInParallelWithCompactRange:2");
5681

Y
Yi Wu 已提交
5682 5683
    for (auto& t : threads) {
      t.join();
5684
    }
Y
Yi Wu 已提交
5685 5686
    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
  }
5687 5688
}

Y
Yi Wu 已提交
5689 5690
TEST_F(DBTest, DelayedWriteRate) {
  const int kEntriesPerMemTable = 100;
S
Siying Dong 已提交
5691
  const int kTotalFlushes = 12;
5692

D
dyniusz 已提交
5693
  Options options = CurrentOptions();
Y
Yi Wu 已提交
5694 5695
  env_->SetBackgroundThreads(1, Env::LOW);
  options.env = env_;
M
Maysam Yabandeh 已提交
5696
  env_->no_slowdown_ = true;
Y
Yi Wu 已提交
5697 5698 5699 5700 5701 5702 5703 5704 5705
  options.write_buffer_size = 100000000;
  options.max_write_buffer_number = 256;
  options.max_background_compactions = 1;
  options.level0_file_num_compaction_trigger = 3;
  options.level0_slowdown_writes_trigger = 3;
  options.level0_stop_writes_trigger = 999999;
  options.delayed_write_rate = 20000000;  // Start with 200MB/s
  options.memtable_factory.reset(
      new SpecialSkipListFactory(kEntriesPerMemTable));
D
dyniusz 已提交
5706

Y
Yi Wu 已提交
5707 5708 5709 5710 5711 5712 5713 5714 5715 5716
  CreateAndReopenWithCF({"pikachu"}, options);

  // Block compactions
  test::SleepingBackgroundTask sleeping_task_low;
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
                 Env::Priority::LOW);

  for (int i = 0; i < 3; i++) {
    Put(Key(i), std::string(10000, 'x'));
    Flush();
D
dyniusz 已提交
5717 5718
  }

Y
Yi Wu 已提交
5719 5720 5721 5722 5723 5724 5725 5726 5727 5728 5729 5730 5731 5732 5733 5734 5735 5736
  // These writes will be slowed down to 1KB/s
  uint64_t estimated_sleep_time = 0;
  Random rnd(301);
  Put("", "");
  uint64_t cur_rate = options.delayed_write_rate;
  for (int i = 0; i < kTotalFlushes; i++) {
    uint64_t size_memtable = 0;
    for (int j = 0; j < kEntriesPerMemTable; j++) {
      auto rand_num = rnd.Uniform(20);
      // Spread the size range to more.
      size_t entry_size = rand_num * rand_num * rand_num;
      WriteOptions wo;
      Put(Key(i), std::string(entry_size, 'x'), wo);
      size_memtable += entry_size + 18;
      // Occasionally sleep a while
      if (rnd.Uniform(20) == 6) {
        env_->SleepForMicroseconds(2666);
      }
D
dyniusz 已提交
5737
    }
Y
Yi Wu 已提交
5738 5739 5740
    dbfull()->TEST_WaitForFlushMemTable();
    estimated_sleep_time += size_memtable * 1000000u / cur_rate;
    // Slow down twice. One for memtable switch and one for flush finishes.
S
Siying Dong 已提交
5741 5742
    cur_rate = static_cast<uint64_t>(static_cast<double>(cur_rate) *
                                     kIncSlowdownRatio * kIncSlowdownRatio);
D
dyniusz 已提交
5743
  }
Y
Yi Wu 已提交
5744 5745 5746 5747 5748
  // Estimate the total sleep time fall into the rough range.
  ASSERT_GT(env_->addon_time_.load(),
            static_cast<int64_t>(estimated_sleep_time / 2));
  ASSERT_LT(env_->addon_time_.load(),
            static_cast<int64_t>(estimated_sleep_time * 2));
D
dyniusz 已提交
5749

M
Maysam Yabandeh 已提交
5750
  env_->no_slowdown_ = false;
Y
Yi Wu 已提交
5751 5752 5753
  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
  sleeping_task_low.WakeUp();
  sleeping_task_low.WaitUntilDone();
D
dyniusz 已提交
5754 5755
}

Y
Yi Wu 已提交
5756 5757 5758 5759 5760 5761 5762 5763 5764 5765 5766 5767 5768 5769 5770
TEST_F(DBTest, HardLimit) {
  Options options = CurrentOptions();
  options.env = env_;
  env_->SetBackgroundThreads(1, Env::LOW);
  options.max_write_buffer_number = 256;
  options.write_buffer_size = 110 << 10;  // 110KB
  options.arena_block_size = 4 * 1024;
  options.level0_file_num_compaction_trigger = 4;
  options.level0_slowdown_writes_trigger = 999999;
  options.level0_stop_writes_trigger = 999999;
  options.hard_pending_compaction_bytes_limit = 800 << 10;
  options.max_bytes_for_level_base = 10000000000u;
  options.max_background_compactions = 1;
  options.memtable_factory.reset(
      new SpecialSkipListFactory(KNumKeysByGenerateNewFile - 1));
5771

Y
Yi Wu 已提交
5772 5773 5774 5775
  env_->SetBackgroundThreads(1, Env::LOW);
  test::SleepingBackgroundTask sleeping_task_low;
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
                 Env::Priority::LOW);
5776

Y
Yi Wu 已提交
5777
  CreateAndReopenWithCF({"pikachu"}, options);
5778

Y
Yi Wu 已提交
5779 5780
  std::atomic<int> callback_count(0);
  rocksdb::SyncPoint::GetInstance()->SetCallBack("DBImpl::DelayWrite:Wait",
5781
                                                 [&](void* /*arg*/) {
Y
Yi Wu 已提交
5782 5783 5784 5785
                                                   callback_count.fetch_add(1);
                                                   sleeping_task_low.WakeUp();
                                                 });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
5786

Y
Yi Wu 已提交
5787 5788 5789 5790 5791 5792
  Random rnd(301);
  int key_idx = 0;
  for (int num = 0; num < 5; num++) {
    GenerateNewFile(&rnd, &key_idx, true);
    dbfull()->TEST_WaitForFlushMemTable();
  }
5793

Y
Yi Wu 已提交
5794
  ASSERT_EQ(0, callback_count.load());
5795

Y
Yi Wu 已提交
5796 5797 5798 5799 5800
  for (int num = 0; num < 5; num++) {
    GenerateNewFile(&rnd, &key_idx, true);
    dbfull()->TEST_WaitForFlushMemTable();
  }
  ASSERT_GE(callback_count.load(), 1);
5801

Y
Yi Wu 已提交
5802 5803 5804
  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
  sleeping_task_low.WaitUntilDone();
}
5805

5806
#if !defined(ROCKSDB_LITE) && !defined(ROCKSDB_DISABLE_STALL_NOTIFICATION)
5807 5808
class WriteStallListener : public EventListener {
 public:
Y
Yi Wu 已提交
5809
  WriteStallListener() : condition_(WriteStallCondition::kNormal) {}
5810
  void OnStallConditionsChanged(const WriteStallInfo& info) override {
Y
Yi Wu 已提交
5811
    MutexLock l(&mutex_);
5812 5813 5814
    condition_ = info.condition.cur;
  }
  bool CheckCondition(WriteStallCondition expected) {
Y
Yi Wu 已提交
5815
    MutexLock l(&mutex_);
Y
Yi Wu 已提交
5816
    return expected == condition_;
5817 5818
  }
 private:
D
Dmitri Smirnov 已提交
5819
  port::Mutex   mutex_;
5820 5821 5822
  WriteStallCondition condition_;
};

Y
Yi Wu 已提交
5823 5824 5825 5826 5827 5828 5829 5830 5831
TEST_F(DBTest, SoftLimit) {
  Options options = CurrentOptions();
  options.env = env_;
  options.write_buffer_size = 100000;  // Small write buffer
  options.max_write_buffer_number = 256;
  options.level0_file_num_compaction_trigger = 1;
  options.level0_slowdown_writes_trigger = 3;
  options.level0_stop_writes_trigger = 999999;
  options.delayed_write_rate = 20000;  // About 200KB/s limited rate
5832
  options.soft_pending_compaction_bytes_limit = 160000;
Y
Yi Wu 已提交
5833 5834 5835 5836 5837
  options.target_file_size_base = 99999999;  // All into one file
  options.max_bytes_for_level_base = 50000;
  options.max_bytes_for_level_multiplier = 10;
  options.max_background_compactions = 1;
  options.compression = kNoCompression;
5838 5839
  WriteStallListener* listener = new WriteStallListener();
  options.listeners.emplace_back(listener);
5840

Y
Yi Wu 已提交
5841 5842 5843 5844 5845 5846 5847 5848 5849 5850 5851 5852 5853 5854 5855 5856 5857 5858 5859 5860 5861 5862 5863 5864 5865 5866 5867 5868 5869 5870 5871 5872 5873 5874 5875
  // FlushMemtable with opt.wait=true does not wait for
  // `OnStallConditionsChanged` being called. The event listener is triggered
  // on `JobContext::Clean`, which happens after flush result is installed.
  // We use sync point to create a custom WaitForFlush that waits for
  // context cleanup.
  port::Mutex flush_mutex;
  port::CondVar flush_cv(&flush_mutex);
  bool flush_finished = false;
  auto InstallFlushCallback = [&]() {
    {
      MutexLock l(&flush_mutex);
      flush_finished = false;
    }
    SyncPoint::GetInstance()->SetCallBack(
        "DBImpl::BackgroundCallFlush:ContextCleanedUp", [&](void*) {
          {
            MutexLock l(&flush_mutex);
            flush_finished = true;
          }
          flush_cv.SignalAll();
        });
  };
  auto WaitForFlush = [&]() {
    {
      MutexLock l(&flush_mutex);
      while (!flush_finished) {
        flush_cv.Wait();
      }
    }
    SyncPoint::GetInstance()->ClearCallBack(
        "DBImpl::BackgroundCallFlush:ContextCleanedUp");
  };

  rocksdb::SyncPoint::GetInstance()->EnableProcessing();

Y
Yi Wu 已提交
5876
  Reopen(options);
5877 5878 5879 5880 5881

  // Generating 360KB in Level 3
  for (int i = 0; i < 72; i++) {
    Put(Key(i), std::string(5000, 'x'));
    if (i % 10 == 0) {
5882
      dbfull()->TEST_FlushMemTable(true, true);
5883 5884 5885 5886 5887 5888 5889 5890 5891
    }
  }
  dbfull()->TEST_WaitForCompact();
  MoveFilesToLevel(3);

  // Generating 360KB in Level 2
  for (int i = 0; i < 72; i++) {
    Put(Key(i), std::string(5000, 'x'));
    if (i % 10 == 0) {
5892
      dbfull()->TEST_FlushMemTable(true, true);
5893 5894 5895 5896 5897
    }
  }
  dbfull()->TEST_WaitForCompact();
  MoveFilesToLevel(2);

Y
Yi Wu 已提交
5898
  Put(Key(0), "");
5899

Y
Yi Wu 已提交
5900 5901 5902 5903 5904
  test::SleepingBackgroundTask sleeping_task_low;
  // Block compactions
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
                 Env::Priority::LOW);
  sleeping_task_low.WaitUntilSleeping();
5905

Y
Yi Wu 已提交
5906 5907 5908 5909 5910
  // Create 3 L0 files, making score of L0 to be 3.
  for (int i = 0; i < 3; i++) {
    Put(Key(i), std::string(5000, 'x'));
    Put(Key(100 - i), std::string(5000, 'x'));
    // Flush the file. File size is around 30KB.
Y
Yi Wu 已提交
5911
    InstallFlushCallback();
5912
    dbfull()->TEST_FlushMemTable(true, true);
Y
Yi Wu 已提交
5913
    WaitForFlush();
Y
Yi Wu 已提交
5914 5915
  }
  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
5916
  ASSERT_TRUE(listener->CheckCondition(WriteStallCondition::kDelayed));
5917

Y
Yi Wu 已提交
5918 5919 5920 5921
  sleeping_task_low.WakeUp();
  sleeping_task_low.WaitUntilDone();
  sleeping_task_low.Reset();
  dbfull()->TEST_WaitForCompact();
5922

Y
Yi Wu 已提交
5923 5924 5925 5926
  // Now there is one L1 file but doesn't trigger soft_rate_limit
  // The L1 file size is around 30KB.
  ASSERT_EQ(NumTableFilesAtLevel(1), 1);
  ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
5927
  ASSERT_TRUE(listener->CheckCondition(WriteStallCondition::kNormal));
5928

Y
Yi Wu 已提交
5929 5930
  // Only allow one compactin going through.
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
5931
      "BackgroundCallCompaction:0", [&](void* /*arg*/) {
Y
Yi Wu 已提交
5932 5933 5934 5935 5936
        // Schedule a sleeping task.
        sleeping_task_low.Reset();
        env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
                       &sleeping_task_low, Env::Priority::LOW);
      });
5937

Y
Yi Wu 已提交
5938 5939 5940 5941 5942 5943 5944 5945
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
                 Env::Priority::LOW);
  sleeping_task_low.WaitUntilSleeping();
  // Create 3 L0 files, making score of L0 to be 3
  for (int i = 0; i < 3; i++) {
    Put(Key(10 + i), std::string(5000, 'x'));
    Put(Key(90 - i), std::string(5000, 'x'));
    // Flush the file. File size is around 30KB.
Y
Yi Wu 已提交
5946
    InstallFlushCallback();
5947
    dbfull()->TEST_FlushMemTable(true, true);
Y
Yi Wu 已提交
5948
    WaitForFlush();
5949 5950
  }

Y
Yi Wu 已提交
5951 5952 5953 5954 5955
  // Wake up sleep task to enable compaction to run and waits
  // for it to go to sleep state again to make sure one compaction
  // goes through.
  sleeping_task_low.WakeUp();
  sleeping_task_low.WaitUntilSleeping();
5956

Y
Yi Wu 已提交
5957 5958 5959 5960 5961
  // Now there is one L1 file (around 60KB) which exceeds 50KB base by 10KB
  // Given level multiplier 10, estimated pending compaction is around 100KB
  // doesn't trigger soft_pending_compaction_bytes_limit
  ASSERT_EQ(NumTableFilesAtLevel(1), 1);
  ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
5962
  ASSERT_TRUE(listener->CheckCondition(WriteStallCondition::kNormal));
5963

Y
Yi Wu 已提交
5964 5965 5966 5967 5968
  // Create 3 L0 files, making score of L0 to be 3, higher than L0.
  for (int i = 0; i < 3; i++) {
    Put(Key(20 + i), std::string(5000, 'x'));
    Put(Key(80 - i), std::string(5000, 'x'));
    // Flush the file. File size is around 30KB.
Y
Yi Wu 已提交
5969
    InstallFlushCallback();
5970
    dbfull()->TEST_FlushMemTable(true, true);
Y
Yi Wu 已提交
5971
    WaitForFlush();
5972
  }
Y
Yi Wu 已提交
5973 5974 5975 5976 5977 5978 5979
  // Wake up sleep task to enable compaction to run and waits
  // for it to go to sleep state again to make sure one compaction
  // goes through.
  sleeping_task_low.WakeUp();
  sleeping_task_low.WaitUntilSleeping();

  // Now there is one L1 file (around 90KB) which exceeds 50KB base by 40KB
5980 5981
  // L2 size is 360KB, so the estimated level fanout 4, estimated pending
  // compaction is around 200KB
Y
Yi Wu 已提交
5982 5983 5984
  // triggerring soft_pending_compaction_bytes_limit
  ASSERT_EQ(NumTableFilesAtLevel(1), 1);
  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
5985
  ASSERT_TRUE(listener->CheckCondition(WriteStallCondition::kDelayed));
5986

Y
Yi Wu 已提交
5987 5988
  sleeping_task_low.WakeUp();
  sleeping_task_low.WaitUntilSleeping();
5989

Y
Yi Wu 已提交
5990
  ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
5991
  ASSERT_TRUE(listener->CheckCondition(WriteStallCondition::kNormal));
5992

Y
Yi Wu 已提交
5993 5994 5995 5996
  // shrink level base so L2 will hit soft limit easier.
  ASSERT_OK(dbfull()->SetOptions({
      {"max_bytes_for_level_base", "5000"},
  }));
5997

Y
Yi Wu 已提交
5998 5999 6000
  Put("", "");
  Flush();
  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
6001
  ASSERT_TRUE(listener->CheckCondition(WriteStallCondition::kDelayed));
6002

Y
Yi Wu 已提交
6003 6004 6005 6006
  sleeping_task_low.WaitUntilSleeping();
  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
  sleeping_task_low.WakeUp();
  sleeping_task_low.WaitUntilDone();
6007 6008
}

Y
Yi Wu 已提交
6009
TEST_F(DBTest, LastWriteBufferDelay) {
6010
  Options options = CurrentOptions();
Y
Yi Wu 已提交
6011 6012 6013 6014 6015 6016 6017 6018 6019
  options.env = env_;
  options.write_buffer_size = 100000;
  options.max_write_buffer_number = 4;
  options.delayed_write_rate = 20000;
  options.compression = kNoCompression;
  options.disable_auto_compactions = true;
  int kNumKeysPerMemtable = 3;
  options.memtable_factory.reset(
      new SpecialSkipListFactory(kNumKeysPerMemtable));
6020

Y
Yi Wu 已提交
6021 6022 6023 6024 6025 6026
  Reopen(options);
  test::SleepingBackgroundTask sleeping_task;
  // Block flushes
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task,
                 Env::Priority::HIGH);
  sleeping_task.WaitUntilSleeping();
6027

Y
Yi Wu 已提交
6028 6029 6030 6031 6032 6033 6034
  // Create 3 L0 files, making score of L0 to be 3.
  for (int i = 0; i < 3; i++) {
    // Fill one mem table
    for (int j = 0; j < kNumKeysPerMemtable; j++) {
      Put(Key(j), "");
    }
    ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
6035
  }
Y
Yi Wu 已提交
6036 6037 6038
  // Inserting a new entry would create a new mem table, triggering slow down.
  Put(Key(0), "");
  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
6039

Y
Yi Wu 已提交
6040 6041 6042
  sleeping_task.WakeUp();
  sleeping_task.WaitUntilDone();
}
6043
#endif  // !defined(ROCKSDB_LITE) && !defined(ROCKSDB_DISABLE_STALL_NOTIFICATION)
6044

Y
Yi Wu 已提交
6045 6046
TEST_F(DBTest, FailWhenCompressionNotSupportedTest) {
  CompressionType compressions[] = {kZlibCompression, kBZip2Compression,
A
Aaron Gao 已提交
6047
                                    kLZ4Compression, kLZ4HCCompression,
6048 6049 6050
                                    kXpressCompression};
  for (auto comp : compressions) {
    if (!CompressionTypeSupported(comp)) {
Y
Yi Wu 已提交
6051 6052
      // not supported, we should fail the Open()
      Options options = CurrentOptions();
6053
      options.compression = comp;
Y
Yi Wu 已提交
6054 6055 6056 6057 6058
      ASSERT_TRUE(!TryReopen(options).ok());
      // Try if CreateColumnFamily also fails
      options.compression = kNoCompression;
      ASSERT_OK(TryReopen(options));
      ColumnFamilyOptions cf_options(options);
6059
      cf_options.compression = comp;
Y
Yi Wu 已提交
6060 6061
      ColumnFamilyHandle* handle;
      ASSERT_TRUE(!db_->CreateColumnFamily(cf_options, "name", &handle).ok());
6062 6063 6064 6065
    }
  }
}

6066 6067 6068 6069 6070 6071 6072 6073 6074 6075 6076 6077 6078
TEST_F(DBTest, CreateColumnFamilyShouldFailOnIncompatibleOptions) {
  Options options = CurrentOptions();
  options.max_open_files = 100;
  Reopen(options);

  ColumnFamilyOptions cf_options(options);
  // ttl is only supported when max_open_files is -1.
  cf_options.ttl = 3600;
  ColumnFamilyHandle* handle;
  ASSERT_NOK(db_->CreateColumnFamily(cf_options, "pikachu", &handle));
  delete handle;
}

Y
Yi Wu 已提交
6079 6080
#ifndef ROCKSDB_LITE
TEST_F(DBTest, RowCache) {
6081
  Options options = CurrentOptions();
Y
Yi Wu 已提交
6082 6083
  options.statistics = rocksdb::CreateDBStatistics();
  options.row_cache = NewLRUCache(8192);
6084 6085
  DestroyAndReopen(options);

Y
Yi Wu 已提交
6086 6087
  ASSERT_OK(Put("foo", "bar"));
  ASSERT_OK(Flush());
6088

Y
Yi Wu 已提交
6089 6090 6091 6092 6093 6094 6095 6096 6097
  ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_HIT), 0);
  ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_MISS), 0);
  ASSERT_EQ(Get("foo"), "bar");
  ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_HIT), 0);
  ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_MISS), 1);
  ASSERT_EQ(Get("foo"), "bar");
  ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_HIT), 1);
  ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_MISS), 1);
}
6098 6099 6100 6101 6102 6103 6104 6105 6106 6107 6108 6109 6110 6111 6112 6113 6114 6115 6116 6117 6118 6119 6120 6121 6122 6123 6124 6125 6126 6127

TEST_F(DBTest, PinnableSliceAndRowCache) {
  Options options = CurrentOptions();
  options.statistics = rocksdb::CreateDBStatistics();
  options.row_cache = NewLRUCache(8192);
  DestroyAndReopen(options);

  ASSERT_OK(Put("foo", "bar"));
  ASSERT_OK(Flush());

  ASSERT_EQ(Get("foo"), "bar");
  ASSERT_EQ(
      reinterpret_cast<LRUCache*>(options.row_cache.get())->TEST_GetLRUSize(),
      1);

  {
    PinnableSlice pin_slice;
    ASSERT_EQ(Get("foo", &pin_slice), Status::OK());
    ASSERT_EQ(pin_slice.ToString(), "bar");
    // Entry is already in cache, lookup will remove the element from lru
    ASSERT_EQ(
        reinterpret_cast<LRUCache*>(options.row_cache.get())->TEST_GetLRUSize(),
        0);
  }
  // After PinnableSlice destruction element is added back in LRU
  ASSERT_EQ(
      reinterpret_cast<LRUCache*>(options.row_cache.get())->TEST_GetLRUSize(),
      1);
}

Y
Yi Wu 已提交
6128
#endif  // ROCKSDB_LITE
6129

Y
Yi Wu 已提交
6130 6131 6132 6133 6134
TEST_F(DBTest, DeletingOldWalAfterDrop) {
  rocksdb::SyncPoint::GetInstance()->LoadDependency(
      {{"Test:AllowFlushes", "DBImpl::BGWorkFlush"},
       {"DBImpl::BGWorkFlush:done", "Test:WaitForFlush"}});
  rocksdb::SyncPoint::GetInstance()->ClearTrace();
6135

Y
Yi Wu 已提交
6136 6137 6138 6139 6140 6141 6142 6143 6144 6145 6146
  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
  Options options = CurrentOptions();
  options.max_total_wal_size = 8192;
  options.compression = kNoCompression;
  options.write_buffer_size = 1 << 20;
  options.level0_file_num_compaction_trigger = (1 << 30);
  options.level0_slowdown_writes_trigger = (1 << 30);
  options.level0_stop_writes_trigger = (1 << 30);
  options.disable_auto_compactions = true;
  DestroyAndReopen(options);
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
6147

Y
Yi Wu 已提交
6148 6149 6150 6151 6152 6153 6154 6155 6156 6157 6158 6159 6160 6161 6162
  CreateColumnFamilies({"cf1", "cf2"}, options);
  ASSERT_OK(Put(0, "key1", DummyString(8192)));
  ASSERT_OK(Put(0, "key2", DummyString(8192)));
  // the oldest wal should now be getting_flushed
  ASSERT_OK(db_->DropColumnFamily(handles_[0]));
  // all flushes should now do nothing because their CF is dropped
  TEST_SYNC_POINT("Test:AllowFlushes");
  TEST_SYNC_POINT("Test:WaitForFlush");
  uint64_t lognum1 = dbfull()->TEST_LogfileNumber();
  ASSERT_OK(Put(1, "key3", DummyString(8192)));
  ASSERT_OK(Put(1, "key4", DummyString(8192)));
  // new wal should have been created
  uint64_t lognum2 = dbfull()->TEST_LogfileNumber();
  EXPECT_GT(lognum2, lognum1);
}
6163

Y
Yi Wu 已提交
6164 6165 6166 6167 6168
TEST_F(DBTest, UnsupportedManualSync) {
  DestroyAndReopen(CurrentOptions());
  env_->is_wal_sync_thread_safe_.store(false);
  Status s = db_->SyncWAL();
  ASSERT_TRUE(s.IsNotSupported());
6169 6170
}

6171
INSTANTIATE_TEST_CASE_P(DBTestWithParam, DBTestWithParam,
6172 6173
                        ::testing::Combine(::testing::Values(1, 4),
                                           ::testing::Bool()));
6174

6175
TEST_F(DBTest, PauseBackgroundWorkTest) {
S
sdong 已提交
6176
  Options options = CurrentOptions();
6177 6178 6179
  options.write_buffer_size = 100000;  // Small write buffer
  Reopen(options);

D
Dmitri Smirnov 已提交
6180
  std::vector<port::Thread> threads;
6181
  std::atomic<bool> done(false);
6182 6183 6184 6185 6186 6187 6188 6189 6190 6191
  db_->PauseBackgroundWork();
  threads.emplace_back([&]() {
    Random rnd(301);
    for (int i = 0; i < 10000; ++i) {
      Put(RandomString(&rnd, 10), RandomString(&rnd, 10));
    }
    done.store(true);
  });
  env_->SleepForMicroseconds(200000);
  // make sure the thread is not done
A
Andrew Kryczka 已提交
6192
  ASSERT_FALSE(done.load());
6193 6194 6195 6196 6197
  db_->ContinueBackgroundWork();
  for (auto& t : threads) {
    t.join();
  }
  // now it's done
S
Siying Dong 已提交
6198
  ASSERT_TRUE(done.load());
6199
}
6200 6201 6202 6203 6204 6205 6206 6207 6208 6209 6210

// Keep spawning short-living threads that create an iterator and quit.
// Meanwhile in another thread keep flushing memtables.
// This used to cause a deadlock.
TEST_F(DBTest, ThreadLocalPtrDeadlock) {
  std::atomic<int> flushes_done{0};
  std::atomic<int> threads_destroyed{0};
  auto done = [&] {
    return flushes_done.load() > 10;
  };

D
Dmitri Smirnov 已提交
6211
  port::Thread flushing_thread([&] {
6212 6213 6214 6215 6216 6217 6218 6219 6220
    for (int i = 0; !done(); ++i) {
      ASSERT_OK(db_->Put(WriteOptions(), Slice("hi"),
                         Slice(std::to_string(i).c_str())));
      ASSERT_OK(db_->Flush(FlushOptions()));
      int cnt = ++flushes_done;
      fprintf(stderr, "Flushed %d times\n", cnt);
    }
  });

D
Dmitri Smirnov 已提交
6221
  std::vector<port::Thread> thread_spawning_threads(10);
6222
  for (auto& t: thread_spawning_threads) {
D
Dmitri Smirnov 已提交
6223
    t = port::Thread([&] {
6224 6225
      while (!done()) {
        {
D
Dmitri Smirnov 已提交
6226
          port::Thread tmp_thread([&] {
6227 6228 6229 6230 6231 6232 6233 6234 6235 6236 6237 6238 6239 6240 6241 6242 6243
            auto it = db_->NewIterator(ReadOptions());
            delete it;
          });
          tmp_thread.join();
        }
        ++threads_destroyed;
      }
    });
  }

  for (auto& t: thread_spawning_threads) {
    t.join();
  }
  flushing_thread.join();
  fprintf(stderr, "Done. Flushed %d times, destroyed %d threads\n",
          flushes_done.load(), threads_destroyed.load());
}
6244 6245 6246 6247 6248 6249 6250 6251 6252 6253 6254

TEST_F(DBTest, LargeBlockSizeTest) {
  Options options = CurrentOptions();
  CreateAndReopenWithCF({"pikachu"}, options);
  ASSERT_OK(Put(0, "foo", "bar"));
  BlockBasedTableOptions table_options;
  table_options.block_size = 8LL*1024*1024*1024LL;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  ASSERT_NOK(TryReopenWithColumnFamilies({"default", "pikachu"}, options));
}

6255
}  // namespace rocksdb
J
jorlow@chromium.org 已提交
6256 6257

int main(int argc, char** argv) {
6258
  rocksdb::port::InstallStackTraceHandler();
I
Igor Sugak 已提交
6259 6260
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
J
jorlow@chromium.org 已提交
6261
}