//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/db_impl/db_impl.h"

#include <cinttypes>

#include "db/error_handler.h"
#include "db/event_helpers.h"
#include "monitoring/perf_context_imp.h"
#include "options/options_helper.h"
#include "test_util/sync_point.h"

namespace rocksdb {
// Convenience methods
Status DBImpl::Put(const WriteOptions& o, ColumnFamilyHandle* column_family,
                   const Slice& key, const Slice& val) {
  return DB::Put(o, column_family, key, val);
}

Status DBImpl::Merge(const WriteOptions& o, ColumnFamilyHandle* column_family,
                     const Slice& key, const Slice& val) {
  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
  if (!cfh->cfd()->ioptions()->merge_operator) {
    return Status::NotSupported("Provide a merge_operator when opening DB");
  } else {
    return DB::Merge(o, column_family, key, val);
  }
}

Status DBImpl::Delete(const WriteOptions& write_options,
                      ColumnFamilyHandle* column_family, const Slice& key) {
  return DB::Delete(write_options, column_family, key);
}

Status DBImpl::SingleDelete(const WriteOptions& write_options,
                            ColumnFamilyHandle* column_family,
                            const Slice& key) {
  return DB::SingleDelete(write_options, column_family, key);
}

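// --- Editorial example (not part of the original source) --------------------
// A minimal sketch of how a client drives the convenience wrappers above via
// the public DB interface. The function name and all keys/values are
// hypothetical; Merge() additionally requires a merge_operator in the Options
// the DB was opened with, which is what the NotSupported check above enforces.
static Status ExampleBasicWrites(DB* db) {
  WriteOptions wo;
  Status s = db->Put(wo, "planet", "earth");  // insert or overwrite
  if (s.ok()) {
    s = db->Merge(wo, "counter", "1");  // needs Options::merge_operator
  }
  if (s.ok()) {
    s = db->Delete(wo, "planet");  // regular tombstone
  }
  if (s.ok()) {
    // SingleDelete is only correct for keys written at most once and never
    // overwritten or merged.
    s = db->SingleDelete(wo, "counter");
  }
  return s;
}
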
void DBImpl::SetRecoverableStatePreReleaseCallback(
    PreReleaseCallback* callback) {
  recoverable_state_pre_release_callback_.reset(callback);
}

Status DBImpl::Write(const WriteOptions& write_options, WriteBatch* my_batch) {
  return WriteImpl(write_options, my_batch, nullptr, nullptr);
}

#ifndef ROCKSDB_LITE
Status DBImpl::WriteWithCallback(const WriteOptions& write_options,
                                 WriteBatch* my_batch,
                                 WriteCallback* callback) {
  return WriteImpl(write_options, my_batch, callback, nullptr);
}
#endif  // ROCKSDB_LITE

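// --- Editorial example (not part of the original source) --------------------
// DB::Write() is the entry point that funnels into WriteImpl below. A sketch
// (hypothetical names) of batching several updates so they commit atomically
// as a single unit in the write path:
static Status ExampleAtomicBatch(DB* db) {
  WriteBatch batch;
  batch.Put("key1", "value1");
  batch.Put("key2", "value2");
  batch.Delete("obsolete_key");
  // Either every operation in the batch becomes visible, or none does.
  return db->Write(WriteOptions(), &batch);
}
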
// The main write queue. This is the only write queue that updates LastSequence.
// When using one write queue, the same sequence also indicates the last
// published sequence.
Status DBImpl::WriteImpl(const WriteOptions& write_options,
                         WriteBatch* my_batch, WriteCallback* callback,
                         uint64_t* log_used, uint64_t log_ref,
                         bool disable_memtable, uint64_t* seq_used,
                         size_t batch_cnt,
                         PreReleaseCallback* pre_release_callback) {
  assert(!seq_per_batch_ || batch_cnt != 0);
  if (my_batch == nullptr) {
    return Status::Corruption("Batch is nullptr!");
  }
  if (tracer_) {
    InstrumentedMutexLock lock(&trace_mutex_);
    if (tracer_) {
      tracer_->Write(my_batch);
    }
  }
  if (write_options.sync && write_options.disableWAL) {
    return Status::InvalidArgument("Sync writes have to enable WAL.");
  }
  if (two_write_queues_ && immutable_db_options_.enable_pipelined_write) {
    return Status::NotSupported(
        "pipelined_writes is not compatible with concurrent prepares");
  }
  if (seq_per_batch_ && immutable_db_options_.enable_pipelined_write) {
    // TODO(yiwu): update pipeline write with seq_per_batch and batch_cnt
    return Status::NotSupported(
        "pipelined_writes is not compatible with seq_per_batch");
  }
  if (immutable_db_options_.unordered_write &&
      immutable_db_options_.enable_pipelined_write) {
    return Status::NotSupported(
        "pipelined_writes is not compatible with unordered_write");
  }
  // Otherwise IsLatestPersistentState optimization does not make sense
  assert(!WriteBatchInternal::IsLatestPersistentState(my_batch) ||
         disable_memtable);

  Status status;
  if (write_options.low_pri) {
    status = ThrottleLowPriWritesIfNeeded(write_options, my_batch);
    if (!status.ok()) {
      return status;
    }
  }

  if (two_write_queues_ && disable_memtable) {
    AssignOrder assign_order =
        seq_per_batch_ ? kDoAssignOrder : kDontAssignOrder;
    // Otherwise it is WAL-only Prepare batches in WriteCommitted policy and
    // they don't consume a sequence number.
    return WriteImplWALOnly(&nonmem_write_thread_, write_options, my_batch,
                            callback, log_used, log_ref, seq_used, batch_cnt,
                            pre_release_callback, assign_order,
                            kDontPublishLastSeq, disable_memtable);
  }

  if (immutable_db_options_.unordered_write) {
    const size_t sub_batch_cnt = batch_cnt != 0
                                     ? batch_cnt
                                     // every key is a sub-batch consuming a seq
                                     : WriteBatchInternal::Count(my_batch);
    uint64_t seq;
    // Use a write thread to i) optimize for WAL write, ii) publish last
    // sequence in increasing order, iii) call pre_release_callback serially
    status = WriteImplWALOnly(&write_thread_, write_options, my_batch, callback,
                              log_used, log_ref, &seq, sub_batch_cnt,
                              pre_release_callback, kDoAssignOrder,
                              kDoPublishLastSeq, disable_memtable);
    TEST_SYNC_POINT("DBImpl::WriteImpl:UnorderedWriteAfterWriteWAL");
    if (!status.ok()) {
      return status;
    }
    if (seq_used) {
      *seq_used = seq;
    }
    if (!disable_memtable) {
      TEST_SYNC_POINT("DBImpl::WriteImpl:BeforeUnorderedWriteMemtable");
      status = UnorderedWriteMemtable(write_options, my_batch, callback,
                                      log_ref, seq, sub_batch_cnt);
    }
    return status;
  }


  if (immutable_db_options_.enable_pipelined_write) {
    return PipelinedWriteImpl(write_options, my_batch, callback, log_used,
                              log_ref, disable_memtable, seq_used);
  }

  PERF_TIMER_GUARD(write_pre_and_post_process_time);
  WriteThread::Writer w(write_options, my_batch, callback, log_ref,
                        disable_memtable, batch_cnt, pre_release_callback);

  if (!write_options.disableWAL) {
    RecordTick(stats_, WRITE_WITH_WAL);
  }

  StopWatch write_sw(env_, immutable_db_options_.statistics.get(), DB_WRITE);

  write_thread_.JoinBatchGroup(&w);
  if (w.state == WriteThread::STATE_PARALLEL_MEMTABLE_WRITER) {
    // we are a non-leader in a parallel group

    if (w.ShouldWriteToMemtable()) {
      PERF_TIMER_STOP(write_pre_and_post_process_time);
      PERF_TIMER_GUARD(write_memtable_time);

      ColumnFamilyMemTablesImpl column_family_memtables(
          versions_->GetColumnFamilySet());
      w.status = WriteBatchInternal::InsertInto(
          &w, w.sequence, &column_family_memtables, &flush_scheduler_,
          &trim_history_scheduler_,
          write_options.ignore_missing_column_families, 0 /*log_number*/, this,
          true /*concurrent_memtable_writes*/, seq_per_batch_, w.batch_cnt,
          batch_per_txn_, write_options.memtable_insert_hint_per_batch);

      PERF_TIMER_START(write_pre_and_post_process_time);
    }

    if (write_thread_.CompleteParallelMemTableWriter(&w)) {
      // we're responsible for exit batch group
      // TODO(myabandeh): propagate status to write_group
      auto last_sequence = w.write_group->last_sequence;
      versions_->SetLastSequence(last_sequence);
      MemTableInsertStatusCheck(w.status);
      write_thread_.ExitAsBatchGroupFollower(&w);
    }
    assert(w.state == WriteThread::STATE_COMPLETED);
    // STATE_COMPLETED conditional below handles exit

    status = w.FinalStatus();
  }
  if (w.state == WriteThread::STATE_COMPLETED) {
    if (log_used != nullptr) {
      *log_used = w.log_used;
    }
    if (seq_used != nullptr) {
      *seq_used = w.sequence;
    }
    // write is complete and leader has updated sequence
    return w.FinalStatus();
  }
  // else we are the leader of the write batch group
  assert(w.state == WriteThread::STATE_GROUP_LEADER);

  // Once it reaches this point, the current writer "w" will try to do its
  // write job.  It may also pick up some of the remaining writers in the
  // "writers_" queue when it finds it suitable, and finish them in the same
  // write batch.  This is how a write job could be done by another writer.
  WriteContext write_context;
  WriteThread::WriteGroup write_group;
  bool in_parallel_group = false;
  uint64_t last_sequence = kMaxSequenceNumber;

  mutex_.Lock();

  bool need_log_sync = write_options.sync;
  bool need_log_dir_sync = need_log_sync && !log_dir_synced_;
  if (!two_write_queues_ || !disable_memtable) {
    // With concurrent writes we do preprocess only in the write thread that
    // also does write to memtable to avoid sync issue on shared data structure
    // with the other thread

    // PreprocessWrite does its own perf timing.
    PERF_TIMER_STOP(write_pre_and_post_process_time);

    status = PreprocessWrite(write_options, &need_log_sync, &write_context);
    if (!two_write_queues_) {
      // Assign it after ::PreprocessWrite since the sequence might advance
      // inside it by WriteRecoverableState
      last_sequence = versions_->LastSequence();
    }

    PERF_TIMER_START(write_pre_and_post_process_time);
  }
  log::Writer* log_writer = logs_.back().writer;

  mutex_.Unlock();

  // Add to log and apply to memtable.  We can release the lock
  // during this phase since &w is currently responsible for logging
  // and protects against concurrent loggers and concurrent writes
  // into memtables

  TEST_SYNC_POINT("DBImpl::WriteImpl:BeforeLeaderEnters");
  last_batch_group_size_ =
      write_thread_.EnterAsBatchGroupLeader(&w, &write_group);

  if (status.ok()) {
    // Rules for when we can update the memtable concurrently
    // 1. supported by memtable
    // 2. Puts are not okay if inplace_update_support
    // 3. Merges are not okay
    //
    // Rules 1..2 are enforced by checking the options
    // during startup (CheckConcurrentWritesSupported), so if
    // options.allow_concurrent_memtable_write is true then they can be
    // assumed to be true.  Rule 3 is checked for each batch.  We could
    // relax rule 2 if we could prevent write batches from referring
    // more than once to a particular key.
    bool parallel = immutable_db_options_.allow_concurrent_memtable_write &&
                    write_group.size > 1;
    size_t total_count = 0;
    size_t valid_batches = 0;
    size_t total_byte_size = 0;
    size_t pre_release_callback_cnt = 0;
    for (auto* writer : write_group) {
      if (writer->CheckCallback(this)) {
        valid_batches += writer->batch_cnt;
        if (writer->ShouldWriteToMemtable()) {
          total_count += WriteBatchInternal::Count(writer->batch);
          parallel = parallel && !writer->batch->HasMerge();
        }
        total_byte_size = WriteBatchInternal::AppendedByteSize(
            total_byte_size, WriteBatchInternal::ByteSize(writer->batch));
        if (writer->pre_release_callback) {
          pre_release_callback_cnt++;
        }
      }
    }
    // Note about seq_per_batch_: either disableWAL is set for the entire write
    // group or not. In either case we inc seq for each write batch with no
    // failed callback. This means that there could be a batch with
    // disable_memtable in between; although we do not write this batch to
    // memtable it still consumes a seq. Otherwise, if !seq_per_batch_, we inc
    // the seq per valid written key to mem.
    size_t seq_inc = seq_per_batch_ ? valid_batches : total_count;

    const bool concurrent_update = two_write_queues_;
    // Update stats while we are an exclusive group leader, so we know
    // that nobody else can be writing to these particular stats.
    // We're optimistic, updating the stats before we successfully
    // commit.  That lets us release our leader status early.
    auto stats = default_cf_internal_stats_;
    stats->AddDBStats(InternalStats::kIntStatsNumKeysWritten, total_count,
                      concurrent_update);
    RecordTick(stats_, NUMBER_KEYS_WRITTEN, total_count);
    stats->AddDBStats(InternalStats::kIntStatsBytesWritten, total_byte_size,
                      concurrent_update);
    RecordTick(stats_, BYTES_WRITTEN, total_byte_size);
    stats->AddDBStats(InternalStats::kIntStatsWriteDoneBySelf, 1,
                      concurrent_update);
    RecordTick(stats_, WRITE_DONE_BY_SELF);
    auto write_done_by_other = write_group.size - 1;
    if (write_done_by_other > 0) {
      stats->AddDBStats(InternalStats::kIntStatsWriteDoneByOther,
                        write_done_by_other, concurrent_update);
      RecordTick(stats_, WRITE_DONE_BY_OTHER, write_done_by_other);
    }
    RecordInHistogram(stats_, BYTES_PER_WRITE, total_byte_size);

    if (write_options.disableWAL) {
      has_unpersisted_data_.store(true, std::memory_order_relaxed);
    }

    PERF_TIMER_STOP(write_pre_and_post_process_time);

    if (!two_write_queues_) {
      if (status.ok() && !write_options.disableWAL) {
        PERF_TIMER_GUARD(write_wal_time);
        status = WriteToWAL(write_group, log_writer, log_used, need_log_sync,
                            need_log_dir_sync, last_sequence + 1);
      }
    } else {
      if (status.ok() && !write_options.disableWAL) {
        PERF_TIMER_GUARD(write_wal_time);
        // LastAllocatedSequence is increased inside WriteToWAL under
        // wal_write_mutex_ to ensure ordered events in WAL
        status = ConcurrentWriteToWAL(write_group, log_used, &last_sequence,
                                      seq_inc);
      } else {
        // Otherwise we inc seq number for memtable writes
        last_sequence = versions_->FetchAddLastAllocatedSequence(seq_inc);
      }
    }
    assert(last_sequence != kMaxSequenceNumber);
    const SequenceNumber current_sequence = last_sequence + 1;
    last_sequence += seq_inc;

    // PreReleaseCallback is called after WAL write and before memtable write
    if (status.ok()) {
      SequenceNumber next_sequence = current_sequence;
      size_t index = 0;
      // Note: the logic for advancing seq here must be consistent with the
      // logic in WriteBatchInternal::InsertInto(write_group...) as well as
      // with WriteBatchInternal::InsertInto(write_batch...) that is called on
      // the merged batch during recovery from the WAL.
      for (auto* writer : write_group) {
        if (writer->CallbackFailed()) {
          continue;
        }
        writer->sequence = next_sequence;
        if (writer->pre_release_callback) {
          Status ws = writer->pre_release_callback->Callback(
              writer->sequence, disable_memtable, writer->log_used, index++,
              pre_release_callback_cnt);
          if (!ws.ok()) {
            status = ws;
            break;
          }
        }
        if (seq_per_batch_) {
          assert(writer->batch_cnt);
          next_sequence += writer->batch_cnt;
        } else if (writer->ShouldWriteToMemtable()) {
          next_sequence += WriteBatchInternal::Count(writer->batch);
        }
      }
    }

    if (status.ok()) {
      PERF_TIMER_GUARD(write_memtable_time);

      if (!parallel) {
        // w.sequence will be set inside InsertInto
        w.status = WriteBatchInternal::InsertInto(
            write_group, current_sequence, column_family_memtables_.get(),
            &flush_scheduler_, &trim_history_scheduler_,
            write_options.ignore_missing_column_families,
            0 /*recovery_log_number*/, this, parallel, seq_per_batch_,
            batch_per_txn_);
      } else {
        write_group.last_sequence = last_sequence;
        write_thread_.LaunchParallelMemTableWriters(&write_group);
        in_parallel_group = true;

        // Each parallel follower is doing its own writes. The leader should
        // also do its own.
        if (w.ShouldWriteToMemtable()) {
          ColumnFamilyMemTablesImpl column_family_memtables(
              versions_->GetColumnFamilySet());
          assert(w.sequence == current_sequence);
          w.status = WriteBatchInternal::InsertInto(
              &w, w.sequence, &column_family_memtables, &flush_scheduler_,
              &trim_history_scheduler_,
              write_options.ignore_missing_column_families, 0 /*log_number*/,
              this, true /*concurrent_memtable_writes*/, seq_per_batch_,
              w.batch_cnt, batch_per_txn_,
              write_options.memtable_insert_hint_per_batch);
        }
      }
      if (seq_used != nullptr) {
        *seq_used = w.sequence;
      }
    }
  }
  PERF_TIMER_START(write_pre_and_post_process_time);

  if (!w.CallbackFailed()) {
    WriteStatusCheck(status);
  }

  if (need_log_sync) {
    mutex_.Lock();
    MarkLogsSynced(logfile_number_, need_log_dir_sync, status);
    mutex_.Unlock();
    // Requesting sync with two_write_queues_ is expected to be very rare. We
    // hence provide a simple implementation that is not necessarily efficient.
    if (two_write_queues_) {
      if (manual_wal_flush_) {
        status = FlushWAL(true);
      } else {
        status = SyncWAL();
      }
    }
  }

  bool should_exit_batch_group = true;
  if (in_parallel_group) {
    // CompleteParallelWorker returns true if this thread should
    // handle exit, false means somebody else did
    should_exit_batch_group = write_thread_.CompleteParallelMemTableWriter(&w);
  }
  if (should_exit_batch_group) {
    if (status.ok()) {
      // Note: if we are to resume after non-OK statuses we need to revisit how
      // we react to non-OK statuses here.
      versions_->SetLastSequence(last_sequence);
    }
    MemTableInsertStatusCheck(w.status);
    write_thread_.ExitAsBatchGroupLeader(write_group, status);
  }

  if (status.ok()) {
    status = w.FinalStatus();
  }
  return status;
}

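// --- Editorial example (not part of the original source) --------------------
// A sketch of the WriteOptions durability knobs validated at the top of
// WriteImpl: sync requires the WAL, so sync = true combined with
// disableWAL = true is rejected with InvalidArgument. Names are hypothetical.
static Status ExampleDurableWrite(DB* db, WriteBatch* batch) {
  WriteOptions wo;
  wo.disableWAL = false;  // keep the write-ahead log
  wo.sync = true;         // persist the WAL before acknowledging the write
  return db->Write(wo, batch);
}
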
Status DBImpl::PipelinedWriteImpl(const WriteOptions& write_options,
                                  WriteBatch* my_batch, WriteCallback* callback,
                                  uint64_t* log_used, uint64_t log_ref,
                                  bool disable_memtable, uint64_t* seq_used) {
  PERF_TIMER_GUARD(write_pre_and_post_process_time);
  StopWatch write_sw(env_, immutable_db_options_.statistics.get(), DB_WRITE);

  WriteContext write_context;

  WriteThread::Writer w(write_options, my_batch, callback, log_ref,
                        disable_memtable);
  write_thread_.JoinBatchGroup(&w);
  if (w.state == WriteThread::STATE_GROUP_LEADER) {
    WriteThread::WriteGroup wal_write_group;
    if (w.callback && !w.callback->AllowWriteBatching()) {
      write_thread_.WaitForMemTableWriters();
    }
    mutex_.Lock();
    bool need_log_sync = !write_options.disableWAL && write_options.sync;
    bool need_log_dir_sync = need_log_sync && !log_dir_synced_;
    // PreprocessWrite does its own perf timing.
    PERF_TIMER_STOP(write_pre_and_post_process_time);
    w.status = PreprocessWrite(write_options, &need_log_sync, &write_context);
    PERF_TIMER_START(write_pre_and_post_process_time);
    log::Writer* log_writer = logs_.back().writer;
    mutex_.Unlock();

    // This can set a non-OK status if a callback fails.
    last_batch_group_size_ =
        write_thread_.EnterAsBatchGroupLeader(&w, &wal_write_group);
    const SequenceNumber current_sequence =
        write_thread_.UpdateLastSequence(versions_->LastSequence()) + 1;
    size_t total_count = 0;
    size_t total_byte_size = 0;

    if (w.status.ok()) {
      SequenceNumber next_sequence = current_sequence;
      for (auto writer : wal_write_group) {
        if (writer->CheckCallback(this)) {
          if (writer->ShouldWriteToMemtable()) {
            writer->sequence = next_sequence;
            size_t count = WriteBatchInternal::Count(writer->batch);
            next_sequence += count;
            total_count += count;
          }
          total_byte_size = WriteBatchInternal::AppendedByteSize(
              total_byte_size, WriteBatchInternal::ByteSize(writer->batch));
        }
      }
      if (w.disable_wal) {
        has_unpersisted_data_.store(true, std::memory_order_relaxed);
      }
      write_thread_.UpdateLastSequence(current_sequence + total_count - 1);
    }

    auto stats = default_cf_internal_stats_;
    stats->AddDBStats(InternalStats::kIntStatsNumKeysWritten, total_count);
    RecordTick(stats_, NUMBER_KEYS_WRITTEN, total_count);
    stats->AddDBStats(InternalStats::kIntStatsBytesWritten, total_byte_size);
    RecordTick(stats_, BYTES_WRITTEN, total_byte_size);
    RecordInHistogram(stats_, BYTES_PER_WRITE, total_byte_size);

    PERF_TIMER_STOP(write_pre_and_post_process_time);

    if (w.status.ok() && !write_options.disableWAL) {
      PERF_TIMER_GUARD(write_wal_time);
      stats->AddDBStats(InternalStats::kIntStatsWriteDoneBySelf, 1);
      RecordTick(stats_, WRITE_DONE_BY_SELF, 1);
      if (wal_write_group.size > 1) {
        stats->AddDBStats(InternalStats::kIntStatsWriteDoneByOther,
                          wal_write_group.size - 1);
        RecordTick(stats_, WRITE_DONE_BY_OTHER, wal_write_group.size - 1);
      }
      w.status = WriteToWAL(wal_write_group, log_writer, log_used,
                            need_log_sync, need_log_dir_sync, current_sequence);
    }

    if (!w.CallbackFailed()) {
      WriteStatusCheck(w.status);
    }

    if (need_log_sync) {
      mutex_.Lock();
      MarkLogsSynced(logfile_number_, need_log_dir_sync, w.status);
      mutex_.Unlock();
    }

    write_thread_.ExitAsBatchGroupLeader(wal_write_group, w.status);
  }

  WriteThread::WriteGroup memtable_write_group;
  if (w.state == WriteThread::STATE_MEMTABLE_WRITER_LEADER) {
    PERF_TIMER_GUARD(write_memtable_time);
    assert(w.ShouldWriteToMemtable());
    write_thread_.EnterAsMemTableWriter(&w, &memtable_write_group);
    if (memtable_write_group.size > 1 &&
        immutable_db_options_.allow_concurrent_memtable_write) {
      write_thread_.LaunchParallelMemTableWriters(&memtable_write_group);
    } else {
      memtable_write_group.status = WriteBatchInternal::InsertInto(
          memtable_write_group, w.sequence, column_family_memtables_.get(),
          &flush_scheduler_, &trim_history_scheduler_,
          write_options.ignore_missing_column_families, 0 /*log_number*/, this,
          false /*concurrent_memtable_writes*/, seq_per_batch_, batch_per_txn_);
      versions_->SetLastSequence(memtable_write_group.last_sequence);
      write_thread_.ExitAsMemTableWriter(&w, memtable_write_group);
    }
  }

  if (w.state == WriteThread::STATE_PARALLEL_MEMTABLE_WRITER) {
    assert(w.ShouldWriteToMemtable());
    ColumnFamilyMemTablesImpl column_family_memtables(
        versions_->GetColumnFamilySet());
    w.status = WriteBatchInternal::InsertInto(
        &w, w.sequence, &column_family_memtables, &flush_scheduler_,
        &trim_history_scheduler_, write_options.ignore_missing_column_families,
        0 /*log_number*/, this, true /*concurrent_memtable_writes*/,
        false /*seq_per_batch*/, 0 /*batch_cnt*/, true /*batch_per_txn*/,
        write_options.memtable_insert_hint_per_batch);
    if (write_thread_.CompleteParallelMemTableWriter(&w)) {
      MemTableInsertStatusCheck(w.status);
      versions_->SetLastSequence(w.write_group->last_sequence);
      write_thread_.ExitAsMemTableWriter(&w, *w.write_group);
    }
  }
  if (seq_used != nullptr) {
    *seq_used = w.sequence;
  }

  assert(w.state == WriteThread::STATE_COMPLETED);
  return w.FinalStatus();
}

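// --- Editorial example (not part of the original source) --------------------
// The pipelined path above is opted into at open time; WriteImpl rejects it
// in combination with two_write_queues and unordered_write. A sketch with a
// hypothetical db path:
static Status ExampleOpenPipelined(DB** db) {
  Options options;
  options.create_if_missing = true;
  // Overlap WAL and memtable work: a WAL group leader hands the group over to
  // a memtable-writer leader instead of performing both phases itself.
  options.enable_pipelined_write = true;
  return DB::Open(options, "/tmp/example_pipelined_db", db);
}
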
Status DBImpl::UnorderedWriteMemtable(const WriteOptions& write_options,
                                      WriteBatch* my_batch,
                                      WriteCallback* callback, uint64_t log_ref,
                                      SequenceNumber seq,
                                      const size_t sub_batch_cnt) {
  PERF_TIMER_GUARD(write_pre_and_post_process_time);
  StopWatch write_sw(env_, immutable_db_options_.statistics.get(), DB_WRITE);

  WriteThread::Writer w(write_options, my_batch, callback, log_ref,
                        false /*disable_memtable*/);

  if (w.CheckCallback(this) && w.ShouldWriteToMemtable()) {
    w.sequence = seq;
    size_t total_count = WriteBatchInternal::Count(my_batch);
    InternalStats* stats = default_cf_internal_stats_;
    stats->AddDBStats(InternalStats::kIntStatsNumKeysWritten, total_count);
    RecordTick(stats_, NUMBER_KEYS_WRITTEN, total_count);

    ColumnFamilyMemTablesImpl column_family_memtables(
        versions_->GetColumnFamilySet());
    w.status = WriteBatchInternal::InsertInto(
        &w, w.sequence, &column_family_memtables, &flush_scheduler_,
        &trim_history_scheduler_, write_options.ignore_missing_column_families,
        0 /*log_number*/, this, true /*concurrent_memtable_writes*/,
        seq_per_batch_, sub_batch_cnt, true /*batch_per_txn*/,
        write_options.memtable_insert_hint_per_batch);

    WriteStatusCheck(w.status);
    if (write_options.disableWAL) {
      has_unpersisted_data_.store(true, std::memory_order_relaxed);
    }
  }

  size_t pending_cnt = pending_memtable_writes_.fetch_sub(1) - 1;
  if (pending_cnt == 0) {
    // switch_cv_ waits until pending_memtable_writes_ = 0. Locking its mutex
    // before the notify ensures that the cv is in the waiting state when it is
    // notified, and thus does not miss the update to pending_memtable_writes_
    // even though the counter is not modified under the mutex.
    std::lock_guard<std::mutex> lck(switch_mutex_);
    switch_cv_.notify_all();
  }

  if (!w.FinalStatus().ok()) {
    return w.FinalStatus();
  }
  return Status::OK();
}

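// --- Editorial example (not part of the original source) --------------------
// A standalone restatement of the switch_mutex_/switch_cv_ pattern above:
// pending_memtable_writes_ is atomic and never modified under the mutex, yet
// the notifier still locks the mutex before notify_all() so a waiter cannot
// test the counter, decide to sleep, and miss the wakeup in between. All
// names below are hypothetical; <atomic>, <mutex> and <condition_variable>
// are assumed to be available.
static std::atomic<size_t> example_pending_writes{0};
static std::mutex example_switch_mutex;
static std::condition_variable example_switch_cv;

static void ExampleFinishPendingWrite() {
  if (example_pending_writes.fetch_sub(1) - 1 == 0) {
    // Pairs with the waiter's lock; without it the notification can be lost.
    std::lock_guard<std::mutex> lck(example_switch_mutex);
    example_switch_cv.notify_all();
  }
}

static void ExampleWaitForPendingWrites() {
  std::unique_lock<std::mutex> lck(example_switch_mutex);
  example_switch_cv.wait(lck,
                         [] { return example_pending_writes.load() == 0; });
}
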
// The 2nd write queue. If enabled it will be used only for WAL-only writes.
// This is the only queue that updates LastPublishedSequence which is only
// applicable in a two-queue setting.
Status DBImpl::WriteImplWALOnly(
    WriteThread* write_thread, const WriteOptions& write_options,
    WriteBatch* my_batch, WriteCallback* callback, uint64_t* log_used,
    const uint64_t log_ref, uint64_t* seq_used, const size_t sub_batch_cnt,
    PreReleaseCallback* pre_release_callback, const AssignOrder assign_order,
    const PublishLastSeq publish_last_seq, const bool disable_memtable) {
  Status status;
  PERF_TIMER_GUARD(write_pre_and_post_process_time);
  WriteThread::Writer w(write_options, my_batch, callback, log_ref,
                        disable_memtable, sub_batch_cnt, pre_release_callback);
  RecordTick(stats_, WRITE_WITH_WAL);
  StopWatch write_sw(env_, immutable_db_options_.statistics.get(), DB_WRITE);

  write_thread->JoinBatchGroup(&w);
  assert(w.state != WriteThread::STATE_PARALLEL_MEMTABLE_WRITER);
  if (w.state == WriteThread::STATE_COMPLETED) {
    if (log_used != nullptr) {
      *log_used = w.log_used;
    }
    if (seq_used != nullptr) {
      *seq_used = w.sequence;
    }
    return w.FinalStatus();
  }
  // else we are the leader of the write batch group
  assert(w.state == WriteThread::STATE_GROUP_LEADER);

  if (publish_last_seq == kDoPublishLastSeq) {
    // Currently we only use kDoPublishLastSeq in unordered_write
    assert(immutable_db_options_.unordered_write);
    WriteContext write_context;
    if (error_handler_.IsDBStopped()) {
      status = error_handler_.GetBGError();
    }
    // TODO(myabandeh): Make preliminary checks thread-safe so we could do them
    // without paying the cost of obtaining the mutex.
    if (status.ok()) {
      InstrumentedMutexLock l(&mutex_);
      bool need_log_sync = false;
      status = PreprocessWrite(write_options, &need_log_sync, &write_context);
      WriteStatusCheck(status);
    }
    if (!status.ok()) {
      WriteThread::WriteGroup write_group;
      write_thread->EnterAsBatchGroupLeader(&w, &write_group);
      write_thread->ExitAsBatchGroupLeader(write_group, status);
      return status;
    }
  }

  WriteThread::WriteGroup write_group;
  uint64_t last_sequence;
  write_thread->EnterAsBatchGroupLeader(&w, &write_group);
  // Note: no need to update last_batch_group_size_ here since the batch writes
  // to WAL only

  size_t pre_release_callback_cnt = 0;
  size_t total_byte_size = 0;
  for (auto* writer : write_group) {
    if (writer->CheckCallback(this)) {
      total_byte_size = WriteBatchInternal::AppendedByteSize(
          total_byte_size, WriteBatchInternal::ByteSize(writer->batch));
      if (writer->pre_release_callback) {
        pre_release_callback_cnt++;
      }
    }
  }

  const bool concurrent_update = true;
  // Update stats while we are an exclusive group leader, so we know
  // that nobody else can be writing to these particular stats.
  // We're optimistic, updating the stats before we successfully
  // commit.  That lets us release our leader status early.
  auto stats = default_cf_internal_stats_;
  stats->AddDBStats(InternalStats::kIntStatsBytesWritten, total_byte_size,
                    concurrent_update);
  RecordTick(stats_, BYTES_WRITTEN, total_byte_size);
  stats->AddDBStats(InternalStats::kIntStatsWriteDoneBySelf, 1,
                    concurrent_update);
  RecordTick(stats_, WRITE_DONE_BY_SELF);
  auto write_done_by_other = write_group.size - 1;
  if (write_done_by_other > 0) {
    stats->AddDBStats(InternalStats::kIntStatsWriteDoneByOther,
                      write_done_by_other, concurrent_update);
    RecordTick(stats_, WRITE_DONE_BY_OTHER, write_done_by_other);
  }
  RecordInHistogram(stats_, BYTES_PER_WRITE, total_byte_size);

  PERF_TIMER_STOP(write_pre_and_post_process_time);

  PERF_TIMER_GUARD(write_wal_time);
  // LastAllocatedSequence is increased inside WriteToWAL under
  // wal_write_mutex_ to ensure ordered events in WAL
  size_t seq_inc = 0 /* total_count */;
  if (assign_order == kDoAssignOrder) {
    size_t total_batch_cnt = 0;
    for (auto* writer : write_group) {
      assert(writer->batch_cnt || !seq_per_batch_);
      if (!writer->CallbackFailed()) {
        total_batch_cnt += writer->batch_cnt;
      }
    }
    seq_inc = total_batch_cnt;
  }
  if (!write_options.disableWAL) {
    status =
        ConcurrentWriteToWAL(write_group, log_used, &last_sequence, seq_inc);
  } else {
    // Otherwise we inc seq number to do solely the seq allocation
    last_sequence = versions_->FetchAddLastAllocatedSequence(seq_inc);
  }

  size_t memtable_write_cnt = 0;
  auto curr_seq = last_sequence + 1;
  for (auto* writer : write_group) {
    if (writer->CallbackFailed()) {
      continue;
    }
    writer->sequence = curr_seq;
    if (assign_order == kDoAssignOrder) {
      assert(writer->batch_cnt || !seq_per_batch_);
      curr_seq += writer->batch_cnt;
    }
    if (!writer->disable_memtable) {
      memtable_write_cnt++;
    }
    // else seq advances only by memtable writes
  }
  if (status.ok() && write_options.sync) {
    assert(!write_options.disableWAL);
    // Requesting sync with two_write_queues_ is expected to be very rare. We
    // hence provide a simple implementation that is not necessarily efficient.
    if (manual_wal_flush_) {
      status = FlushWAL(true);
    } else {
      status = SyncWAL();
    }
  }
  PERF_TIMER_START(write_pre_and_post_process_time);

  if (!w.CallbackFailed()) {
    WriteStatusCheck(status);
  }
  if (status.ok()) {
    size_t index = 0;
    for (auto* writer : write_group) {
      if (!writer->CallbackFailed() && writer->pre_release_callback) {
        assert(writer->sequence != kMaxSequenceNumber);
        Status ws = writer->pre_release_callback->Callback(
            writer->sequence, disable_memtable, writer->log_used, index++,
            pre_release_callback_cnt);
        if (!ws.ok()) {
          status = ws;
          break;
        }
      }
    }
  }
  if (publish_last_seq == kDoPublishLastSeq) {
    versions_->SetLastSequence(last_sequence + seq_inc);
    // Currently we only use kDoPublishLastSeq in unordered_write
    assert(immutable_db_options_.unordered_write);
  }
  if (immutable_db_options_.unordered_write && status.ok()) {
    pending_memtable_writes_ += memtable_write_cnt;
  }
  write_thread->ExitAsBatchGroupLeader(write_group, status);
  if (status.ok()) {
    status = w.FinalStatus();
  }
  if (seq_used != nullptr) {
    *seq_used = w.sequence;
  }
  return status;
}

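// --- Editorial example (not part of the original source) --------------------
// WriteImplWALOnly with kDoPublishLastSeq is the WAL half of the
// unordered_write path, which is selected at open time. A sketch with a
// hypothetical db path; note that unordered_write trades the default path's
// snapshot-consistency guarantee for throughput.
static Status ExampleOpenUnordered(DB** db) {
  Options options;
  options.create_if_missing = true;
  // WAL writes and sequence publication stay ordered in the write thread;
  // memtable inserts then proceed concurrently via UnorderedWriteMemtable.
  options.unordered_write = true;
  return DB::Open(options, "/tmp/example_unordered_db", db);
}
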
void DBImpl::WriteStatusCheck(const Status& status) {
  // Is setting bg_error_ enough here?  This will at least stop
  // compaction and fail any further writes.
  if (immutable_db_options_.paranoid_checks && !status.ok() &&
      !status.IsBusy() && !status.IsIncomplete()) {
    mutex_.Lock();
    error_handler_.SetBGError(status, BackgroundErrorReason::kWriteCallback);
    mutex_.Unlock();
  }
}

void DBImpl::MemTableInsertStatusCheck(const Status& status) {
  // A non-OK status here indicates that the state implied by the
  // WAL has diverged from the in-memory state.  This could be
  // because of a corrupt write_batch (very bad), or because the
  // client specified an invalid column family and didn't specify
  // ignore_missing_column_families.
  if (!status.ok()) {
    mutex_.Lock();
    assert(!error_handler_.IsBGWorkStopped());
    error_handler_.SetBGError(status, BackgroundErrorReason::kMemTable);
    mutex_.Unlock();
  }
}

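// --- Editorial example (not part of the original source) --------------------
// Once SetBGError has been recorded by the checks above, further writes keep
// failing until the error is cleared. A sketch of one client-side reaction,
// with hypothetical names; DB::Resume() attempts recovery from a recoverable
// background error.
static Status ExampleRecoverFromBackgroundError(DB* db) {
  Status s = db->Put(WriteOptions(), "key", "value");
  if (!s.ok()) {
    Status r = db->Resume();  // try to clear the background error
    if (r.ok()) {
      s = db->Put(WriteOptions(), "key", "value");  // retry once
    }
  }
  return s;
}
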
Status DBImpl::PreprocessWrite(const WriteOptions& write_options,
                               bool* need_log_sync,
                               WriteContext* write_context) {
  mutex_.AssertHeld();
  assert(write_context != nullptr && need_log_sync != nullptr);
  Status status;

  if (error_handler_.IsDBStopped()) {
    status = error_handler_.GetBGError();
  }

  PERF_TIMER_GUARD(write_scheduling_flushes_compactions_time);

  assert(!single_column_family_mode_ ||
         versions_->GetColumnFamilySet()->NumberOfColumnFamilies() == 1);
  if (UNLIKELY(status.ok() && !single_column_family_mode_ &&
               total_log_size_ > GetMaxTotalWalSize())) {
    WaitForPendingWrites();
    status = SwitchWAL(write_context);
  }

  if (UNLIKELY(status.ok() && write_buffer_manager_->ShouldFlush())) {
    // Before a new memtable is added in SwitchMemtable(),
    // write_buffer_manager_->ShouldFlush() will keep returning true. If another
    // thread is writing to another DB with the same write buffer, they may also
    // be flushed. We may end up with flushing much more DBs than needed. It's
    // suboptimal but still correct.
    WaitForPendingWrites();
    status = HandleWriteBufferFull(write_context);
  }

  if (UNLIKELY(status.ok() && !trim_history_scheduler_.Empty())) {
    status = TrimMemtableHistory(write_context);
  }

  if (UNLIKELY(status.ok() && !flush_scheduler_.Empty())) {
    WaitForPendingWrites();
    status = ScheduleFlushes(write_context);
  }

  PERF_TIMER_STOP(write_scheduling_flushes_compactions_time);
  PERF_TIMER_GUARD(write_pre_and_post_process_time);

  if (UNLIKELY(status.ok() && (write_controller_.IsStopped() ||
                               write_controller_.NeedsDelay()))) {
    PERF_TIMER_STOP(write_pre_and_post_process_time);
    PERF_TIMER_GUARD(write_delay_time);
    // We don't know the size of the current batch, so we always use the size
    // of the previous one. It might create a fairness issue in that expiration
    // might happen for smaller writes while larger writes can go through.
    // Can optimize it if it is an issue.
    status = DelayWrite(last_batch_group_size_, write_options);
    PERF_TIMER_START(write_pre_and_post_process_time);
  }

  if (status.ok() && *need_log_sync) {
    // Wait until the parallel syncs are finished. Any sync process has to sync
    // the front log too, so it is enough to check the status of front().
    // We do a while loop since log_sync_cv_ is signalled when any sync is
    // finished.
    // Note: there does not seem to be a reason to wait for parallel sync at
    // this early step but it is not important since parallel sync (SyncWAL) and
    // need_log_sync are usually not used together.
    while (logs_.front().getting_synced) {
      log_sync_cv_.Wait();
    }
    for (auto& log : logs_) {
      assert(!log.getting_synced);
      // This is just to prevent the logs from being synced by a parallel
      // SyncWAL call. We will do the actual syncing later, after we write to
      // the WAL.
      // Note: there does not seem to be a reason to set this early before we
      // actually write to the WAL
      log.getting_synced = true;
    }
  } else {
    *need_log_sync = false;
  }

  return status;
}

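// --- Editorial example (not part of the original source) --------------------
// DelayWrite above is where a stalled writer sleeps. A caller that prefers
// failing fast over blocking can set no_slowdown, in which case a write that
// would have to wait returns Status::Incomplete. Names are hypothetical.
static Status ExampleNonBlockingWrite(DB* db, WriteBatch* batch) {
  WriteOptions wo;
  wo.no_slowdown = true;  // fail with Incomplete instead of stalling
  Status s = db->Write(wo, batch);
  if (s.IsIncomplete()) {
    // The write would have been delayed or stopped; back off, retry later, or
    // shed load here.
  }
  return s;
}
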
WriteBatch* DBImpl::MergeBatch(const WriteThread::WriteGroup& write_group,
                               WriteBatch* tmp_batch, size_t* write_with_wal,
                               WriteBatch** to_be_cached_state) {
  assert(write_with_wal != nullptr);
  assert(tmp_batch != nullptr);
  assert(*to_be_cached_state == nullptr);
  WriteBatch* merged_batch = nullptr;
  *write_with_wal = 0;
  auto* leader = write_group.leader;
  assert(!leader->disable_wal);  // Same holds for all in the batch group
  if (write_group.size == 1 && !leader->CallbackFailed() &&
      leader->batch->GetWalTerminationPoint().is_cleared()) {
    // we simply write the first WriteBatch to WAL if the group only
    // contains one batch, that batch should be written to the WAL,
    // and the batch is not wanting to be truncated
    merged_batch = leader->batch;
    if (WriteBatchInternal::IsLatestPersistentState(merged_batch)) {
      *to_be_cached_state = merged_batch;
    }
    *write_with_wal = 1;
  } else {
    // WAL needs all of the batches flattened into a single batch.
    // We could avoid copying here with an iov-like AddRecord
    // interface
    merged_batch = tmp_batch;
    for (auto writer : write_group) {
      if (!writer->CallbackFailed()) {
        WriteBatchInternal::Append(merged_batch, writer->batch,
                                   /*WAL_only*/ true);
        if (WriteBatchInternal::IsLatestPersistentState(writer->batch)) {
          // We only need to cache the last of such write batch
          *to_be_cached_state = writer->batch;
        }
        (*write_with_wal)++;
      }
    }
  }
  return merged_batch;
}

// When two_write_queues_ is disabled, this function is called from the only
// write thread. Otherwise this must be called holding log_write_mutex_.
Status DBImpl::WriteToWAL(const WriteBatch& merged_batch,
                          log::Writer* log_writer, uint64_t* log_used,
                          uint64_t* log_size) {
  assert(log_size != nullptr);
  Slice log_entry = WriteBatchInternal::Contents(&merged_batch);
  *log_size = log_entry.size();
  // When two_write_queues_, WriteToWAL has to be protected from concurrent
  // calls from the two queues anyway and log_write_mutex_ is already held.
  // Otherwise, if manual_wal_flush_ is enabled we need to protect
  // log_writer->AddRecord from possible concurrent calls via FlushWAL by the
  // application.
  const bool needs_locking = manual_wal_flush_ && !two_write_queues_;
  // Due to performance concerns of missed branch prediction, penalize the new
  // manual_wal_flush_ feature (by UNLIKELY) instead of the more common case
  // when we do not need any locking.
  if (UNLIKELY(needs_locking)) {
    log_write_mutex_.Lock();
  }
  Status status = log_writer->AddRecord(log_entry);
  if (UNLIKELY(needs_locking)) {
    log_write_mutex_.Unlock();
  }
  if (log_used != nullptr) {
    *log_used = logfile_number_;
  }
  total_log_size_ += log_entry.size();
  // TODO(myabandeh): it might be unsafe to access alive_log_files_.back() here
  // since alive_log_files_ might be modified concurrently
  alive_log_files_.back().AddSize(log_entry.size());
  log_empty_ = false;
  return status;
}

Status DBImpl::WriteToWAL(const WriteThread::WriteGroup& write_group,
                          log::Writer* log_writer, uint64_t* log_used,
                          bool need_log_sync, bool need_log_dir_sync,
                          SequenceNumber sequence) {
  Status status;

  assert(!write_group.leader->disable_wal);
  // Same holds for all in the batch group
  size_t write_with_wal = 0;
  WriteBatch* to_be_cached_state = nullptr;
  WriteBatch* merged_batch = MergeBatch(write_group, &tmp_batch_,
                                        &write_with_wal, &to_be_cached_state);
  if (merged_batch == write_group.leader->batch) {
    write_group.leader->log_used = logfile_number_;
  } else if (write_with_wal > 1) {
    for (auto writer : write_group) {
      writer->log_used = logfile_number_;
    }
  }

  WriteBatchInternal::SetSequence(merged_batch, sequence);

  uint64_t log_size;
  status = WriteToWAL(*merged_batch, log_writer, log_used, &log_size);
  if (to_be_cached_state) {
    cached_recoverable_state_ = *to_be_cached_state;
    cached_recoverable_state_empty_ = false;
  }

  if (status.ok() && need_log_sync) {
    StopWatch sw(env_, stats_, WAL_FILE_SYNC_MICROS);
    // It's safe to access logs_ with unlocked mutex_ here because:
    //  - we've set getting_synced=true for all logs,
    //    so other threads won't pop from logs_ while we're here,
    //  - only writer thread can push to logs_, and we're in
    //    writer thread, so no one will push to logs_,
    //  - as long as other threads don't modify it, it's safe to read
    //    from std::deque from multiple threads concurrently.
    for (auto& log : logs_) {
      status = log.writer->file()->Sync(immutable_db_options_.use_fsync);
      if (!status.ok()) {
        break;
      }
    }
    if (status.ok() && need_log_dir_sync) {
      // We only sync WAL directory the first time WAL syncing is
      // requested, so that in case users never turn on WAL sync,
      // we can avoid the disk I/O in the write code path.
      status = directories_.GetWalDir()->Fsync();
    }
  }

  if (merged_batch == &tmp_batch_) {
    tmp_batch_.Clear();
  }
  if (status.ok()) {
    auto stats = default_cf_internal_stats_;
    if (need_log_sync) {
      stats->AddDBStats(InternalStats::kIntStatsWalFileSynced, 1);
      RecordTick(stats_, WAL_FILE_SYNCED);
    }
    stats->AddDBStats(InternalStats::kIntStatsWalFileBytes, log_size);
    RecordTick(stats_, WAL_FILE_BYTES, log_size);
    stats->AddDBStats(InternalStats::kIntStatsWriteWithWal, write_with_wal);
    RecordTick(stats_, WRITE_WITH_WAL, write_with_wal);
  }
  return status;
}

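// --- Editorial example (not part of the original source) --------------------
// With manual_wal_flush, AddRecord above only reaches the WAL writer's buffer
// and the application decides when that buffer reaches the file system. A
// sketch with hypothetical names; it assumes the DB was opened with
// Options::manual_wal_flush = true.
static Status ExampleManualWalFlush(DB* db, WriteBatch* batch) {
  Status s = db->Write(WriteOptions(), batch);
  if (s.ok()) {
    // FlushWAL(false) hands buffered records to the OS; FlushWAL(true) also
    // syncs them, roughly FlushWAL(false) followed by SyncWAL().
    s = db->FlushWAL(true /* sync */);
  }
  return s;
}
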
Status DBImpl::ConcurrentWriteToWAL(const WriteThread::WriteGroup& write_group,
                                    uint64_t* log_used,
                                    SequenceNumber* last_sequence,
                                    size_t seq_inc) {
  Status status;

  assert(!write_group.leader->disable_wal);
  // Same holds for all in the batch group
  WriteBatch tmp_batch;
  size_t write_with_wal = 0;
  WriteBatch* to_be_cached_state = nullptr;
  WriteBatch* merged_batch =
      MergeBatch(write_group, &tmp_batch, &write_with_wal, &to_be_cached_state);

  // We need to lock log_write_mutex_ since logs_ and alive_log_files might be
  // pushed back concurrently
  log_write_mutex_.Lock();
  if (merged_batch == write_group.leader->batch) {
    write_group.leader->log_used = logfile_number_;
  } else if (write_with_wal > 1) {
    for (auto writer : write_group) {
      writer->log_used = logfile_number_;
    }
  }
  *last_sequence = versions_->FetchAddLastAllocatedSequence(seq_inc);
  auto sequence = *last_sequence + 1;
  WriteBatchInternal::SetSequence(merged_batch, sequence);

  log::Writer* log_writer = logs_.back().writer;
  uint64_t log_size;
  status = WriteToWAL(*merged_batch, log_writer, log_used, &log_size);
  if (to_be_cached_state) {
    cached_recoverable_state_ = *to_be_cached_state;
    cached_recoverable_state_empty_ = false;
  }
  log_write_mutex_.Unlock();

  if (status.ok()) {
    const bool concurrent = true;
    auto stats = default_cf_internal_stats_;
    stats->AddDBStats(InternalStats::kIntStatsWalFileBytes, log_size,
                      concurrent);
    RecordTick(stats_, WAL_FILE_BYTES, log_size);
    stats->AddDBStats(InternalStats::kIntStatsWriteWithWal, write_with_wal,
                      concurrent);
    RecordTick(stats_, WRITE_WITH_WAL, write_with_wal);
  }
  return status;
}

Status DBImpl::WriteRecoverableState() {
  mutex_.AssertHeld();
  if (!cached_recoverable_state_empty_) {
    bool dont_care_bool;
    SequenceNumber next_seq;
    if (two_write_queues_) {
      log_write_mutex_.Lock();
    }
    SequenceNumber seq;
    if (two_write_queues_) {
      seq = versions_->FetchAddLastAllocatedSequence(0);
    } else {
      seq = versions_->LastSequence();
    }
    WriteBatchInternal::SetSequence(&cached_recoverable_state_, seq + 1);
    auto status = WriteBatchInternal::InsertInto(
        &cached_recoverable_state_, column_family_memtables_.get(),
        &flush_scheduler_, &trim_history_scheduler_, true,
        0 /*recovery_log_number*/, this, false /* concurrent_memtable_writes */,
        &next_seq, &dont_care_bool, seq_per_batch_);
    auto last_seq = next_seq - 1;
    if (two_write_queues_) {
      versions_->FetchAddLastAllocatedSequence(last_seq - seq);
      versions_->SetLastPublishedSequence(last_seq);
    }
    versions_->SetLastSequence(last_seq);
    if (two_write_queues_) {
      log_write_mutex_.Unlock();
    }
    if (status.ok() && recoverable_state_pre_release_callback_) {
      const bool DISABLE_MEMTABLE = true;
      for (uint64_t sub_batch_seq = seq + 1;
           sub_batch_seq < next_seq && status.ok(); sub_batch_seq++) {
        uint64_t const no_log_num = 0;
        // Unlock it since the callback might end up locking mutex. e.g.,
        // AddCommitted -> AdvanceMaxEvictedSeq -> GetSnapshotListFromDB
        mutex_.Unlock();
        status = recoverable_state_pre_release_callback_->Callback(
            sub_batch_seq, !DISABLE_MEMTABLE, no_log_num, 0, 1);
        mutex_.Lock();
      }
    }
    if (status.ok()) {
      cached_recoverable_state_.Clear();
      cached_recoverable_state_empty_ = true;
    }
    return status;
  }
  return Status::OK();
}

void DBImpl::SelectColumnFamiliesForAtomicFlush(
    autovector<ColumnFamilyData*>* cfds) {
  for (ColumnFamilyData* cfd : *versions_->GetColumnFamilySet()) {
    if (cfd->IsDropped()) {
      continue;
    }
    if (cfd->imm()->NumNotFlushed() != 0 || !cfd->mem()->IsEmpty() ||
        !cached_recoverable_state_empty_.load()) {
      cfds->push_back(cfd);
    }
  }
}

// Assign sequence number for atomic flush.
void DBImpl::AssignAtomicFlushSeq(const autovector<ColumnFamilyData*>& cfds) {
  assert(immutable_db_options_.atomic_flush);
  auto seq = versions_->LastSequence();
  for (auto cfd : cfds) {
    cfd->imm()->AssignAtomicFlushSeq(seq);
  }
}

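// --- Editorial example (not part of the original source) --------------------
// The atomic-flush helpers above only run when the DB is opened with
// atomic_flush set, so that all column families are flushed to the single
// sequence number stamped by AssignAtomicFlushSeq. A sketch with a
// hypothetical db path:
static Status ExampleOpenAtomicFlush(DB** db) {
  Options options;
  options.create_if_missing = true;
  options.atomic_flush = true;  // flush column families as one atomic unit
  return DB::Open(options, "/tmp/example_atomic_db", db);
}
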
Status DBImpl::SwitchWAL(WriteContext* write_context) {
  mutex_.AssertHeld();
  assert(write_context != nullptr);
  Status status;

  if (alive_log_files_.begin()->getting_flushed) {
    return status;
  }

  auto oldest_alive_log = alive_log_files_.begin()->number;
  bool flush_wont_release_oldest_log = false;
  if (allow_2pc()) {
    auto oldest_log_with_uncommitted_prep =
        logs_with_prep_tracker_.FindMinLogContainingOutstandingPrep();

    assert(oldest_log_with_uncommitted_prep == 0 ||
           oldest_log_with_uncommitted_prep >= oldest_alive_log);
    if (oldest_log_with_uncommitted_prep > 0 &&
        oldest_log_with_uncommitted_prep == oldest_alive_log) {
      if (unable_to_release_oldest_log_) {
        // we already attempted to flush all column families dependent on
        // the oldest alive log but the log still contained uncommitted
        // transactions so there is still nothing that we can do.
        return status;
      } else {
        ROCKS_LOG_WARN(
            immutable_db_options_.info_log,
            "Unable to release oldest log due to uncommitted transaction");
        unable_to_release_oldest_log_ = true;
        flush_wont_release_oldest_log = true;
      }
    }
  }
  if (!flush_wont_release_oldest_log) {
    // we only mark this log as getting flushed if we have successfully
    // flushed all data in this log. If this log contains outstanding prepared
    // transactions then we cannot flush this log until those transactions are
    // committed.
    unable_to_release_oldest_log_ = false;
    alive_log_files_.begin()->getting_flushed = true;
  }

  ROCKS_LOG_INFO(
      immutable_db_options_.info_log,
      "Flushing all column families with data in WAL number %" PRIu64
      ". Total log size is %" PRIu64 " while max_total_wal_size is %" PRIu64,
      oldest_alive_log, total_log_size_.load(), GetMaxTotalWalSize());
S
Siying Dong 已提交
1235 1236
  // no need to refcount because drop is happening in write thread, so can't
  // happen while we're in the write thread
Y
Yanqin Jin 已提交
1237
  autovector<ColumnFamilyData*> cfds;
1238
  if (immutable_db_options_.atomic_flush) {
Y
Yanqin Jin 已提交
1239 1240 1241 1242 1243
    SelectColumnFamiliesForAtomicFlush(&cfds);
  } else {
    for (auto cfd : *versions_->GetColumnFamilySet()) {
      if (cfd->IsDropped()) {
        continue;
      }
      if (cfd->OldestLogToKeep() <= oldest_alive_log) {
        cfds.push_back(cfd);
      }
    }
    MaybeFlushStatsCF(&cfds);
  }
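  // When two write queues are in use, stop the second (WAL-only) queue for
  // the duration of the memtable switch: EnterUnbatched waits until this
  // thread is the exclusive writer on that queue.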
  WriteThread::Writer nonmem_w;
  if (two_write_queues_) {
    nonmem_write_thread_.EnterUnbatched(&nonmem_w, &mutex_);
  }

  for (const auto cfd : cfds) {
    cfd->Ref();
    status = SwitchMemtable(cfd, write_context);
    cfd->UnrefAndTryDelete();
    if (!status.ok()) {
      break;
    }
  }
  if (two_write_queues_) {
    nonmem_write_thread_.ExitUnbatched(&nonmem_w);
  }

  if (status.ok()) {
    if (immutable_db_options_.atomic_flush) {
      AssignAtomicFlushSeq(cfds);
    }
    for (auto cfd : cfds) {
      cfd->imm()->FlushRequested();
    }
    FlushRequest flush_req;
    GenerateFlushRequest(cfds, &flush_req);
    SchedulePendingFlush(flush_req, FlushReason::kWriteBufferManager);
    MaybeScheduleFlushOrCompaction();
  }
  return status;
}

Status DBImpl::HandleWriteBufferFull(WriteContext* write_context) {
  mutex_.AssertHeld();
  assert(write_context != nullptr);
  Status status;

  // Before a new memtable is added in SwitchMemtable(),
  // write_buffer_manager_->ShouldFlush() will keep returning true. If another
  // thread is writing to another DB with the same write buffer, they may also
  // be flushed. We may end up flushing many more DBs than needed. It's
  // suboptimal but still correct.
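  // The heuristic below picks the column family whose active memtable was
  // created earliest (smallest creation sequence number): flushing the
  // oldest data is most likely to free memory and release old WALs.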
  ROCKS_LOG_INFO(
      immutable_db_options_.info_log,
      "Flushing column family with largest mem table size. Write buffer is "
      "using %" ROCKSDB_PRIszt " bytes out of a total of %" ROCKSDB_PRIszt ".",
      write_buffer_manager_->memory_usage(),
      write_buffer_manager_->buffer_size());
  // no need to refcount because a CF drop is executed in the write thread,
  // so it can't happen while we're in the write thread
  autovector<ColumnFamilyData*> cfds;
  if (immutable_db_options_.atomic_flush) {
    SelectColumnFamiliesForAtomicFlush(&cfds);
  } else {
    ColumnFamilyData* cfd_picked = nullptr;
    SequenceNumber seq_num_for_cf_picked = kMaxSequenceNumber;

    for (auto cfd : *versions_->GetColumnFamilySet()) {
      if (cfd->IsDropped()) {
        continue;
      }
      if (!cfd->mem()->IsEmpty()) {
        // We only consider the active memtable, assuming that immutable
        // memtables are already in the process of being flushed.
        uint64_t seq = cfd->mem()->GetCreationSeq();
        if (cfd_picked == nullptr || seq < seq_num_for_cf_picked) {
          cfd_picked = cfd;
          seq_num_for_cf_picked = seq;
        }
      }
    }
    if (cfd_picked != nullptr) {
      cfds.push_back(cfd_picked);
    }
    MaybeFlushStatsCF(&cfds);
  }

  WriteThread::Writer nonmem_w;
  if (two_write_queues_) {
    nonmem_write_thread_.EnterUnbatched(&nonmem_w, &mutex_);
  }
  for (const auto cfd : cfds) {
    if (cfd->mem()->IsEmpty()) {
      continue;
    }
    cfd->Ref();
    status = SwitchMemtable(cfd, write_context);
    cfd->UnrefAndTryDelete();
    if (!status.ok()) {
      break;
    }
  }
  if (two_write_queues_) {
    nonmem_write_thread_.ExitUnbatched(&nonmem_w);
  }

  if (status.ok()) {
    if (immutable_db_options_.atomic_flush) {
      AssignAtomicFlushSeq(cfds);
    }
    for (const auto cfd : cfds) {
      cfd->imm()->FlushRequested();
    }
    FlushRequest flush_req;
    GenerateFlushRequest(cfds, &flush_req);
    SchedulePendingFlush(flush_req, FlushReason::kWriteBufferFull);
    MaybeScheduleFlushOrCompaction();
  }
  return status;
}

uint64_t DBImpl::GetMaxTotalWalSize() const {
  mutex_.AssertHeld();
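  // When max_total_wal_size is 0 (unset), cap the WALs at four times the
  // total memtable budget; e.g., 512 MB of total memtable capacity allows
  // roughly 2 GB of WALs (illustrative numbers).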
  return mutable_db_options_.max_total_wal_size == 0
             ? 4 * max_total_in_memory_state_
             : mutable_db_options_.max_total_wal_size;
}

// REQUIRES: mutex_ is held
// REQUIRES: this thread is currently at the front of the writer queue
Status DBImpl::DelayWrite(uint64_t num_bytes,
                          const WriteOptions& write_options) {
  uint64_t time_delayed = 0;
  bool delayed = false;
  {
    StopWatch sw(env_, stats_, WRITE_STALL, &time_delayed);
    uint64_t delay = write_controller_.GetDelay(env_, num_bytes);
    if (delay > 0) {
      if (write_options.no_slowdown) {
        return Status::Incomplete("Write stall");
      }
      TEST_SYNC_POINT("DBImpl::DelayWrite:Sleep");

      // Notify write_thread_ about the stall so it can set up a barrier and
      // fail any pending writers with no_slowdown
      write_thread_.BeginWriteStall();
      TEST_SYNC_POINT("DBImpl::DelayWrite:BeginWriteStallDone");
      mutex_.Unlock();
      // We will delay the write until we have slept for `delay` microseconds
      // or we no longer need a delay
      const uint64_t kDelayInterval = 1000;
      uint64_t stall_end = sw.start_time() + delay;
      while (write_controller_.NeedsDelay()) {
        if (env_->NowMicros() >= stall_end) {
          // We already delayed this write `delay` microseconds
          break;
        }

        delayed = true;
        // Sleep for 0.001 seconds
        env_->SleepForMicroseconds(kDelayInterval);
      }
      mutex_.Lock();
      write_thread_.EndWriteStall();
    }

    // Don't wait if there's a background error, even if it's a soft error. We
    // might wait here indefinitely as the background compaction may never
    // finish successfully, resulting in the stall condition lasting
    // indefinitely
    while (error_handler_.GetBGError().ok() && write_controller_.IsStopped()) {
      if (write_options.no_slowdown) {
        return Status::Incomplete("Write stall");
      }
      delayed = true;

      // Notify write_thread_ about the stall so it can set up a barrier and
      // fail any pending writers with no_slowdown
      write_thread_.BeginWriteStall();
      TEST_SYNC_POINT("DBImpl::DelayWrite:Wait");
      bg_cv_.Wait();
      write_thread_.EndWriteStall();
    }
  }
  assert(!delayed || !write_options.no_slowdown);
  if (delayed) {
    default_cf_internal_stats_->AddDBStats(
        InternalStats::kIntStatsWriteStallMicros, time_delayed);
    RecordTick(stats_, STALL_MICROS, time_delayed);
  }

  // If DB is not in read-only mode and write_controller is not stopping
  // writes, we can ignore any background errors and allow the write to
  // proceed
  Status s;
  if (write_controller_.IsStopped()) {
    // If writes are still stopped, it means we bailed due to a background
    // error
    s = Status::Incomplete(error_handler_.GetBGError().ToString());
  }
  if (error_handler_.IsDBStopped()) {
    s = error_handler_.GetBGError();
  }
  return s;
}

Status DBImpl::ThrottleLowPriWritesIfNeeded(const WriteOptions& write_options,
                                            WriteBatch* my_batch) {
  assert(write_options.low_pri);
  // This is called outside the DB mutex. Although it is safe to make the call,
  // the consistency condition is not guaranteed to hold. It's OK to live with
  // it in this case.
  // If we need to speed up compaction, it means compaction has fallen behind
  // and we start to throttle low-pri writes to a limit.
  if (write_controller_.NeedSpeedupCompaction()) {
    if (allow_2pc() && (my_batch->HasCommit() || my_batch->HasRollback())) {
      // For 2PC, we only rate limit prepare, not commit.
      return Status::OK();
    }
    if (write_options.no_slowdown) {
      return Status::Incomplete();
    } else {
      assert(my_batch != nullptr);
      // Rate limit those writes. The reason we don't block them completely is
      // that, under heavy write load, low-pri writes might otherwise never
      // get a chance to run. Rate limiting guarantees that they still make
      // slow progress.
      PERF_TIMER_GUARD(write_delay_time);
      write_controller_.low_pri_rate_limiter()->Request(
          my_batch->GetDataSize(), Env::IO_HIGH, nullptr /* stats */,
          RateLimiter::OpType::kWrite);
    }
  }
  return Status::OK();
}

void DBImpl::MaybeFlushStatsCF(autovector<ColumnFamilyData*>* cfds) {
  assert(cfds != nullptr);
  if (!cfds->empty() && immutable_db_options_.persist_stats_to_disk) {
    ColumnFamilyData* cfd_stats =
        versions_->GetColumnFamilySet()->GetColumnFamily(
            kPersistentStatsColumnFamilyName);
    if (cfd_stats != nullptr && !cfd_stats->mem()->IsEmpty()) {
      for (ColumnFamilyData* cfd : *cfds) {
        if (cfd == cfd_stats) {
          // stats CF already included in cfds
          return;
        }
      }
      // force flush stats CF when its log number is less than all other CF's
      // log numbers
      bool force_flush_stats_cf = true;
      for (auto* loop_cfd : *versions_->GetColumnFamilySet()) {
        if (loop_cfd == cfd_stats) {
          continue;
        }
        if (loop_cfd->GetLogNumber() <= cfd_stats->GetLogNumber()) {
          force_flush_stats_cf = false;
        }
      }
      if (force_flush_stats_cf) {
        cfds->push_back(cfd_stats);
        ROCKS_LOG_INFO(immutable_db_options_.info_log,
                       "Force flushing stats CF with automated flush "
                       "to avoid holding old logs");
      }
    }
  }
}

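// Releases memtables that have already been flushed and are retained only as
// in-memory history (e.g., for write-conflict checking), for every column
// family queued in trim_history_scheduler_.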
Status DBImpl::TrimMemtableHistory(WriteContext* context) {
  autovector<ColumnFamilyData*> cfds;
  ColumnFamilyData* tmp_cfd;
  while ((tmp_cfd = trim_history_scheduler_.TakeNextColumnFamily()) !=
         nullptr) {
    cfds.push_back(tmp_cfd);
  }
  for (auto& cfd : cfds) {
    autovector<MemTable*> to_delete;
    cfd->imm()->TrimHistory(&to_delete, cfd->mem()->ApproximateMemoryUsage());
    for (auto m : to_delete) {
      delete m;
    }
    context->superversion_context.NewSuperVersion();
    assert(context->superversion_context.new_superversion.get() != nullptr);
    cfd->InstallSuperVersion(&context->superversion_context, &mutex_);

    if (cfd->UnrefAndTryDelete()) {
      cfd = nullptr;
    }
  }
  return Status::OK();
}

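// Switches the memtable of every column family picked by flush_scheduler_
// (or of all column families with data, when atomic_flush is set) and queues
// the resulting immutable memtables for background flush.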
Status DBImpl::ScheduleFlushes(WriteContext* context) {
  autovector<ColumnFamilyData*> cfds;
  if (immutable_db_options_.atomic_flush) {
    SelectColumnFamiliesForAtomicFlush(&cfds);
    for (auto cfd : cfds) {
      cfd->Ref();
    }
    flush_scheduler_.Clear();
  } else {
    ColumnFamilyData* tmp_cfd;
    while ((tmp_cfd = flush_scheduler_.TakeNextColumnFamily()) != nullptr) {
      cfds.push_back(tmp_cfd);
    }
    MaybeFlushStatsCF(&cfds);
  }
  Status status;
  WriteThread::Writer nonmem_w;
  if (two_write_queues_) {
    nonmem_write_thread_.EnterUnbatched(&nonmem_w, &mutex_);
  }

  for (auto& cfd : cfds) {
    if (!cfd->mem()->IsEmpty()) {
      status = SwitchMemtable(cfd, context);
    }
    if (cfd->UnrefAndTryDelete()) {
      cfd = nullptr;
    }
    if (!status.ok()) {
      break;
    }
  }

  if (two_write_queues_) {
    nonmem_write_thread_.ExitUnbatched(&nonmem_w);
  }

  if (status.ok()) {
    if (immutable_db_options_.atomic_flush) {
      AssignAtomicFlushSeq(cfds);
    }
    FlushRequest flush_req;
    GenerateFlushRequest(cfds, &flush_req);
    SchedulePendingFlush(flush_req, FlushReason::kWriteBufferFull);
    MaybeScheduleFlushOrCompaction();
  }
  return status;
}

#ifndef ROCKSDB_LITE
void DBImpl::NotifyOnMemTableSealed(ColumnFamilyData* /*cfd*/,
                                    const MemTableInfo& mem_table_info) {
  if (immutable_db_options_.listeners.size() == 0U) {
    return;
  }
  if (shutting_down_.load(std::memory_order_acquire)) {
    return;
  }

  for (auto listener : immutable_db_options_.listeners) {
    listener->OnMemTableSealed(mem_table_info);
  }
}
#endif  // ROCKSDB_LITE

// REQUIRES: mutex_ is held
// REQUIRES: this thread is currently at the front of the writer queue
// REQUIRES: this thread is currently at the front of the 2nd writer queue if
// two_write_queues_ is true (This is to simplify the reasoning.)
Status DBImpl::SwitchMemtable(ColumnFamilyData* cfd, WriteContext* context) {
  mutex_.AssertHeld();
  WriteThread::Writer nonmem_w;
  std::unique_ptr<WritableFile> lfile;
  log::Writer* new_log = nullptr;
  MemTable* new_mem = nullptr;

  // Recoverable state is persisted in WAL. After memtable switch, WAL might
  // be deleted, so we write the state to memtable to be persisted as well.
  Status s = WriteRecoverableState();
  if (!s.ok()) {
    return s;
  }

  // Attempt to switch to a new memtable and trigger flush of old.
  // Do this without holding the dbmutex lock.
  assert(versions_->prev_log_number() == 0);
  if (two_write_queues_) {
    log_write_mutex_.Lock();
  }
  bool creating_new_log = !log_empty_;
  if (two_write_queues_) {
    log_write_mutex_.Unlock();
  }
  uint64_t recycle_log_number = 0;
  if (creating_new_log && immutable_db_options_.recycle_log_file_num &&
      !log_recycle_files_.empty()) {
    recycle_log_number = log_recycle_files_.front();
    log_recycle_files_.pop_front();
  }
  uint64_t new_log_number =
      creating_new_log ? versions_->NewFileNumber() : logfile_number_;
  const MutableCFOptions mutable_cf_options = *cfd->GetLatestMutableCFOptions();

  // Set memtable_info for memtable sealed callback
#ifndef ROCKSDB_LITE
  MemTableInfo memtable_info;
  memtable_info.cf_name = cfd->GetName();
  memtable_info.first_seqno = cfd->mem()->GetFirstSequenceNumber();
  memtable_info.earliest_seqno = cfd->mem()->GetEarliestSequenceNumber();
  memtable_info.num_entries = cfd->mem()->num_entries();
  memtable_info.num_deletes = cfd->mem()->num_deletes();
#endif  // ROCKSDB_LITE
  // Log this later after lock release. It may be outdated, e.g., if background
  // flush happens before logging, but that should be ok.
  int num_imm_unflushed = cfd->imm()->NumNotFlushed();
  const auto preallocate_block_size =
      GetWalPreallocateBlockSize(mutable_cf_options.write_buffer_size);
  mutex_.Unlock();
  if (creating_new_log) {
    // TODO: Write buffer size passed in should be max of all CF's instead
    // of mutable_cf_options.write_buffer_size.
    s = CreateWAL(new_log_number, recycle_log_number, preallocate_block_size,
                  &new_log);
  }
  if (s.ok()) {
    SequenceNumber seq = versions_->LastSequence();
    new_mem = cfd->ConstructNewMemtable(mutable_cf_options, seq);
    context->superversion_context.NewSuperVersion();
  }
  ROCKS_LOG_INFO(immutable_db_options_.info_log,
                 "[%s] New memtable created with log file: #%" PRIu64
                 ". Immutable memtables: %d.\n",
                 cfd->GetName().c_str(), new_log_number, num_imm_unflushed);
  mutex_.Lock();
  if (s.ok() && creating_new_log) {
    log_write_mutex_.Lock();
    assert(new_log != nullptr);
    if (!logs_.empty()) {
      // Always flush the buffer of the last log before switching to a new one
      log::Writer* cur_log_writer = logs_.back().writer;
      s = cur_log_writer->WriteBuffer();
      if (!s.ok()) {
        ROCKS_LOG_WARN(immutable_db_options_.info_log,
                       "[%s] Failed to switch from #%" PRIu64 " to #%" PRIu64
                       "  WAL file\n",
                       cfd->GetName().c_str(), cur_log_writer->get_log_number(),
                       new_log_number);
      }
    }
    if (s.ok()) {
      logfile_number_ = new_log_number;
      log_empty_ = true;
      log_dir_synced_ = false;
      logs_.emplace_back(logfile_number_, new_log);
      alive_log_files_.push_back(LogFileNumberSize(logfile_number_));
    }
    log_write_mutex_.Unlock();
  }

  if (!s.ok()) {
    // How do we fail if we're not creating a new log?
    assert(creating_new_log);
    if (new_mem) {
      delete new_mem;
    }
    if (new_log) {
      delete new_log;
    }
    SuperVersion* new_superversion =
        context->superversion_context.new_superversion.release();
    if (new_superversion != nullptr) {
      delete new_superversion;
    }
    // We may have lost data from the WritableFileBuffer in-memory buffer for
    // the current log, so treat it as a fatal error and set bg_error
    error_handler_.SetBGError(s, BackgroundErrorReason::kMemTable);
    // Read back bg_error in order to get the right severity
    s = error_handler_.GetBGError();
    return s;
  }

  for (auto loop_cfd : *versions_->GetColumnFamilySet()) {
    // all this is just optimization to delete logs that
    // are no longer needed -- if CF is empty, that means it
    // doesn't need that particular log to stay alive, so we just
    // advance the log number. no need to persist this in the manifest
    if (loop_cfd->mem()->GetFirstSequenceNumber() == 0 &&
        loop_cfd->imm()->NumNotFlushed() == 0) {
      if (creating_new_log) {
        loop_cfd->SetLogNumber(logfile_number_);
      }
      loop_cfd->mem()->SetCreationSeq(versions_->LastSequence());
    }
  }

  cfd->mem()->SetNextLogNumber(logfile_number_);
  cfd->imm()->Add(cfd->mem(), &context->memtables_to_free_);
  new_mem->Ref();
  cfd->SetMemtable(new_mem);
  InstallSuperVersionAndScheduleWork(cfd, &context->superversion_context,
                                     mutable_cf_options);
#ifndef ROCKSDB_LITE
  mutex_.Unlock();
  // Notify client that memtable is sealed, now that we have successfully
  // installed a new memtable
  NotifyOnMemTableSealed(cfd, memtable_info);
  mutex_.Lock();
#endif  // ROCKSDB_LITE
  return s;
}

size_t DBImpl::GetWalPreallocateBlockSize(uint64_t write_buffer_size) const {
  mutex_.AssertHeld();
  size_t bsize =
      static_cast<size_t>(write_buffer_size / 10 + write_buffer_size);
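  // i.e., 110% of the write buffer size; e.g., a 64 MB write_buffer_size
  // yields roughly 70.4 MB here (illustrative), before the caps below apply.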
  // Some users might set very high write_buffer_size and rely on
  // max_total_wal_size or other parameters to control the WAL size.
  if (mutable_db_options_.max_total_wal_size > 0) {
    bsize = std::min<size_t>(
        bsize, static_cast<size_t>(mutable_db_options_.max_total_wal_size));
  }
  if (immutable_db_options_.db_write_buffer_size > 0) {
    bsize = std::min<size_t>(bsize, immutable_db_options_.db_write_buffer_size);
  }
  if (immutable_db_options_.write_buffer_manager &&
      immutable_db_options_.write_buffer_manager->enabled()) {
    bsize = std::min<size_t>(
        bsize, immutable_db_options_.write_buffer_manager->buffer_size());
  }

  return bsize;
}

// Default implementations of convenience methods that subclasses of DB
// can call if they wish
Status DB::Put(const WriteOptions& opt, ColumnFamilyHandle* column_family,
               const Slice& key, const Slice& value) {
  if (nullptr == opt.timestamp) {
    // Pre-allocate size of write batch conservatively.
    // 8 bytes are taken by header, 4 bytes for count, 1 byte for type,
    // and we allocate 11 extra bytes for key length, as well as value length.
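    // (8 + 4 + 1 + 11 = 24 bytes of fixed overhead, hence the "+ 24" below.)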
    WriteBatch batch(key.size() + value.size() + 24);
    Status s = batch.Put(column_family, key, value);
    if (!s.ok()) {
      return s;
    }
    return Write(opt, &batch);
  }
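  // Illustrative caller-side sketch (assuming a column family whose
  // comparator expects a timestamp of this size):
  //   Slice ts = ...;  // encoded timestamp
  //   WriteOptions write_opts;
  //   write_opts.timestamp = &ts;
  //   Status s = db->Put(write_opts, cfh, key, value);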
  const Slice* ts = opt.timestamp;
  assert(nullptr != ts);
  size_t ts_sz = ts->size();
  WriteBatch batch(key.size() + ts_sz + value.size() + 24, /*max_bytes=*/0,
                   ts_sz);
  Status s = batch.Put(column_family, key, value);
  if (!s.ok()) {
    return s;
  }
  s = batch.AssignTimestamp(*ts);
  if (!s.ok()) {
    return s;
  }
  return Write(opt, &batch);
}

Status DB::Delete(const WriteOptions& opt, ColumnFamilyHandle* column_family,
                  const Slice& key) {
  WriteBatch batch;
  batch.Delete(column_family, key);
  return Write(opt, &batch);
}

Status DB::SingleDelete(const WriteOptions& opt,
                        ColumnFamilyHandle* column_family, const Slice& key) {
  WriteBatch batch;
  batch.SingleDelete(column_family, key);
  return Write(opt, &batch);
}

Status DB::DeleteRange(const WriteOptions& opt,
                       ColumnFamilyHandle* column_family,
                       const Slice& begin_key, const Slice& end_key) {
  WriteBatch batch;
  batch.DeleteRange(column_family, begin_key, end_key);
  return Write(opt, &batch);
}

Status DB::Merge(const WriteOptions& opt, ColumnFamilyHandle* column_family,
                 const Slice& key, const Slice& value) {
  WriteBatch batch;
  Status s = batch.Merge(column_family, key, value);
  if (!s.ok()) {
    return s;
  }
  return Write(opt, &batch);
}
}  // namespace rocksdb