//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
//  This source code is licensed under the BSD-style license found in the
//  LICENSE file in the root directory of this source tree. An additional grant
//  of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/compaction_job.h"

#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif

#include <inttypes.h>
#include <algorithm>
#include <functional>
#include <list>
#include <memory>
#include <random>
#include <set>
#include <thread>
#include <utility>
#include <vector>

#include "db/builder.h"
#include "db/db_iter.h"
#include "db/dbformat.h"
#include "db/event_helpers.h"
#include "db/filename.h"
#include "db/log_reader.h"
#include "db/log_writer.h"
#include "db/memtable.h"
#include "db/memtable_list.h"
#include "db/merge_context.h"
#include "db/merge_helper.h"
#include "db/version_set.h"
#include "port/likely.h"
#include "port/port.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/statistics.h"
#include "rocksdb/status.h"
#include "rocksdb/table.h"
#include "table/block.h"
#include "table/block_based_table_factory.h"
#include "table/merger.h"
#include "table/table_builder.h"
#include "util/coding.h"
#include "util/file_reader_writer.h"
#include "util/iostats_context_imp.h"
#include "util/log_buffer.h"
#include "util/logging.h"
#include "util/mutexlock.h"
#include "util/perf_context_imp.h"
#include "util/sst_file_manager_impl.h"
#include "util/stop_watch.h"
#include "util/string_util.h"
#include "util/sync_point.h"
#include "util/thread_status_util.h"

namespace rocksdb {

65
// Maintains state for each sub-compaction
66
struct CompactionJob::SubcompactionState {
67
  const Compaction* compaction;
68
  std::unique_ptr<CompactionIterator> c_iter;
I
Igor Canadi 已提交
69

70 71 72 73 74
  // The boundaries of the key-range this compaction is interested in. No two
  // subcompactions may have overlapping key-ranges.
  // 'start' is inclusive, 'end' is exclusive, and nullptr means unbounded
  Slice *start, *end;

75
  // The return status of this subcompaction
76 77
  Status status;

78
  // Files produced by this subcompaction
I
Igor Canadi 已提交
79
  struct Output {
80 81
    FileMetaData meta;
    bool finished;
82
    std::shared_ptr<const TableProperties> table_properties;
I
Igor Canadi 已提交
83 84 85
  };

  // State kept for output being generated
86
  std::vector<Output> outputs;
87
  std::unique_ptr<WritableFileWriter> outfile;
I
Igor Canadi 已提交
88
  std::unique_ptr<TableBuilder> builder;
89
  Output* current_output() {
90
    if (outputs.empty()) {
91
      // This subcompaction's outptut could be empty if compaction was aborted
92 93
      // before this subcompaction had a chance to generate any output files.
      // When subcompactions are executed sequentially this is more likely and
94
      // will be particulalry likely for the later subcompactions to be empty.
95 96 97 98 99
      // Once they are run in parallel however it should be much rarer.
      return nullptr;
    } else {
      return &outputs.back();
    }
100
  }
I
Igor Canadi 已提交
101

102 103
  uint64_t current_output_file_size;

104
  // State during the subcompaction
I
Igor Canadi 已提交
105
  uint64_t total_bytes;
106 107 108
  uint64_t num_input_records;
  uint64_t num_output_records;
  CompactionJobStats compaction_job_stats;
109
  uint64_t approx_size;
110
  // An index that used to speed up ShouldStopBefore().
111 112
  size_t grandparent_index = 0;
  // The number of bytes overlapping between the current output and
113
  // grandparent files used in ShouldStopBefore().
114
  uint64_t overlapped_bytes = 0;
115
  // A flag determine whether the key has been seen in ShouldStopBefore()
116
  bool seen_key = false;
117
  std::string compression_dict;
118

119 120
  SubcompactionState(Compaction* c, Slice* _start, Slice* _end,
                     uint64_t size = 0)
121 122 123 124 125
      : compaction(c),
        start(_start),
        end(_end),
        outfile(nullptr),
        builder(nullptr),
126
        current_output_file_size(0),
127 128
        total_bytes(0),
        num_input_records(0),
129
        num_output_records(0),
130 131 132
        approx_size(size),
        grandparent_index(0),
        overlapped_bytes(0),
133 134
        seen_key(false),
        compression_dict() {
D
Dmitri Smirnov 已提交
135 136
    assert(compaction != nullptr);
  }
D
Dmitri Smirnov 已提交
137

138
  SubcompactionState(SubcompactionState&& o) { *this = std::move(o); }
D
Dmitri Smirnov 已提交
139

140
  SubcompactionState& operator=(SubcompactionState&& o) {
D
Dmitri Smirnov 已提交
141 142 143 144 145 146 147
    compaction = std::move(o.compaction);
    start = std::move(o.start);
    end = std::move(o.end);
    status = std::move(o.status);
    outputs = std::move(o.outputs);
    outfile = std::move(o.outfile);
    builder = std::move(o.builder);
148
    current_output_file_size = std::move(o.current_output_file_size);
D
Dmitri Smirnov 已提交
149 150 151
    total_bytes = std::move(o.total_bytes);
    num_input_records = std::move(o.num_input_records);
    num_output_records = std::move(o.num_output_records);
152 153
    compaction_job_stats = std::move(o.compaction_job_stats);
    approx_size = std::move(o.approx_size);
154 155 156
    grandparent_index = std::move(o.grandparent_index);
    overlapped_bytes = std::move(o.overlapped_bytes);
    seen_key = std::move(o.seen_key);
157
    compression_dict = std::move(o.compression_dict);
D
Dmitri Smirnov 已提交
158 159 160 161
    return *this;
  }

  // Because member unique_ptrs do not have these.
162
  SubcompactionState(const SubcompactionState&) = delete;
D
Dmitri Smirnov 已提交
163

164
  SubcompactionState& operator=(const SubcompactionState&) = delete;
165 166 167

  // Returns true iff we should stop building the current output
  // before processing "internal_key".
168
  bool ShouldStopBefore(const Slice& internal_key, uint64_t curr_file_size) {
169 170 171 172 173 174 175 176 177 178 179 180 181 182 183
    const InternalKeyComparator* icmp =
        &compaction->column_family_data()->internal_comparator();
    const std::vector<FileMetaData*>& grandparents = compaction->grandparents();

    // Scan to find earliest grandparent file that contains key.
    while (grandparent_index < grandparents.size() &&
           icmp->Compare(internal_key,
                         grandparents[grandparent_index]->largest.Encode()) >
               0) {
      if (seen_key) {
        overlapped_bytes += grandparents[grandparent_index]->fd.GetFileSize();
      }
      assert(grandparent_index + 1 >= grandparents.size() ||
             icmp->Compare(
                 grandparents[grandparent_index]->largest.Encode(),
184
                 grandparents[grandparent_index + 1]->smallest.Encode()) <= 0);
185 186 187 188
      grandparent_index++;
    }
    seen_key = true;

189 190
    if (overlapped_bytes + curr_file_size >
        compaction->max_compaction_bytes()) {
191 192 193 194 195 196 197
      // Too much overlap for current output; start new output
      overlapped_bytes = 0;
      return true;
    }

    return false;
  }
198 199 200 201 202
};

// Maintains state for the entire compaction
struct CompactionJob::CompactionState {
  Compaction* const compaction;

  // REQUIRED: subcompaction states are stored in order of increasing
  // key-range
  std::vector<CompactionJob::SubcompactionState> sub_compact_states;
  Status status;

  // Aggregated (across all subcompactions) by AggregateStatistics().
  uint64_t total_bytes;
  uint64_t num_input_records;
  uint64_t num_output_records;

  explicit CompactionState(Compaction* c)
      : compaction(c),
        total_bytes(0),
        num_input_records(0),
        num_output_records(0) {}

  // Total number of output files produced by all subcompactions.
  size_t NumOutputFiles() {
    size_t total = 0;
    for (auto& s : sub_compact_states) {
      total += s.outputs.size();
    }
    return total;
  }

  // Smallest user key of any finished output; relies on sub_compact_states
  // being ordered by increasing key-range.
  Slice SmallestUserKey() {
    for (const auto& sub_compact_state : sub_compact_states) {
      if (!sub_compact_state.outputs.empty() &&
          sub_compact_state.outputs[0].finished) {
        return sub_compact_state.outputs[0].meta.smallest.user_key();
      }
    }
    // If there is no finished output, return an empty slice.
    return Slice(nullptr, 0);
  }

  // Largest user key of any finished output; scans subcompactions in
  // reverse key order.
  Slice LargestUserKey() {
    for (auto it = sub_compact_states.rbegin(); it < sub_compact_states.rend();
         ++it) {
      if (!it->outputs.empty() && it->current_output()->finished) {
        assert(it->current_output() != nullptr);
        return it->current_output()->meta.largest.user_key();
      }
    }
    // If there is no finished output, return an empty slice.
    return Slice(nullptr, 0);
  }
};

251
void CompactionJob::AggregateStatistics() {
252
  for (SubcompactionState& sc : compact_->sub_compact_states) {
253 254 255
    compact_->total_bytes += sc.total_bytes;
    compact_->num_input_records += sc.num_input_records;
    compact_->num_output_records += sc.num_output_records;
256 257 258
  }
  if (compaction_job_stats_) {
    for (SubcompactionState& sc : compact_->sub_compact_states) {
259 260 261 262 263
      compaction_job_stats_->Add(sc.compaction_job_stats);
    }
  }
}

I
Igor Canadi 已提交
264
// Sets up job-wide state and registers the compaction with the thread-status
// tracking machinery. The heavy lifting happens later in Prepare()/Run().
CompactionJob::CompactionJob(
    int job_id, Compaction* compaction, const ImmutableDBOptions& db_options,
    const EnvOptions& env_options, VersionSet* versions,
    std::atomic<bool>* shutting_down, LogBuffer* log_buffer,
    Directory* db_directory, Directory* output_directory, Statistics* stats,
    InstrumentedMutex* db_mutex, Status* db_bg_error,
    std::vector<SequenceNumber> existing_snapshots,
    SequenceNumber earliest_write_conflict_snapshot,
    std::shared_ptr<Cache> table_cache, EventLogger* event_logger,
    bool paranoid_file_checks, bool measure_io_stats, const std::string& dbname,
    CompactionJobStats* compaction_job_stats)
    : job_id_(job_id),
      compact_(new CompactionState(compaction)),
      compaction_job_stats_(compaction_job_stats),
      compaction_stats_(1),
      dbname_(dbname),
      db_options_(db_options),
      env_options_(env_options),
      env_(db_options.env),
      versions_(versions),
      shutting_down_(shutting_down),
      log_buffer_(log_buffer),
      db_directory_(db_directory),
      output_directory_(output_directory),
      stats_(stats),
      db_mutex_(db_mutex),
      db_bg_error_(db_bg_error),
      existing_snapshots_(std::move(existing_snapshots)),
      earliest_write_conflict_snapshot_(earliest_write_conflict_snapshot),
      table_cache_(std::move(table_cache)),
      event_logger_(event_logger),
      paranoid_file_checks_(paranoid_file_checks),
      measure_io_stats_(measure_io_stats) {
  assert(log_buffer_ != nullptr);
  const auto* cfd = compact_->compaction->column_family_data();
  ThreadStatusUtil::SetColumnFamily(cfd, cfd->ioptions()->env,
                                    db_options_.enable_thread_tracking);
  ThreadStatusUtil::SetThreadOperation(ThreadStatus::OP_COMPACTION);
  ReportStartedCompaction(compaction);
}

CompactionJob::~CompactionJob() {
  // compact_ must have been released by CleanupCompaction() before the job
  // is destroyed.
  assert(compact_ == nullptr);
  ThreadStatusUtil::ResetThreadStatus();
}
I
Igor Canadi 已提交
309

310 311
// Publishes this compaction's properties (job id, levels, flags, input size)
// to the thread-status framework so GetThreadList() can report them.
void CompactionJob::ReportStartedCompaction(
    Compaction* compaction) {
  const auto* cfd = compact_->compaction->column_family_data();
  ThreadStatusUtil::SetColumnFamily(cfd, cfd->ioptions()->env,
                                    db_options_.enable_thread_tracking);

  ThreadStatusUtil::SetThreadOperationProperty(
      ThreadStatus::COMPACTION_JOB_ID,
      job_id_);

  // Pack start level into the high 32 bits and output level into the low 32.
  ThreadStatusUtil::SetThreadOperationProperty(
      ThreadStatus::COMPACTION_INPUT_OUTPUT_LEVEL,
      (static_cast<uint64_t>(compact_->compaction->start_level()) << 32) +
          compact_->compaction->output_level());

  // In the current design, a CompactionJob is always created
  // for non-trivial compaction.
  assert(compaction->IsTrivialMove() == false ||
         compaction->is_manual_compaction() == true);

  ThreadStatusUtil::SetThreadOperationProperty(
      ThreadStatus::COMPACTION_PROP_FLAGS,
      compaction->is_manual_compaction() +
          (compaction->deletion_compaction() << 1));

  ThreadStatusUtil::SetThreadOperationProperty(
      ThreadStatus::COMPACTION_TOTAL_INPUT_BYTES,
      compaction->CalculateTotalInputSize());

  IOSTATS_RESET(bytes_written);
  IOSTATS_RESET(bytes_read);
  ThreadStatusUtil::SetThreadOperationProperty(
      ThreadStatus::COMPACTION_BYTES_WRITTEN, 0);
  ThreadStatusUtil::SetThreadOperationProperty(
      ThreadStatus::COMPACTION_BYTES_READ, 0);

  // Set the thread operation after operation properties
  // to ensure GetThreadList() can always show them all together.
  ThreadStatusUtil::SetThreadOperation(
      ThreadStatus::OP_COMPACTION);

  if (compaction_job_stats_) {
    compaction_job_stats_->is_manual_compaction =
        compaction->is_manual_compaction();
  }
}

I
Igor Canadi 已提交
357
void CompactionJob::Prepare() {
358 359
  AutoThreadOperationStageUpdater stage_updater(
      ThreadStatus::STAGE_COMPACTION_PREPARE);
I
Igor Canadi 已提交
360 361

  // Generate file_levels_ for compaction berfore making Iterator
362 363 364 365
  auto* c = compact_->compaction;
  assert(c->column_family_data() != nullptr);
  assert(c->column_family_data()->current()->storage_info()
      ->NumLevelFiles(compact_->compaction->level()) > 0);
I
Igor Canadi 已提交
366

367
  // Is this compaction producing files at the bottommost level?
368
  bottommost_level_ = c->bottommost_level();
369

370 371 372 373 374 375 376 377 378 379 380 381 382
  if (c->ShouldFormSubcompactions()) {
    const uint64_t start_micros = env_->NowMicros();
    GenSubcompactionBoundaries();
    MeasureTime(stats_, SUBCOMPACTION_SETUP_TIME,
                env_->NowMicros() - start_micros);

    assert(sizes_.size() == boundaries_.size() + 1);

    for (size_t i = 0; i <= boundaries_.size(); i++) {
      Slice* start = i == 0 ? nullptr : &boundaries_[i - 1];
      Slice* end = i == boundaries_.size() ? nullptr : &boundaries_[i];
      compact_->sub_compact_states.emplace_back(c, start, end, sizes_[i]);
    }
383 384
    MeasureTime(stats_, NUM_SUBCOMPACTIONS_SCHEDULED,
                compact_->sub_compact_states.size());
385 386 387
  } else {
    compact_->sub_compact_states.emplace_back(c, nullptr, nullptr);
  }
388 389
}

390 391 392
struct RangeWithSize {
  Range range;
  uint64_t size;
393

394 395 396 397 398 399 400 401 402 403 404 405
  RangeWithSize(const Slice& a, const Slice& b, uint64_t s = 0)
      : range(a, b), size(s) {}
};

// Generates a histogram representing potential divisions of key ranges from
// the input. It adds the starting and/or ending keys of certain input files
// to the working set and then finds the approximate size of data in between
// each consecutive pair of slices. Then it divides these ranges into
// consecutive groups such that each group has a similar size.
void CompactionJob::GenSubcompactionBoundaries() {
  auto* c = compact_->compaction;
  auto* cfd = c->column_family_data();
406 407
  const Comparator* cfd_comparator = cfd->user_comparator();
  std::vector<Slice> bounds;
408 409 410 411
  int start_lvl = c->start_level();
  int out_lvl = c->output_level();

  // Add the starting and/or ending key of certain input files as a potential
412
  // boundary
413 414 415 416 417 418 419
  for (size_t lvl_idx = 0; lvl_idx < c->num_input_levels(); lvl_idx++) {
    int lvl = c->level(lvl_idx);
    if (lvl >= start_lvl && lvl <= out_lvl) {
      const LevelFilesBrief* flevel = c->input_levels(lvl_idx);
      size_t num_files = flevel->num_files;

      if (num_files == 0) {
420
        continue;
421
      }
A
Ari Ekmekji 已提交
422

423 424 425 426
      if (lvl == 0) {
        // For level 0 add the starting and ending key of each file since the
        // files may have greatly differing key ranges (not range-partitioned)
        for (size_t i = 0; i < num_files; i++) {
427 428
          bounds.emplace_back(flevel->files[i].smallest_key);
          bounds.emplace_back(flevel->files[i].largest_key);
429 430 431 432
        }
      } else {
        // For all other levels add the smallest/largest key in the level to
        // encompass the range covered by that level
433 434
        bounds.emplace_back(flevel->files[0].smallest_key);
        bounds.emplace_back(flevel->files[num_files - 1].largest_key);
435 436 437 438 439 440
        if (lvl == out_lvl) {
          // For the last level include the starting keys of all files since
          // the last level is the largest and probably has the widest key
          // range. Since it's range partitioned, the ending key of one file
          // and the starting key of the next are very close (or identical).
          for (size_t i = 1; i < num_files; i++) {
441
            bounds.emplace_back(flevel->files[i].smallest_key);
A
Ari Ekmekji 已提交
442
          }
443 444 445 446
        }
      }
    }
  }
447

448 449 450 451 452 453 454 455 456 457
  std::sort(bounds.begin(), bounds.end(),
    [cfd_comparator] (const Slice& a, const Slice& b) -> bool {
      return cfd_comparator->Compare(ExtractUserKey(a), ExtractUserKey(b)) < 0;
    });
  // Remove duplicated entries from bounds
  bounds.erase(std::unique(bounds.begin(), bounds.end(),
    [cfd_comparator] (const Slice& a, const Slice& b) -> bool {
      return cfd_comparator->Compare(ExtractUserKey(a), ExtractUserKey(b)) == 0;
    }), bounds.end());

458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478
  // Combine consecutive pairs of boundaries into ranges with an approximate
  // size of data covered by keys in that range
  uint64_t sum = 0;
  std::vector<RangeWithSize> ranges;
  auto* v = cfd->current();
  for (auto it = bounds.begin();;) {
    const Slice a = *it;
    it++;

    if (it == bounds.end()) {
      break;
    }

    const Slice b = *it;
    uint64_t size = versions_->ApproximateSize(v, a, b, start_lvl, out_lvl + 1);
    ranges.emplace_back(a, b, size);
    sum += size;
  }

  // Group the ranges into subcompactions
  const double min_file_fill_percent = 4.0 / 5;
A
Aaron Gao 已提交
479 480 481
  uint64_t max_output_files = static_cast<uint64_t>(
      std::ceil(sum / min_file_fill_percent /
                c->mutable_cf_options()->MaxFileSizeForLevel(out_lvl)));
482 483 484 485 486 487 488 489 490 491 492 493
  uint64_t subcompactions =
      std::min({static_cast<uint64_t>(ranges.size()),
                static_cast<uint64_t>(db_options_.max_subcompactions),
                max_output_files});

  double mean = sum * 1.0 / subcompactions;

  if (subcompactions > 1) {
    // Greedily add ranges to the subcompaction until the sum of the ranges'
    // sizes becomes >= the expected mean size of a subcompaction
    sum = 0;
    for (size_t i = 0; i < ranges.size() - 1; i++) {
494
      sum += ranges[i].size;
495 496 497
      if (subcompactions == 1) {
        // If there's only one left to schedule then it goes to the end so no
        // need to put an end boundary
498
        continue;
499 500 501 502 503 504 505 506 507 508 509 510
      }
      if (sum >= mean) {
        boundaries_.emplace_back(ExtractUserKey(ranges[i].range.limit));
        sizes_.emplace_back(sum);
        subcompactions--;
        sum = 0;
      }
    }
    sizes_.emplace_back(sum + ranges.back().size);
  } else {
    // Only one range so its size is the total sum of sizes computed above
    sizes_.emplace_back(sum);
511
  }
I
Igor Canadi 已提交
512 513 514
}

// Executes all subcompactions (in parallel when there is more than one),
// syncs the output directory, gathers the per-subcompaction results, and
// returns the first subcompaction error (or OK).
Status CompactionJob::Run() {
  AutoThreadOperationStageUpdater stage_updater(
      ThreadStatus::STAGE_COMPACTION_RUN);
  TEST_SYNC_POINT("CompactionJob::Run():Start");
  log_buffer_->FlushBufferToLog();
  LogCompaction();

  const size_t num_threads = compact_->sub_compact_states.size();
  assert(num_threads > 0);
  const uint64_t start_micros = env_->NowMicros();

  // Launch a thread for each of subcompactions 1...num_threads-1
  std::vector<std::thread> thread_pool;
  thread_pool.reserve(num_threads - 1);
  for (size_t i = 1; i < compact_->sub_compact_states.size(); i++) {
    thread_pool.emplace_back(&CompactionJob::ProcessKeyValueCompaction, this,
                             &compact_->sub_compact_states[i]);
  }

  // Always schedule the first subcompaction (whether or not there are also
  // others) in the current thread to be efficient with resources
  ProcessKeyValueCompaction(&compact_->sub_compact_states[0]);

  // Wait for all other threads (if there are any) to finish execution
  for (auto& thread : thread_pool) {
    thread.join();
  }

  if (output_directory_ && !db_options_.disable_data_sync) {
    output_directory_->Fsync();
  }

  compaction_stats_.micros = env_->NowMicros() - start_micros;
  MeasureTime(stats_, COMPACTION_TIME, compaction_stats_.micros);

  // Check if any thread encountered an error during execution
  Status status;
  for (const auto& state : compact_->sub_compact_states) {
    if (!state.status.ok()) {
      status = state.status;
      break;
    }
  }

  // Expose the table properties of every output file on the Compaction
  // object so listeners/callers can inspect them.
  TablePropertiesCollection tp;
  for (const auto& state : compact_->sub_compact_states) {
    for (const auto& output : state.outputs) {
      auto fn = TableFileName(db_options_.db_paths, output.meta.fd.GetNumber(),
                              output.meta.fd.GetPathId());
      tp[fn] = output.table_properties;
    }
  }
  compact_->compaction->SetOutputTableProperties(std::move(tp));

  // Finish up all book-keeping to unify the subcompaction results
  AggregateStatistics();
  UpdateCompactionStats();
  RecordCompactionIOStats();
  LogFlush(db_options_.info_log);
  TEST_SYNC_POINT("CompactionJob::Run():End");

  compact_->status = status;
  return status;
}

579
// Installs the compaction results into the current version (under db_mutex_),
// records stats, and logs a human-readable and a structured summary.
Status CompactionJob::Install(const MutableCFOptions& mutable_cf_options) {
  AutoThreadOperationStageUpdater stage_updater(
      ThreadStatus::STAGE_COMPACTION_INSTALL);
  db_mutex_->AssertHeld();
  Status status = compact_->status;
  ColumnFamilyData* cfd = compact_->compaction->column_family_data();
  cfd->internal_stats()->AddCompactionStats(
      compact_->compaction->output_level(), compaction_stats_);

  if (status.ok()) {
    status = InstallCompactionResults(mutable_cf_options);
  }
  VersionStorageInfo::LevelSummaryStorage tmp;
  auto vstorage = cfd->current()->storage_info();
  const auto& stats = compaction_stats_;
  // NOTE: the record counters are uint64_t, so they must be printed with
  // PRIu64 (passing them to %d is undefined behavior).
  LogToBuffer(
      log_buffer_,
      "[%s] compacted to: %s, MB/sec: %.1f rd, %.1f wr, level %d, "
      "files in(%d, %d) out(%d) "
      "MB in(%.1f, %.1f) out(%.1f), read-write-amplify(%.1f) "
      "write-amplify(%.1f) %s, records in: %" PRIu64
      ", records dropped: %" PRIu64 "\n",
      cfd->GetName().c_str(), vstorage->LevelSummary(&tmp),
      (stats.bytes_read_non_output_levels + stats.bytes_read_output_level) /
          static_cast<double>(stats.micros),
      stats.bytes_written / static_cast<double>(stats.micros),
      compact_->compaction->output_level(),
      stats.num_input_files_in_non_output_levels,
      stats.num_input_files_in_output_level,
      stats.num_output_files,
      stats.bytes_read_non_output_levels / 1048576.0,
      stats.bytes_read_output_level / 1048576.0,
      stats.bytes_written / 1048576.0,
      (stats.bytes_written + stats.bytes_read_output_level +
       stats.bytes_read_non_output_levels) /
          static_cast<double>(stats.bytes_read_non_output_levels),
      stats.bytes_written /
          static_cast<double>(stats.bytes_read_non_output_levels),
      status.ToString().c_str(), stats.num_input_records,
      stats.num_dropped_records);

  UpdateCompactionJobStats(stats);

  // Structured (JSON) event log entry mirroring the text summary above.
  auto stream = event_logger_->LogToBuffer(log_buffer_);
  stream << "job" << job_id_
         << "event" << "compaction_finished"
         << "compaction_time_micros" << compaction_stats_.micros
         << "output_level" << compact_->compaction->output_level()
         << "num_output_files" << compact_->NumOutputFiles()
         << "total_output_size" << compact_->total_bytes
         << "num_input_records" << compact_->num_input_records
         << "num_output_records" << compact_->num_output_records
         << "num_subcompactions" << compact_->sub_compact_states.size();

  if (compaction_job_stats_ != nullptr) {
    stream << "num_single_delete_mismatches"
           << compaction_job_stats_->num_single_del_mismatch;
    stream << "num_single_delete_fallthrough"
           << compaction_job_stats_->num_single_del_fallthru;
  }

  if (measure_io_stats_ && compaction_job_stats_ != nullptr) {
    stream << "file_write_nanos" << compaction_job_stats_->file_write_nanos;
    stream << "file_range_sync_nanos"
           << compaction_job_stats_->file_range_sync_nanos;
    stream << "file_fsync_nanos" << compaction_job_stats_->file_fsync_nanos;
    stream << "file_prepare_write_nanos"
           << compaction_job_stats_->file_prepare_write_nanos;
  }

  stream << "lsm_state";
  stream.StartArray();
  for (int level = 0; level < vstorage->num_levels(); ++level) {
    stream << vstorage->NumLevelFiles(level);
  }
  stream.EndArray();

  CleanupCompaction();
  return status;
}

659
// Runs the key/value phase of one subcompaction: drains the merged input
// iterator through a CompactionIterator, writes the surviving entries into a
// sequence of output tables, and accumulates per-subcompaction statistics.
// The overall result is stored in sub_compact->status.
void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
  assert(sub_compact != nullptr);
  ColumnFamilyData* cfd = sub_compact->compaction->column_family_data();
  // Range tombstones observed in the input are collected here and written
  // into output files by FinishCompactionOutputFile().
  std::unique_ptr<RangeDelAggregator> range_del_agg(
      new RangeDelAggregator(cfd->internal_comparator(), existing_snapshots_));
  std::unique_ptr<InternalIterator> input(versions_->MakeInputIterator(
      sub_compact->compaction, range_del_agg.get()));

  AutoThreadOperationStageUpdater stage_updater(
      ThreadStatus::STAGE_COMPACTION_PROCESS_KV);

  // I/O measurement variables
  PerfLevel prev_perf_level = PerfLevel::kEnableTime;
  const uint64_t kRecordStatsEvery = 1000;
  uint64_t prev_write_nanos = 0;
  uint64_t prev_fsync_nanos = 0;
  uint64_t prev_range_sync_nanos = 0;
  uint64_t prev_prepare_write_nanos = 0;
  if (measure_io_stats_) {
    // Snapshot the thread-local I/O counters so the deltas can be attributed
    // to this subcompaction at the end of the function.
    prev_perf_level = GetPerfLevel();
    SetPerfLevel(PerfLevel::kEnableTime);
    prev_write_nanos = IOSTATS(write_nanos);
    prev_fsync_nanos = IOSTATS(fsync_nanos);
    prev_range_sync_nanos = IOSTATS(range_sync_nanos);
    prev_prepare_write_nanos = IOSTATS(prepare_write_nanos);
  }

  const MutableCFOptions* mutable_cf_options =
      sub_compact->compaction->mutable_cf_options();

  // To build compression dictionary, we sample the first output file, assuming
  // it'll reach the maximum length, and then use the dictionary for compressing
  // subsequent output files. The dictionary may be less than max_dict_bytes if
  // the first output file's length is less than the maximum.
  const int kSampleLenShift = 6;  // 2^6 = 64-byte samples
  std::set<size_t> sample_begin_offsets;
  if (bottommost_level_ &&
      cfd->ioptions()->compression_opts.max_dict_bytes > 0) {
    const size_t kMaxSamples =
        cfd->ioptions()->compression_opts.max_dict_bytes >> kSampleLenShift;
    const size_t kOutFileLen = mutable_cf_options->MaxFileSizeForLevel(
        compact_->compaction->output_level());
    if (kOutFileLen != port::kMaxSizet) {
      const size_t kOutFileNumSamples = kOutFileLen >> kSampleLenShift;
      Random64 generator{versions_->NewFileNumber()};
      for (size_t i = 0; i < kMaxSamples; ++i) {
        sample_begin_offsets.insert(generator.Uniform(kOutFileNumSamples)
                                    << kSampleLenShift);
      }
    }
  }

  // Prefer the column family's statically configured compaction filter; fall
  // back to one produced by the factory for this particular compaction.
  auto compaction_filter = cfd->ioptions()->compaction_filter;
  std::unique_ptr<CompactionFilter> compaction_filter_from_factory = nullptr;
  if (compaction_filter == nullptr) {
    compaction_filter_from_factory =
        sub_compact->compaction->CreateCompactionFilter();
    compaction_filter = compaction_filter_from_factory.get();
  }
  MergeHelper merge(
      env_, cfd->user_comparator(), cfd->ioptions()->merge_operator,
      compaction_filter, db_options_.info_log.get(),
      mutable_cf_options->min_partial_merge_operands,
      false /* internal key corruption is expected */,
      existing_snapshots_.empty() ? 0 : existing_snapshots_.back(),
      compact_->compaction->level(), db_options_.statistics.get());

  TEST_SYNC_POINT("CompactionJob::Run():Inprogress");

  // Position the input at this subcompaction's start boundary (or at the
  // very beginning when no boundary was assigned).
  Slice* start = sub_compact->start;
  Slice* end = sub_compact->end;
  if (start != nullptr) {
    IterKey start_iter;
    start_iter.SetInternalKey(*start, kMaxSequenceNumber, kValueTypeForSeek);
    input->Seek(start_iter.GetKey());
  } else {
    input->SeekToFirst();
  }

  Status status;
  sub_compact->c_iter.reset(new CompactionIterator(
      input.get(), cfd->user_comparator(), &merge, versions_->LastSequence(),
      &existing_snapshots_, earliest_write_conflict_snapshot_, env_, false,
      range_del_agg.get(), sub_compact->compaction, compaction_filter));
  auto c_iter = sub_compact->c_iter.get();
  c_iter->SeekToFirst();
  const auto& c_iter_stats = c_iter->iter_stats();
  auto sample_begin_offset_iter = sample_begin_offsets.cbegin();
  // data_begin_offset and compression_dict are only valid while generating
  // dictionary from the first output file.
  size_t data_begin_offset = 0;
  std::string compression_dict;
  compression_dict.reserve(cfd->ioptions()->compression_opts.max_dict_bytes);

  // TODO(noetzli): check whether we could check !shutting_down_->... only
  // only occasionally (see diff D42687)
  while (status.ok() && !shutting_down_->load(std::memory_order_acquire) &&
         !cfd->IsDropped() && c_iter->Valid()) {
    // Invariant: c_iter.status() is guaranteed to be OK if c_iter->Valid()
    // returns true.
    const Slice& key = c_iter->key();
    const Slice& value = c_iter->value();

    // If an end key (exclusive) is specified, check if the current key is
    // >= than it and exit if it is because the iterator is out of its range
    if (end != nullptr &&
        cfd->user_comparator()->Compare(c_iter->user_key(), *end) >= 0) {
      break;
    } else if (sub_compact->compaction->output_level() != 0 &&
               sub_compact->ShouldStopBefore(
                   key, sub_compact->current_output_file_size) &&
               sub_compact->builder != nullptr) {
      // ShouldStopBefore() requested a file cut: finish the current output
      // before adding this key.
      status = FinishCompactionOutputFile(input->status(), sub_compact,
                                          range_del_agg.get());
      if (!status.ok()) {
        break;
      }
    }

    // Periodically flush the iterator's per-interval counters into the
    // statistics objects.
    if (c_iter_stats.num_input_records % kRecordStatsEvery ==
        kRecordStatsEvery - 1) {
      RecordDroppedKeys(c_iter_stats, &sub_compact->compaction_job_stats);
      c_iter->ResetRecordCounts();
      RecordCompactionIOStats();
    }

    // Open output file if necessary
    if (sub_compact->builder == nullptr) {
      status = OpenCompactionOutputFile(sub_compact);
      if (!status.ok()) {
        break;
      }
    }
    assert(sub_compact->builder != nullptr);
    assert(sub_compact->current_output() != nullptr);
    sub_compact->builder->Add(key, value);
    sub_compact->current_output_file_size = sub_compact->builder->FileSize();
    sub_compact->current_output()->meta.UpdateBoundaries(
        key, c_iter->ikey().sequence);
    sub_compact->num_output_records++;

    if (sub_compact->outputs.size() == 1) {  // first output file
      // Check if this key/value overlaps any sample intervals; if so, appends
      // overlapping portions to the dictionary.
      for (const auto& data_elmt : {key, value}) {
        size_t data_end_offset = data_begin_offset + data_elmt.size();
        while (sample_begin_offset_iter != sample_begin_offsets.cend() &&
               *sample_begin_offset_iter < data_end_offset) {
          size_t sample_end_offset =
              *sample_begin_offset_iter + (1 << kSampleLenShift);
          // Invariant: Because we advance sample iterator while processing the
          // data_elmt containing the sample's last byte, the current sample
          // cannot end before the current data_elmt.
          assert(data_begin_offset < sample_end_offset);

          size_t data_elmt_copy_offset, data_elmt_copy_len;
          if (*sample_begin_offset_iter <= data_begin_offset) {
            // The sample starts before data_elmt starts, so take bytes starting
            // at the beginning of data_elmt.
            data_elmt_copy_offset = 0;
          } else {
            // data_elmt starts before the sample starts, so take bytes starting
            // at the below offset into data_elmt.
            data_elmt_copy_offset =
                *sample_begin_offset_iter - data_begin_offset;
          }
          if (sample_end_offset <= data_end_offset) {
            // The sample ends before data_elmt ends, so take as many bytes as
            // needed.
            data_elmt_copy_len =
                sample_end_offset - (data_begin_offset + data_elmt_copy_offset);
          } else {
            // data_elmt ends before the sample ends, so take all remaining
            // bytes in data_elmt.
            data_elmt_copy_len =
                data_end_offset - (data_begin_offset + data_elmt_copy_offset);
          }
          compression_dict.append(&data_elmt.data()[data_elmt_copy_offset],
                                  data_elmt_copy_len);
          if (sample_end_offset > data_end_offset) {
            // Didn't finish sample. Try to finish it with the next data_elmt.
            break;
          }
          // Next sample may require bytes from same data_elmt.
          sample_begin_offset_iter++;
        }
        data_begin_offset = data_end_offset;
      }
    }

    // Capture input->status() before advancing the compaction iterator,
    // which moves the underlying input past the entry just consumed.
    Status input_status = input->status();
    c_iter->Next();

    // Close output file if it is big enough
    // TODO(aekmekji): determine if file should be closed earlier than this
    // during subcompactions (i.e. if output size, estimated by input size, is
    // going to be 1.2MB and max_output_file_size = 1MB, prefer to have 0.6MB
    // and 0.6MB instead of 1MB and 0.2MB)
    if (sub_compact->compaction->output_level() != 0 &&
        sub_compact->current_output_file_size >=
            sub_compact->compaction->max_output_file_size()) {
      const Slice* next_key = nullptr;
      if (c_iter->Valid()) {
        next_key = &c_iter->key();
      }
      status = FinishCompactionOutputFile(input_status, sub_compact,
                                          range_del_agg.get(), next_key);
      if (sub_compact->outputs.size() == 1) {
        // Use dictionary from first output file for compression of subsequent
        // files.
        sub_compact->compression_dict = std::move(compression_dict);
      }
    }
  }

  // Propagate the iterator's final counters into this subcompaction's stats.
  sub_compact->num_input_records = c_iter_stats.num_input_records;
  sub_compact->compaction_job_stats.num_input_deletion_records =
      c_iter_stats.num_input_deletion_records;
  sub_compact->compaction_job_stats.num_corrupt_keys =
      c_iter_stats.num_input_corrupt_records;
  sub_compact->compaction_job_stats.num_single_del_fallthru =
      c_iter_stats.num_single_del_fallthru;
  sub_compact->compaction_job_stats.num_single_del_mismatch =
      c_iter_stats.num_single_del_mismatch;
  sub_compact->compaction_job_stats.total_input_raw_key_bytes +=
      c_iter_stats.total_input_raw_key_bytes;
  sub_compact->compaction_job_stats.total_input_raw_value_bytes +=
      c_iter_stats.total_input_raw_value_bytes;

  RecordTick(stats_, FILTER_OPERATION_TOTAL_TIME,
             c_iter_stats.total_filter_time);
  RecordDroppedKeys(c_iter_stats, &sub_compact->compaction_job_stats);
  RecordCompactionIOStats();

  // Distinguish an early exit caused by shutdown or column-family drop from
  // a clean completion.
  if (status.ok() &&
      (shutting_down_->load(std::memory_order_acquire) || cfd->IsDropped())) {
    status = Status::ShutdownInProgress(
        "Database shutdown or Column family drop during compaction");
  }
  // Even when no point keys were written, an output file may still be needed
  // to carry remaining range tombstones.
  if (status.ok() && sub_compact->builder == nullptr &&
      range_del_agg->ShouldAddTombstones(bottommost_level_)) {
    status = OpenCompactionOutputFile(sub_compact);
  }
  if (status.ok() && sub_compact->builder != nullptr) {
    status = FinishCompactionOutputFile(input->status(), sub_compact,
                                        range_del_agg.get());
  }
  if (status.ok()) {
    status = input->status();
  }

  if (measure_io_stats_) {
    // Attribute the I/O time spent since the snapshot taken above to this
    // subcompaction, then restore the caller's perf level.
    sub_compact->compaction_job_stats.file_write_nanos +=
        IOSTATS(write_nanos) - prev_write_nanos;
    sub_compact->compaction_job_stats.file_fsync_nanos +=
        IOSTATS(fsync_nanos) - prev_fsync_nanos;
    sub_compact->compaction_job_stats.file_range_sync_nanos +=
        IOSTATS(range_sync_nanos) - prev_range_sync_nanos;
    sub_compact->compaction_job_stats.file_prepare_write_nanos +=
        IOSTATS(prepare_write_nanos) - prev_prepare_write_nanos;
    if (prev_perf_level != PerfLevel::kEnableTime) {
      SetPerfLevel(prev_perf_level);
    }
  }

  sub_compact->c_iter.reset();
  input.reset();
  sub_compact->status = status;
}

929
void CompactionJob::RecordDroppedKeys(
930
    const CompactionIteratorStats& c_iter_stats,
931
    CompactionJobStats* compaction_job_stats) {
932 933 934
  if (c_iter_stats.num_record_drop_user > 0) {
    RecordTick(stats_, COMPACTION_KEY_DROP_USER,
               c_iter_stats.num_record_drop_user);
935
  }
936 937 938
  if (c_iter_stats.num_record_drop_hidden > 0) {
    RecordTick(stats_, COMPACTION_KEY_DROP_NEWER_ENTRY,
               c_iter_stats.num_record_drop_hidden);
939
    if (compaction_job_stats) {
940 941
      compaction_job_stats->num_records_replaced +=
          c_iter_stats.num_record_drop_hidden;
942 943
    }
  }
944 945 946
  if (c_iter_stats.num_record_drop_obsolete > 0) {
    RecordTick(stats_, COMPACTION_KEY_DROP_OBSOLETE,
               c_iter_stats.num_record_drop_obsolete);
947
    if (compaction_job_stats) {
948 949
      compaction_job_stats->num_expired_deletion_records +=
          c_iter_stats.num_record_drop_obsolete;
950
    }
951 952 953
  }
}

954
// Finalizes the subcompaction's current output table: writes the applicable
// range tombstones into it, finishes (or, on error, abandons) the table
// builder, syncs and closes the output file, verifies the new table is
// readable, and publishes creation events and statistics.
//
// Fix: removed the local `TableProperties table_properties;` that was
// declared but never referenced (the later local `tp` is the one used).
//
// @param input_status       status of the input iterator when the last entry
//                           was added; a non-OK value abandons the build.
// @param sub_compact        subcompaction whose current output is finished.
// @param range_del_agg      supplies range tombstones for this file.
// @param next_table_min_key smallest key of the next output table, if known;
//                           passed through to the tombstone writer.
// @return the first error encountered, or OK.
Status CompactionJob::FinishCompactionOutputFile(
    const Status& input_status, SubcompactionState* sub_compact,
    RangeDelAggregator* range_del_agg,
    const Slice* next_table_min_key /* = nullptr */) {
  AutoThreadOperationStageUpdater stage_updater(
      ThreadStatus::STAGE_COMPACTION_SYNC_FILE);
  assert(sub_compact != nullptr);
  assert(sub_compact->outfile);
  assert(sub_compact->builder != nullptr);
  assert(sub_compact->current_output() != nullptr);

  uint64_t output_number = sub_compact->current_output()->meta.fd.GetNumber();
  assert(output_number != 0);

  // Check for iterator errors
  Status s = input_status;
  auto meta = &sub_compact->current_output()->meta;
  if (s.ok()) {
    // For the first output table, include range tombstones before the min key
    // boundary. For subsequent output tables, this is unnecessary because we
    // extend each file's max key boundary up until the next file's min key when
    // range tombstones fall in the gap.
    range_del_agg->AddToBuilder(
        sub_compact->builder.get(),
        sub_compact->outputs.size() == 1 /* extend_before_min_key */,
        next_table_min_key, meta, bottommost_level_);
  }
  const uint64_t current_entries = sub_compact->builder->NumEntries();
  meta->marked_for_compaction = sub_compact->builder->NeedCompact();
  if (s.ok()) {
    s = sub_compact->builder->Finish();
  } else {
    sub_compact->builder->Abandon();
  }
  const uint64_t current_bytes = sub_compact->builder->FileSize();
  meta->fd.file_size = current_bytes;
  sub_compact->current_output()->finished = true;
  sub_compact->total_bytes += current_bytes;

  // Finish and check for file errors
  if (s.ok() && !db_options_.disable_data_sync) {
    StopWatch sw(env_, stats_, COMPACTION_OUTFILE_SYNC_MICROS);
    s = sub_compact->outfile->Sync(db_options_.use_fsync);
  }
  if (s.ok()) {
    s = sub_compact->outfile->Close();
  }
  sub_compact->outfile.reset();

  ColumnFamilyData* cfd = sub_compact->compaction->column_family_data();
  TableProperties tp;
  if (s.ok() && current_entries > 0) {
    // Verify that the table is usable
    InternalIterator* iter = cfd->table_cache()->NewIterator(
        ReadOptions(), env_options_, cfd->internal_comparator(), meta->fd,
        nullptr, cfd->internal_stats()->GetFileReadHist(
                     compact_->compaction->output_level()),
        false);
    s = iter->status();

    if (s.ok() && paranoid_file_checks_) {
      // Full scan of the freshly written table to surface corruption early.
      for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {}
      s = iter->status();
    }

    delete iter;

    // Output to event logger and fire events.
    if (s.ok()) {
      tp = sub_compact->builder->GetTableProperties();
      sub_compact->current_output()->table_properties =
          std::make_shared<TableProperties>(tp);
      Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
          "[%s] [JOB %d] Generated table #%" PRIu64 ": %" PRIu64
          " keys, %" PRIu64 " bytes%s",
          cfd->GetName().c_str(), job_id_, output_number, current_entries,
          current_bytes,
          meta->marked_for_compaction ? " (need compaction)" : "");
    }
  }
  std::string fname = TableFileName(db_options_.db_paths, meta->fd.GetNumber(),
                                    meta->fd.GetPathId());
  EventHelpers::LogAndNotifyTableFileCreationFinished(
      event_logger_, cfd->ioptions()->listeners, dbname_, cfd->GetName(), fname,
      job_id_, meta->fd, tp, TableFileCreationReason::kCompaction, s);

  // Report new file to SstFileManagerImpl
  auto sfm =
      static_cast<SstFileManagerImpl*>(db_options_.sst_file_manager.get());
  if (sfm && meta->fd.GetPathId() == 0) {
    auto fn = TableFileName(cfd->ioptions()->db_paths, meta->fd.GetNumber(),
                            meta->fd.GetPathId());
    sfm->OnAddFile(fn);
    if (sfm->IsMaxAllowedSpaceReached()) {
      // Convert the quota breach into a background error while holding the
      // DB mutex, unless an error is already pending.
      InstrumentedMutexLock l(db_mutex_);
      if (db_bg_error_->ok()) {
        s = Status::IOError("Max allowed space was reached");
        *db_bg_error_ = s;
        TEST_SYNC_POINT(
            "CompactionJob::FinishCompactionOutputFile:MaxAllowedSpaceReached");
      }
    }
  }

  sub_compact->builder.reset();
  sub_compact->current_output_file_size = 0;
  return s;
}

I
Igor Canadi 已提交
1064
// Applies the finished compaction to the LSM metadata while holding the DB
// mutex: verifies the input files are still current, records input deletions
// and output additions in the compaction's VersionEdit, and logs/applies the
// edit through VersionSet::LogAndApply().
//
// @return Corruption when the inputs are no longer consistent with the
//         current version; otherwise the result of LogAndApply().
Status CompactionJob::InstallCompactionResults(
    const MutableCFOptions& mutable_cf_options) {
  db_mutex_->AssertHeld();

  auto* compaction = compact_->compaction;
  // paranoia: verify that the files that we started with
  // still exist in the current version and in the same original level.
  // This ensures that a concurrent compaction did not erroneously
  // pick the same files to compact_.
  if (!versions_->VerifyCompactionFileConsistency(compaction)) {
    Compaction::InputLevelSummaryBuffer inputs_summary;

    Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log,
        "[%s] [JOB %d] Compaction %s aborted",
        compaction->column_family_data()->GetName().c_str(), job_id_,
        compaction->InputLevelSummary(&inputs_summary));
    return Status::Corruption("Compaction input files inconsistent");
  }

  {
    Compaction::InputLevelSummaryBuffer inputs_summary;
    Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
        "[%s] [JOB %d] Compacted %s => %" PRIu64 " bytes",
        compaction->column_family_data()->GetName().c_str(), job_id_,
        compaction->InputLevelSummary(&inputs_summary), compact_->total_bytes);
  }

  // Add compaction outputs
  compaction->AddInputDeletions(compact_->compaction->edit());

  // Every output produced by every subcompaction lands in the output level.
  for (const auto& sub_compact : compact_->sub_compact_states) {
    for (const auto& out : sub_compact.outputs) {
      compaction->edit()->AddFile(compaction->output_level(), out.meta);
    }
  }
  return versions_->LogAndApply(compaction->column_family_data(),
                                mutable_cf_options, compaction->edit(),
                                db_mutex_, db_directory_);
}

void CompactionJob::RecordCompactionIOStats() {
  RecordTick(stats_, COMPACT_READ_BYTES, IOSTATS(bytes_read));
1106 1107
  ThreadStatusUtil::IncreaseThreadOperationProperty(
      ThreadStatus::COMPACTION_BYTES_READ, IOSTATS(bytes_read));
I
Igor Canadi 已提交
1108 1109
  IOSTATS_RESET(bytes_read);
  RecordTick(stats_, COMPACT_WRITE_BYTES, IOSTATS(bytes_written));
1110 1111
  ThreadStatusUtil::IncreaseThreadOperationProperty(
      ThreadStatus::COMPACTION_BYTES_WRITTEN, IOSTATS(bytes_written));
I
Igor Canadi 已提交
1112 1113 1114
  IOSTATS_RESET(bytes_written);
}

1115 1116
// Creates and opens the next output table for |sub_compact|: allocates a new
// file number, creates the writable file, registers a fresh Output entry, and
// sets up the TableBuilder that subsequent Add() calls write through.
// Listener events are fired on creation start and, on failure, on finish.
//
// @return status of file creation; OK on success.
Status CompactionJob::OpenCompactionOutputFile(
    SubcompactionState* sub_compact) {
  assert(sub_compact != nullptr);
  assert(sub_compact->builder == nullptr);
  // no need to lock because VersionSet::next_file_number_ is atomic
  uint64_t file_number = versions_->NewFileNumber();
  std::string fname = TableFileName(db_options_.db_paths, file_number,
                                    sub_compact->compaction->output_path_id());
  // Fire events.
  ColumnFamilyData* cfd = sub_compact->compaction->column_family_data();
#ifndef ROCKSDB_LITE
  EventHelpers::NotifyTableFileCreationStarted(
      cfd->ioptions()->listeners, dbname_, cfd->GetName(), fname, job_id_,
      TableFileCreationReason::kCompaction);
#endif  // !ROCKSDB_LITE
  // Make the output file
  unique_ptr<WritableFile> writable_file;
  Status s = NewWritableFile(env_, fname, &writable_file, env_options_);
  if (!s.ok()) {
    // Creation failed: log, notify listeners of the failed creation, and
    // bail out before touching any subcompaction state.
    Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log,
        "[%s] [JOB %d] OpenCompactionOutputFiles for table #%" PRIu64
        " fails at NewWritableFile with status %s",
        sub_compact->compaction->column_family_data()->GetName().c_str(),
        job_id_, file_number, s.ToString().c_str());
    LogFlush(db_options_.info_log);
    EventHelpers::LogAndNotifyTableFileCreationFinished(
        event_logger_, cfd->ioptions()->listeners, dbname_, cfd->GetName(),
        fname, job_id_, FileDescriptor(), TableProperties(),
        TableFileCreationReason::kCompaction, s);
    return s;
  }

  // Register the new (not yet finished) output with the subcompaction.
  SubcompactionState::Output out;
  out.meta.fd =
      FileDescriptor(file_number, sub_compact->compaction->output_path_id(), 0);
  out.finished = false;

  sub_compact->outputs.push_back(out);
  writable_file->SetIOPriority(Env::IO_LOW);
  writable_file->SetPreallocationBlockSize(static_cast<size_t>(
      sub_compact->compaction->OutputFilePreallocationSize()));
  sub_compact->outfile.reset(
      new WritableFileWriter(std::move(writable_file), env_options_));

  // If the Column family flag is to only optimize filters for hits,
  // we can skip creating filters if this is the bottommost_level where
  // data is going to be found
  bool skip_filters =
      cfd->ioptions()->optimize_filters_for_hits && bottommost_level_;
  sub_compact->builder.reset(NewTableBuilder(
      *cfd->ioptions(), cfd->internal_comparator(),
      cfd->int_tbl_prop_collector_factories(), cfd->GetID(), cfd->GetName(),
      sub_compact->outfile.get(), sub_compact->compaction->output_compression(),
      cfd->ioptions()->compression_opts,
      sub_compact->compaction->output_level(),
      &sub_compact->compression_dict,
      skip_filters));
  LogFlush(db_options_.info_log);
  return s;
}

1176
void CompactionJob::CleanupCompaction() {
1177
  for (SubcompactionState& sub_compact : compact_->sub_compact_states) {
1178
    const auto& sub_status = sub_compact.status;
I
Igor Canadi 已提交
1179

1180 1181 1182 1183 1184 1185 1186
    if (sub_compact.builder != nullptr) {
      // May happen if we get a shutdown call in the middle of compaction
      sub_compact.builder->Abandon();
      sub_compact.builder.reset();
    } else {
      assert(!sub_status.ok() || sub_compact.outfile == nullptr);
    }
1187
    for (const auto& out : sub_compact.outputs) {
1188 1189 1190
      // If this file was inserted into the table cache then remove
      // them here because this compaction was not committed.
      if (!sub_status.ok()) {
1191
        TableCache::Evict(table_cache_.get(), out.meta.fd.GetNumber());
1192
      }
I
Igor Canadi 已提交
1193 1194 1195 1196 1197 1198
    }
  }
  delete compact_;
  compact_ = nullptr;
}

1199 1200 1201
#ifndef ROCKSDB_LITE
namespace {
void CopyPrefix(
1202 1203 1204 1205
    const Slice& src, size_t prefix_length, std::string* dst) {
  assert(prefix_length > 0);
  size_t length = src.size() > prefix_length ? prefix_length : src.size();
  dst->assign(src.data(), length);
1206 1207 1208 1209 1210
}
}  // namespace

#endif  // !ROCKSDB_LITE

A
Andres Notzli 已提交
1211
// Aggregates this compaction's input and output statistics into
// compaction_stats_: per-level input file counts/bytes via the helper, then
// output file counts, bytes written, and dropped-record totals across all
// subcompactions.
void CompactionJob::UpdateCompactionStats() {
  Compaction* compaction = compact_->compaction;
  compaction_stats_.num_input_files_in_non_output_levels = 0;
  compaction_stats_.num_input_files_in_output_level = 0;
  // Split input accounting by whether the level being read is also the
  // output level of this compaction.
  for (int input_level = 0;
       input_level < static_cast<int>(compaction->num_input_levels());
       ++input_level) {
    if (compaction->start_level() + input_level
        != compaction->output_level()) {
      UpdateCompactionInputStatsHelper(
          &compaction_stats_.num_input_files_in_non_output_levels,
          &compaction_stats_.bytes_read_non_output_levels,
          input_level);
    } else {
      UpdateCompactionInputStatsHelper(
          &compaction_stats_.num_input_files_in_output_level,
          &compaction_stats_.bytes_read_output_level,
          input_level);
    }
  }

  for (const auto& sub_compact : compact_->sub_compact_states) {
    size_t num_output_files = sub_compact.outputs.size();
    if (sub_compact.builder != nullptr) {
      // An error occurred so ignore the last output.
      assert(num_output_files > 0);
      --num_output_files;
    }
    compaction_stats_.num_output_files += static_cast<int>(num_output_files);

    for (const auto& out : sub_compact.outputs) {
      compaction_stats_.bytes_written += out.meta.fd.file_size;
    }
    // Records that went in but did not come out were dropped.
    if (sub_compact.num_input_records > sub_compact.num_output_records) {
      compaction_stats_.num_dropped_records +=
          sub_compact.num_input_records - sub_compact.num_output_records;
    }
  }
}

// Accumulates input statistics for one input level: bumps |num_files| and
// |bytes_read| for every file in |input_level| and adds each file's entry
// count to compaction_stats_.num_input_records.
void CompactionJob::UpdateCompactionInputStatsHelper(
    int* num_files, uint64_t* bytes_read, int input_level) {
  const Compaction* compaction = compact_->compaction;
  const auto file_count = compaction->num_input_files(input_level);
  *num_files += static_cast<int>(file_count);

  for (size_t idx = 0; idx < file_count; ++idx) {
    const auto* file_meta = compaction->input(input_level, idx);
    *bytes_read += file_meta->fd.GetFileSize();
    compaction_stats_.num_input_records +=
        static_cast<uint64_t>(file_meta->num_entries);
  }
}

1265 1266 1267 1268 1269 1270 1271 1272
// Copies the aggregated compaction statistics into the user-visible
// CompactionJobStats structure.  No-op when no stats sink was supplied or
// in ROCKSDB_LITE builds.
void CompactionJob::UpdateCompactionJobStats(
    const InternalStats::CompactionStats& stats) const {
#ifndef ROCKSDB_LITE
  if (compaction_job_stats_ == nullptr) {
    return;
  }
  compaction_job_stats_->elapsed_micros = stats.micros;

  // Input side: bytes, records, and files consumed by this compaction.
  compaction_job_stats_->total_input_bytes =
      stats.bytes_read_non_output_levels + stats.bytes_read_output_level;
  compaction_job_stats_->num_input_records = compact_->num_input_records;
  compaction_job_stats_->num_input_files =
      stats.num_input_files_in_non_output_levels +
      stats.num_input_files_in_output_level;
  compaction_job_stats_->num_input_files_at_output_level =
      stats.num_input_files_in_output_level;

  // Output side: bytes, records, and files produced.
  compaction_job_stats_->total_output_bytes = stats.bytes_written;
  compaction_job_stats_->num_output_records = compact_->num_output_records;
  compaction_job_stats_->num_output_files = stats.num_output_files;

  // When anything was produced, record truncated copies of the smallest and
  // largest user keys among the outputs.
  if (compact_->NumOutputFiles() > 0U) {
    CopyPrefix(compact_->SmallestUserKey(),
               CompactionJobStats::kMaxPrefixLength,
               &compaction_job_stats_->smallest_output_key_prefix);
    CopyPrefix(compact_->LargestUserKey(),
               CompactionJobStats::kMaxPrefixLength,
               &compaction_job_stats_->largest_output_key_prefix);
  }
#endif  // !ROCKSDB_LITE
}

1303 1304 1305 1306
// Logs the start of this compaction to the info log (input-level summary,
// score, and a one-line compaction summary) and emits a structured
// "compaction_started" event, listing the participating files per level,
// to the event logger.
void CompactionJob::LogCompaction() {
  Compaction* compaction = compact_->compaction;
  ColumnFamilyData* cfd = compaction->column_family_data();

  // Let's check if anything will get logged. Don't prepare all the info if
  // we're not logging
  if (db_options_.info_log_level <= InfoLogLevel::INFO_LEVEL) {
    Compaction::InputLevelSummaryBuffer inputs_summary;
    Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
        "[%s] [JOB %d] Compacting %s, score %.2f", cfd->GetName().c_str(),
        job_id_, compaction->InputLevelSummary(&inputs_summary),
        compaction->score());
    char scratch[2345];
    compaction->Summary(scratch, sizeof(scratch));
    Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
        "[%s] Compaction start summary: %s\n", cfd->GetName().c_str(), scratch);
    // build event logger report
    auto stream = event_logger_->Log();
    stream << "job" << job_id_ << "event"
           << "compaction_started";
    // One "files_L<level>" array per input level, listing file numbers.
    for (size_t i = 0; i < compaction->num_input_levels(); ++i) {
      stream << ("files_L" + ToString(compaction->level(i)));
      stream.StartArray();
      for (auto f : *compaction->inputs(i)) {
        stream << f->fd.GetNumber();
      }
      stream.EndArray();
    }
    stream << "score" << compaction->score() << "input_data_size"
           << compaction->CalculateTotalInputSize();
  }
}

I
Igor Canadi 已提交
1336
}  // namespace rocksdb