Commit 7d65bd5c authored by Faustin Lammler, committed by Facebook Github Bot

Fix spelling errors (#4827)

Summary:
Hi, Lintian, the Debian package checker, complains about spelling errors (spelling-error-in-binary).

See https://salsa.debian.org/mariadb-team/mariadb-10.3/-/jobs/98380
Pull Request resolved: https://github.com/facebook/rocksdb/pull/4827

Differential Revision: D13566362

Pulled By: riversand963

fbshipit-source-id: cd4e9212133c73b0591030de6cdedaa47575968d
Parent f8d5c1b0
@@ -97,7 +97,7 @@ function getSteps($applyDiff, $diffID, $username, $test) {
 }
 // fbcode is a sub-repo. We cannot patch until we add it to ignore otherwise
-// Git thinks it is an uncommited change.
+// Git thinks it is an uncommitted change.
 $fix_git_ignore = array(
 "name" => "Fix git ignore",
 "shell" => "echo fbcode >> .git/info/exclude",
@@ -2042,7 +2042,7 @@ TEST_P(ColumnFamilyTest, SameCFAutomaticManualCompactions) {
 }
 #endif // !ROCKSDB_LITE
-#ifndef ROCKSDB_LITE // Tailing interator not supported
+#ifndef ROCKSDB_LITE // Tailing iterator not supported
 namespace {
 std::string IterStatus(Iterator* iter) {
 std::string result;
@@ -173,7 +173,7 @@ TEST_F(DBBlockCacheTest, TestWithoutCompressedBlockCache) {
 delete iter;
 iter = nullptr;
-// Release interators and access cache again.
+// Release iterators and access cache again.
 for (size_t i = 0; i < kNumBlocks - 1; i++) {
 iterators[i].reset();
 CheckCacheCounters(options, 0, 0, 0, 0);
@@ -1868,7 +1868,7 @@ Status DBImpl::NewIterators(
 if (read_options.tailing) {
 #ifdef ROCKSDB_LITE
 return Status::InvalidArgument(
-"Tailing interator not supported in RocksDB lite");
+"Tailing iterator not supported in RocksDB lite");
 #else
 for (auto cfh : column_families) {
 auto cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(cfh)->cfd();
@@ -1472,7 +1472,7 @@ class DBImpl : public DB {
 std::atomic<bool> has_unpersisted_data_;
 // if an attempt was made to flush all column families that
-// the oldest log depends on but uncommited data in the oldest
+// the oldest log depends on but uncommitted data in the oldest
 // log prevents the log from being released.
 // We must attempt to free the dependent memtables again
 // at a later time after the transaction in the oldest
@@ -1048,22 +1048,22 @@ Status DBImpl::SwitchWAL(WriteContext* write_context) {
 auto oldest_alive_log = alive_log_files_.begin()->number;
 bool flush_wont_release_oldest_log = false;
 if (allow_2pc()) {
-auto oldest_log_with_uncommited_prep =
+auto oldest_log_with_uncommitted_prep =
 logs_with_prep_tracker_.FindMinLogContainingOutstandingPrep();
-assert(oldest_log_with_uncommited_prep == 0 ||
-oldest_log_with_uncommited_prep >= oldest_alive_log);
-if (oldest_log_with_uncommited_prep > 0 &&
-oldest_log_with_uncommited_prep == oldest_alive_log) {
+assert(oldest_log_with_uncommitted_prep == 0 ||
+oldest_log_with_uncommitted_prep >= oldest_alive_log);
+if (oldest_log_with_uncommitted_prep > 0 &&
+oldest_log_with_uncommitted_prep == oldest_alive_log) {
 if (unable_to_release_oldest_log_) {
 // we already attempted to flush all column families dependent on
-// the oldest alive log but the log still contained uncommited
+// the oldest alive log but the log still contained uncommitted
 // transactions so there is still nothing that we can do.
 return status;
 } else {
 ROCKS_LOG_WARN(
 immutable_db_options_.info_log,
-"Unable to release oldest log due to uncommited transaction");
+"Unable to release oldest log due to uncommitted transaction");
 unable_to_release_oldest_log_ = true;
 flush_wont_release_oldest_log = true;
 }
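The comment fixes in this hunk sit inside DBImpl::SwitchWAL's two-phase-commit check. Purely as a reading aid, here is a minimal standalone sketch of the rule those comments describe; CanReleaseOldestLog and its parameters are hypothetical illustration, not RocksDB functions.

// Sketch (illustrative names, not RocksDB's API) of the documented rule:
// under two-phase commit, the oldest alive WAL cannot be released while it
// still holds a prepared-but-uncommitted transaction.
#include <cstdint>
#include <iostream>

// oldest_log_with_uncommitted_prep == 0 means no prepared transaction is
// outstanding anywhere; otherwise it is the lowest log number that still
// contains one, which can only be >= the oldest alive log (older logs have
// already been released).
bool CanReleaseOldestLog(uint64_t oldest_alive_log,
                         uint64_t oldest_log_with_uncommitted_prep) {
  if (oldest_log_with_uncommitted_prep == 0) {
    return true;  // nothing prepared, flushing can free the oldest log
  }
  // If the uncommitted prepared data sits exactly in the oldest alive log,
  // that log must be kept until the transaction commits or aborts.
  return oldest_log_with_uncommitted_prep != oldest_alive_log;
}

int main() {
  std::cout << CanReleaseOldestLog(5, 0) << "\n";  // 1: no outstanding prep
  std::cout << CanReleaseOldestLog(5, 5) << "\n";  // 0: prep lives in log 5
  std::cout << CanReleaseOldestLog(5, 7) << "\n";  // 1: prep only in newer logs
  return 0;
}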
@@ -263,7 +263,7 @@ const char* VersionEdit::DecodeNewFile4From(Slice* input) {
 break;
 }
 if (!GetLengthPrefixedSlice(input, &field)) {
-return "new-file4 custom field lenth prefixed slice error";
+return "new-file4 custom field length prefixed slice error";
 }
 switch (custom_tag) {
 case kPathId:
@@ -419,7 +419,7 @@ void WritePreparedTxn::SetSnapshot() {
 // the mutex overhead, we call SmallestUnCommittedSeq BEFORE taking the
 // snapshot. Since we always updated the list of unprepared seq (via
 // AddPrepared) AFTER the last sequence is updated, this guarantees that the
-// smallest uncommited seq that we pair with the snapshot is smaller or equal
+// smallest uncommitted seq that we pair with the snapshot is smaller or equal
 // the value that would be obtained otherwise atomically. That is ok since
 // this optimization works as long as min_uncommitted is less than or equal
 // than the smallest uncommitted seq when the snapshot was taken.
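The comment corrected in this hunk argues that reading the smallest uncommitted sequence before acquiring the snapshot sequence is safe because it can only under-estimate the true value. Below is a minimal single-threaded sketch of that argument under simplifying assumptions; TxnDbModel, Prepare, and SmallestUncommitted are hypothetical names, not the WritePreparedTxn API.

// A toy model of the ordering argument: the sequence is published first,
// then the prepared entry is recorded, so a min_uncommitted value read
// BEFORE the snapshot sequence is <= the smallest uncommitted sequence
// that exists when the snapshot is taken.
#include <cassert>
#include <cstdint>
#include <set>

struct TxnDbModel {
  uint64_t last_published_seq = 0;
  std::set<uint64_t> uncommitted;  // prepared but not yet committed seqs

  void Prepare(uint64_t seq) {
    last_published_seq = seq;   // sequence is updated first ...
    uncommitted.insert(seq);    // ... then the uncommitted list (AddPrepared)
  }

  uint64_t SmallestUncommitted() const {
    return uncommitted.empty() ? last_published_seq + 1 : *uncommitted.begin();
  }
};

int main() {
  TxnDbModel db;
  db.Prepare(10);
  // Step 1: read min_uncommitted first ...
  uint64_t min_uncommitted = db.SmallestUncommitted();  // 10
  // ... another transaction prepares in between ...
  db.Prepare(11);
  // Step 2: ... then take the snapshot sequence.
  uint64_t snapshot_seq = db.last_published_seq;  // 11
  // The pair (snapshot_seq, min_uncommitted) is conservative: min_uncommitted
  // is <= the smallest uncommitted seq that existed at snapshot time.
  assert(min_uncommitted <= db.SmallestUncommitted());
  assert(min_uncommitted <= snapshot_seq);
  return 0;
}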