#!/usr/bin/env python2
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.

import os
import sys
import time
import random
import tempfile
import subprocess
import shutil
import argparse

# params overwrite priority:
#   for default:
#       default_params < {blackbox,whitebox}_default_params < args
#   for simple:
#       default_params < {blackbox,whitebox}_default_params <
#       simple_default_params <
#       {blackbox,whitebox}_simple_default_params < args
#   for cf_consistency:
#       default_params < {blackbox,whitebox}_default_params <
#       cf_consistency_params < args
#   for txn:
#       default_params < {blackbox,whitebox}_default_params < txn_params < args

# File backing db_stress's expected-values map; the object must stay alive
# for the whole run so the file is not deleted while tests use it.
expected_values_file = tempfile.NamedTemporaryFile()

# Base parameter set for every test flavor.  A value may be a plain constant
# or a zero-argument lambda; lambdas are re-evaluated for every db_stress
# run (see finalize_and_sanitize), giving per-run randomization.
default_params = {
    "acquire_snapshot_one_in": 10000,
    "block_size": 16384,
    "bloom_bits": lambda: random.choice([random.randint(0,19),
                                         random.lognormvariate(2.3, 1.3)]),
    "cache_index_and_filter_blocks": lambda: random.randint(0, 1),
    "cache_size": 1048576,
    "checkpoint_one_in": 1000000,
    "compression_type": lambda: random.choice(
        ["snappy", "none", "zlib"]),
    "checksum_type" : lambda: random.choice(["kCRC32c", "kxxHash", "kxxHash64"]),
    "compression_max_dict_bytes": lambda: 16384 * random.randint(0, 1),
    "compression_zstd_max_train_bytes": lambda: 65536 * random.randint(0, 1),
    "clear_column_family_one_in": 0,
    "compact_files_one_in": 1000000,
    "compact_range_one_in": 1000000,
    "delpercent": 4,
    "delrangepercent": 1,
    "destroy_db_initially": 0,
    "enable_pipelined_write": lambda: random.randint(0, 1),
    "expected_values_path": expected_values_file.name,
    "flush_one_in": 1000000,
    # Temporarily disable hash index
    "index_type": lambda: random.choice([0,2]),
    "max_background_compactions": 20,
    "max_bytes_for_level_base": 10485760,
    "max_key": 100000000,
    "max_write_buffer_number": 3,
    "mmap_read": lambda: random.randint(0, 1),
    "nooverwritepercent": 1,
    "open_files": lambda : random.choice([-1, 500000]),
    "partition_filters": lambda: random.randint(0, 1),
    "pause_background_one_in": 1000000,
    "prefixpercent": 5,
    "progress_reports": 0,
    "readpercent": 45,
    "recycle_log_file_num": lambda: random.randint(0, 1),
    "reopen": 20,
    "snapshot_hold_ops": 100000,
    "long_running_snapshots": lambda: random.randint(0, 1),
    "subcompactions": lambda: random.randint(1, 4),
    "target_file_size_base": 2097152,
    "target_file_size_multiplier": 2,
    "use_direct_reads": lambda: random.randint(0, 1),
    "use_direct_io_for_flush_and_compaction": lambda: random.randint(0, 1),
    "use_full_merge_v1": lambda: random.randint(0, 1),
    "use_merge": lambda: random.randint(0, 1),
    "verify_checksum": 1,
    "write_buffer_size": 4 * 1024 * 1024,
    "writepercent": 35,
    "format_version": lambda: random.choice([2, 3, 4, 5, 5]),
    "index_block_restart_interval": lambda: random.choice(range(1, 16)),
    "use_multiget" : lambda: random.randint(0, 1),
    "periodic_compaction_seconds" :
        lambda: random.choice([0, 0, 1, 2, 10, 100, 1000]),
    "compaction_ttl" : lambda: random.choice([0, 0, 1, 2, 10, 100, 1000]),
    # Test small max_manifest_file_size in a smaller chance, as most of the
    # time we want manifest history to be preserved to help debug
    "max_manifest_file_size" : lambda : random.choice(
        [t * 16384 if t < 3 else 1024 * 1024 * 1024 for t in range(1, 30)]),
    # Sync mode might make test runs slower so run it in a smaller chance.
    # BUG FIX: the old list comprehension over range(1, 20) never produced a
    # 0 (t is never 0), so sync was silently *always* enabled; iterate from 0
    # so sync=1 is picked with 1/20 probability as the comment intends.
    "sync" : lambda : random.choice(
        [1 if t == 0 else 0 for t in range(0, 20)]),
    "compaction_readahead_size" : lambda : random.choice(
        [0, 0, 1024 * 1024]),
    "db_write_buffer_size" : lambda: random.choice(
        [0, 0, 0, 1024 * 1024, 8 * 1024 * 1024, 128 * 1024 * 1024]),
    # BUG FIX: these two were evaluated once at import time (missing lambda),
    # so every run in a session got the same value; wrap them in lambdas so
    # they are re-sampled per run like all other randomized options.
    "avoid_unnecessary_blocking_io" : lambda: random.randint(0, 1),
    "write_dbid_to_manifest" : lambda: random.randint(0, 1),
    "max_write_batch_group_size_bytes" : lambda: random.choice(
        [16, 64, 1024 * 1024, 16 * 1024 * 1024]),
    "level_compaction_dynamic_level_bytes" : True,
}


# Environment variable that, when set, chooses where test databases live.
_TEST_DIR_ENV_VAR = 'TEST_TMPDIR'


def get_dbname(test_name):
    """Return a fresh, empty database directory for *test_name*.

    If the TEST_TMPDIR environment variable names a directory, the db is
    placed under it (any leftovers from a previous run are removed first);
    otherwise a new unique temporary directory is created.
    """
    test_tmpdir = os.environ.get(_TEST_DIR_ENV_VAR)
    if test_tmpdir is None or test_tmpdir == "":
        dbname = tempfile.mkdtemp(prefix='rocksdb_crashtest_' + test_name)
    else:
        # os.path.join instead of string concatenation avoids doubled
        # separators when TEST_TMPDIR ends with a slash.
        dbname = os.path.join(test_tmpdir, "rocksdb_crashtest_" + test_name)
        # Start from a clean slate: ignore_errors=True so a missing
        # directory from a first run is not an error.
        shutil.rmtree(dbname, True)
        os.mkdir(dbname)
    return dbname


def is_direct_io_supported(dbname):
    """Return True if files under *dbname* can be opened with O_DIRECT."""
    with tempfile.NamedTemporaryFile(dir=dbname) as f:
        try:
            fd = os.open(f.name, os.O_DIRECT)
        except (OSError, AttributeError):
            # OSError: the filesystem rejects O_DIRECT; AttributeError: the
            # platform (e.g. macOS) does not define os.O_DIRECT at all.
            return False
        # BUG FIX: the probe descriptor was previously leaked.
        os.close(fd)
        return True


# Overrides applied on top of default_params for blackbox runs.
blackbox_default_params = {
    # total wall-clock budget for the whole blackbox test, in seconds
    "duration": 6000,
    # how long each db_stress instance lives before it is killed
    "interval": 120,
    # instances are killed from the outside anyway, so give each one a
    # practically unlimited operation budget per thread
    "ops_per_thread": 100000000,
    "set_options_one_in": 10000,
    "test_batches_snapshots": 1,
}

# Overrides applied on top of default_params for whitebox runs.
whitebox_default_params = {
    # total wall-clock budget for the whitebox test, in seconds
    "duration": 10000,
    "log2_keys_per_lock": 10,
    "ops_per_thread": 200000,
    # odds fed to db_stress --kill_random_test in kill mode 0
    "random_kill_odd": 888887,
    "test_batches_snapshots": lambda: random.randint(0, 1),
}

# Overrides for --simple runs: one column family, no prefix workload,
# batched snapshots off, and static (non-dynamic) level sizing.
simple_default_params = {
    "allow_concurrent_memtable_write": lambda: random.randint(0, 1),
    "column_families": 1,
    "max_background_compactions": 1,
    "max_bytes_for_level_base": 64 * 1024 * 1024,
    "memtablerep": "skip_list",
    "prefixpercent": 0,
    "readpercent": 50,
    "prefix_size": -1,
    "target_file_size_base": 16 * 1024 * 1024,
    "target_file_size_multiplier": 1,
    "test_batches_snapshots": 0,
    "write_buffer_size": 32 * 1024 * 1024,
    "level_compaction_dynamic_level_bytes": False,
}

# Extra overrides for --simple blackbox runs.
blackbox_simple_default_params = {
    "open_files": -1,
    "set_options_one_in": 0,
}

# --simple whitebox runs add nothing beyond the whitebox defaults.
whitebox_simple_default_params = {}


# Overrides for --cf_consistency runs.
cf_consistency_params = {
    "disable_wal": lambda: random.randint(0, 1),
    "reopen": 0,
    "test_cf_consistency": 1,
    # keep write_buffer_size small so RocksDB triggers flush
    # more frequently
    "write_buffer_size": 1024 * 1024,
    "enable_pipelined_write": lambda: random.randint(0, 1),
}


# Overrides for --txn runs.
txn_params = {
    "use_txn": 1,
    # Deliberately no lambda here: pick one value for the entire test run.
    "txn_write_policy": random.randint(0, 2),
    "unordered_write": random.randint(0, 1),
    "disable_wal": 0,
    # OpenReadOnly after checkpoint is not currently compatible with
    # WritePrepared txns
    "checkpoint_one_in": 0,
    # pipelined write is not currently compatible with WritePrepared txns
    "enable_pipelined_write": 0,
}


def finalize_and_sanitize(src_params):
    """Materialize random parameters and resolve incompatible combinations.

    Callable (lambda) values in *src_params* are invoked to obtain concrete
    values; then options db_stress cannot combine are overridden, in a fixed
    order (later rules may depend on earlier ones), so the returned dict is
    always a self-consistent configuration.
    """
    dest_params = dict([(k,  v() if callable(v) else v)
                        for (k, v) in src_params.items()])
    # zstd dictionary training is meaningless unless zstd compression with a
    # non-empty dictionary is in use.
    if dest_params.get("compression_type") != "zstd" or \
            dest_params.get("compression_max_dict_bytes") == 0:
        dest_params["compression_zstd_max_train_bytes"] = 0
    # Force the skip-list memtable whenever concurrent memtable writes are
    # enabled (other memtable reps don't allow them).
    if dest_params.get("allow_concurrent_memtable_write", 1) == 1:
        dest_params["memtablerep"] = "skip_list"
    # Direct I/O is disabled when mmap reads are chosen, or when the
    # filesystem backing the db directory rejects O_DIRECT.
    if dest_params["mmap_read"] == 1 or not is_direct_io_supported(
            dest_params["db"]):
        dest_params["use_direct_io_for_flush_and_compaction"] = 0
        dest_params["use_direct_reads"] = 0
    # DeleteRange is not currently compatible with Txns; fold the
    # range-delete share into plain deletes so total write mix is unchanged.
    if dest_params.get("test_batches_snapshots") == 1 or \
            dest_params.get("use_txn") == 1:
        dest_params["delpercent"] += dest_params["delrangepercent"]
        dest_params["delrangepercent"] = 0
    # Only under WritePrepared txns (txn_write_policy 1), unordered_write
    # provides the same guarantees as vanilla rocksdb.
    if dest_params.get("unordered_write", 0) == 1:
        dest_params["txn_write_policy"] = 1
        dest_params["allow_concurrent_memtable_write"] = 1
    # Without a WAL, consistency across column families requires atomic flush.
    if dest_params.get("disable_wal", 0) == 1:
        dest_params["atomic_flush"] = 1
    if dest_params.get("open_files", 1) != -1:
        # Compaction TTL and periodic compactions are only compatible
        # with open_files = -1
        dest_params["compaction_ttl"] = 0
        dest_params["periodic_compaction_seconds"] = 0
    if dest_params.get("compaction_style", 0) == 2:
        # Disable compaction TTL in FIFO compaction, because right
        # now assertion failures are triggered.
        dest_params["compaction_ttl"] = 0
        dest_params["periodic_compaction_seconds"] = 0
    # Partitioned filters require the partitioned index (index_type 2) and
    # cannot be combined with the legacy block-based filter.
    if dest_params["partition_filters"] == 1:
        if dest_params["index_type"] != 2:
            dest_params["partition_filters"] = 0
        else:
            dest_params["use_block_based_filter"] = 0
    if dest_params.get("atomic_flush", 0) == 1:
        # disable pipelined write when atomic flush is used.
        dest_params["enable_pipelined_write"] = 0
    return dest_params


def gen_cmd_params(args):
    """Assemble the parameter dict for this run from the layered defaults.

    Later dict updates win, implementing the priority order documented at
    the top of this file; explicit command-line values override everything.
    """
    params = {}
    params.update(default_params)
    # test_type is a single argparse choice, so the branches are exclusive.
    if args.test_type == 'blackbox':
        params.update(blackbox_default_params)
    elif args.test_type == 'whitebox':
        params.update(whitebox_default_params)
    if args.simple:
        params.update(simple_default_params)
        if args.test_type == 'blackbox':
            params.update(blackbox_simple_default_params)
        elif args.test_type == 'whitebox':
            params.update(whitebox_simple_default_params)
    if args.cf_consistency:
        params.update(cf_consistency_params)
    if args.txn:
        params.update(txn_params)

    # Any flag the user actually passed beats every default above.
    for key, value in vars(args).items():
        if value is not None:
            params[key] = value
    return params


def gen_cmd(params, unknown_params):
    """Build the db_stress argv from *params* plus pass-through arguments.

    Keys that only steer this driver script are filtered out, as are
    None values; everything else becomes a --key=value option.
    """
    driver_only_keys = set(['test_type', 'simple', 'duration', 'interval',
                            'random_kill_odd', 'cf_consistency', 'txn'])
    option_args = []
    for key, value in finalize_and_sanitize(params).items():
        if key in driver_only_keys or value is None:
            continue
        option_args.append('--{0}={1}'.format(key, value))
    return ['./db_stress'] + option_args + unknown_params


# This script runs and kills db_stress multiple times. It checks consistency
# in case of unsafe crashes in RocksDB.
def blackbox_crash_main(args, unknown_args):
    """Blackbox mode: run db_stress, kill it from the outside, repeat.

    Each instance runs for cmd_params['interval'] seconds and is then
    SIGKILLed; the loop continues until cmd_params['duration'] seconds
    have elapsed.  Any non-WARNING stderr output aborts with exit code 2.
    """
    cmd_params = gen_cmd_params(args)
    dbname = get_dbname('blackbox')
    exit_time = time.time() + cmd_params['duration']

    print("Running blackbox-crash-test with \n"
          + "interval_between_crash=" + str(cmd_params['interval']) + "\n"
          + "total-duration=" + str(cmd_params['duration']) + "\n")

    while time.time() < exit_time:
        run_had_errors = False
        killtime = time.time() + cmd_params['interval']

        # Python 2 idiom: dict.items() returns lists, so '+' concatenates
        # them; the 'db' entry overrides any earlier value of that key.
        cmd = gen_cmd(dict(
            cmd_params.items() +
            {'db': dbname}.items()), unknown_args)

        child = subprocess.Popen(cmd, stderr=subprocess.PIPE)
        print("Running db_stress with pid=%d: %s\n\n"
              % (child.pid, ' '.join(cmd)))

        # Poll once per second until the kill deadline.  A premature exit
        # is reported but is not by itself treated as a failure.
        stop_early = False
        while time.time() < killtime:
            if child.poll() is not None:
                print("WARNING: db_stress ended before kill: exitcode=%d\n"
                      % child.returncode)
                stop_early = True
                break
            time.sleep(1)

        if not stop_early:
            if child.poll() is not None:
                print("WARNING: db_stress ended before kill: exitcode=%d\n"
                      % child.returncode)
            else:
                child.kill()
                print("KILLED %d\n" % child.pid)
                time.sleep(1)  # time to stabilize after a kill

        # Drain stderr: anything other than WARNING-prefixed lines counts
        # as an error and fails the whole test.
        while True:
            line = child.stderr.readline().strip()
            if line == '':
                break
            elif not line.startswith('WARNING'):
                run_had_errors = True
                print('stderr has error message:')
                print('***' + line + '***')

        if run_had_errors:
            sys.exit(2)

        time.sleep(1)  # time to stabilize before the next run

    # we need to clean up after ourselves -- only do this on test success
    shutil.rmtree(dbname, True)


# This python script runs db_stress multiple times. Some runs with
# kill_random_test that causes rocksdb to crash at various points in code.
def whitebox_crash_main(args, unknown_args):
    """Whitebox mode: cycle db_stress through four check modes.

    Mode 0 enables internal kill points (crash/recovery testing, rotating
    three kill-point sets); modes 1-3 are normal runs under universal,
    FIFO and default compaction respectively.  Exits non-zero as soon as
    any run fails.
    """
    cmd_params = gen_cmd_params(args)
    dbname = get_dbname('whitebox')

    cur_time = time.time()
    exit_time = cur_time + cmd_params['duration']
    # spend the first half of the duration in kill mode (check_mode 0);
    # Python 2 integer division is fine here.
    half_time = cur_time + cmd_params['duration'] / 2

    print("Running whitebox-crash-test with \n"
          + "total-duration=" + str(cmd_params['duration']) + "\n")

    total_check_mode = 4
    check_mode = 0
    kill_random_test = cmd_params['random_kill_odd']
    kill_mode = 0

    while time.time() < exit_time:
        if check_mode == 0:
            additional_opts = {
                # use large ops per thread since we will kill it anyway
                "ops_per_thread": 100 * cmd_params['ops_per_thread'],
            }
            # run with kill_random_test, with three modes.
            # Mode 0 covers all kill points. Mode 1 covers less kill points but
            # increases chance of triggering them. Mode 2 covers even less
            # frequent kill points and further increases triggering chance.
            if kill_mode == 0:
                additional_opts.update({
                    "kill_random_test": kill_random_test,
                })
            elif kill_mode == 1:
                # Python 2 integer division: lower odds = kills more likely.
                if cmd_params.get('disable_wal', 0) == 1:
                    my_kill_odd = kill_random_test / 50 + 1
                else:
                    my_kill_odd = kill_random_test / 10 + 1
                additional_opts.update({
                    "kill_random_test": my_kill_odd,
                    "kill_prefix_blacklist": "WritableFileWriter::Append,"
                    + "WritableFileWriter::WriteBuffered",
                })
            elif kill_mode == 2:
                # TODO: May need to adjust random odds if kill_random_test
                # is too small.
                additional_opts.update({
                    "kill_random_test": (kill_random_test / 5000 + 1),
                    "kill_prefix_blacklist": "WritableFileWriter::Append,"
                    "WritableFileWriter::WriteBuffered,"
                    "PosixMmapFile::Allocate,WritableFileWriter::Flush",
                })
            # Run kill mode 0, 1 and 2 by turn.
            kill_mode = (kill_mode + 1) % 3
        elif check_mode == 1:
            # normal run with universal compaction mode
            additional_opts = {
                "kill_random_test": None,
                "ops_per_thread": cmd_params['ops_per_thread'],
                "compaction_style": 1,
            }
        elif check_mode == 2:
            # normal run with FIFO compaction mode
            # ops_per_thread is divided by 5 because FIFO compaction
            # style is quite a bit slower on reads with lot of files
            additional_opts = {
                "kill_random_test": None,
                "ops_per_thread": cmd_params['ops_per_thread'] / 5,
                "compaction_style": 2,
            }
        else:
            # normal run
            additional_opts = {
                "kill_random_test": None,
                "ops_per_thread": cmd_params['ops_per_thread'],
            }

        # Python 2 idiom: dict.items() returns lists, so '+' concatenates;
        # later entries override earlier ones.
        cmd = gen_cmd(dict(cmd_params.items() + additional_opts.items()
                           + {'db': dbname}.items()), unknown_args)

        print "Running:" + ' '.join(cmd) + "\n"  # noqa: E999 T25377293 Grandfathered in

        # stderr is merged into stdout, so stderrdata below is always None.
        popen = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT)
        stdoutdata, stderrdata = popen.communicate()
        retncode = popen.returncode
        msg = ("check_mode={0}, kill option={1}, exitcode={2}\n".format(
               check_mode, additional_opts['kill_random_test'], retncode))
        print msg
        print stdoutdata

        expected = False
        if additional_opts['kill_random_test'] is None and (retncode == 0):
            # we expect zero retncode if no kill option
            expected = True
        elif additional_opts['kill_random_test'] is not None and retncode <= 0:
            # When kill option is given, the test MIGHT kill itself.
            # If it does, negative retncode is expected. Otherwise 0.
            expected = True

        if not expected:
            print "TEST FAILED. See kill option and exit code above!!!\n"
            sys.exit(1)

        # Scan the (lowercased) output for error indications; the summary
        # line "got errors 0 times" also contains "error", so discount it.
        stdoutdata = stdoutdata.lower()
        errorcount = (stdoutdata.count('error') -
                      stdoutdata.count('got errors 0 times'))
        print "#times error occurred in output is " + str(errorcount) + "\n"

        if (errorcount > 0):
            print "TEST FAILED. Output has 'error'!!!\n"
            sys.exit(2)
        if (stdoutdata.find('fail') >= 0):
            print "TEST FAILED. Output has 'fail'!!!\n"
            sys.exit(2)

        # First half of the duration, keep doing kill test. For the next half,
        # try different modes.
        if time.time() > half_time:
            # we need to clean up after ourselves -- only do this on test
            # success
            shutil.rmtree(dbname, True)
            os.mkdir(dbname)
            cmd_params.pop('expected_values_path', None)
            check_mode = (check_mode + 1) % total_check_mode

        time.sleep(1)  # time to stabilize after a kill


def main():
    """Parse command-line options and dispatch to the chosen test driver."""
    parser = argparse.ArgumentParser(description="This script runs and kills \
        db_stress multiple times")
    parser.add_argument("test_type", choices=["blackbox", "whitebox"])
    parser.add_argument("--simple", action="store_true")
    parser.add_argument("--cf_consistency", action='store_true')
    parser.add_argument("--txn", action='store_true')

    # Expose every known stress parameter as an optional flag; Python 2
    # dict.items() returns lists, so '+' merges all the default dicts.
    all_params = dict(default_params.items()
                      + blackbox_default_params.items()
                      + whitebox_default_params.items()
                      + simple_default_params.items()
                      + blackbox_simple_default_params.items()
                      + whitebox_simple_default_params.items())

    # The argparse type of each flag is inferred from a sample of its
    # default (lambdas are called once just for the type).
    for k, v in all_params.items():
        parser.add_argument("--" + k, type=type(v() if callable(v) else v))
    # unknown_args are passed directly to db_stress
    args, unknown_args = parser.parse_known_args()

    # Fail fast if TEST_TMPDIR points at a directory that does not exist.
    test_tmpdir = os.environ.get(_TEST_DIR_ENV_VAR)
    if test_tmpdir is not None and not os.path.isdir(test_tmpdir):
        print('%s env var is set to a non-existent directory: %s' %
                (_TEST_DIR_ENV_VAR, test_tmpdir))
        sys.exit(1)

    if args.test_type == 'blackbox':
        blackbox_crash_main(args, unknown_args)
    if args.test_type == 'whitebox':
        whitebox_crash_main(args, unknown_args)


# Script entry point; guarded so the module can be imported without running.
if __name__ == '__main__':
    main()