#!/usr/bin/env ruby

# TODO (temporary here, we'll move this into the Github issues once
#       redis-trib initial implementation is completed).
#
# - Make sure that if the rehashing fails in the middle redis-trib will try
#   to recover.
# - When redis-trib performs a cluster check, if it detects a slot move in
#   progress it should prompt the user to continue the move from where it
#   stopped.
# - Gracefully handle Ctrl+C in move_slot to prompt the user if really stop
#   while rehashing, and performing the best cleanup possible if the user
#   forces the quit.
# - When doing "fix" set a global Fix to true, and prompt the user to
#   fix the problem if automatically fixable every time there is something
#   to fix. For instance:
#   1) If there is a node that pretend to receive a slot, or to migrate a
#      slot, but has no entries in that slot, fix it.
#   2) If there is a node having keys in slots that are not owned by it
#      fix this condition moving the entries in the same node.
#   3) Perform more possibly slow tests about the state of the cluster.
#   4) When aborted slot migration is detected, fix it.
# 'rubygems' is loaded first so that on older Rubies the gem-installed
# 'redis' client library below can be found.
require 'rubygems'
require 'redis'

# Total number of hash slots in a Redis Cluster.
ClusterHashSlots = 16384

# Print a line, colorizing it (only when TERM is "xterm") according to
# its prefix: ">>>" bold, "[ER" red, "[OK" green, "[FA"/"***" yellow.
def xputs(s)
    case s[0..2]
    when ">>>"
        color="29;1"
    when "[ER"
        color="31;1"
    when "[OK"
        color="32"
    when "[FA","***"
        color="33"
    else
        color=nil
    end

    color = nil if ENV['TERM'] != "xterm"
    print "\033[#{color}m" if color
    print s
    print "\033[0m" if color
    print "\n"
end
49

50 51 52
class ClusterNode
    def initialize(addr)
        s = addr.split(":")
53
        if s.length != 2
A
antirez 已提交
54
            puts "Invalid node name #{addr}"
55 56
            exit 1
        end
57
        @r = nil
58 59 60 61
        @info = {}
        @info[:host] = s[0]
        @info[:port] = s[1]
        @info[:slots] = {}
62 63
        @info[:migrating] = {}
        @info[:importing] = {}
64
        @dirty = false # True if we need to flush slots info into node.
65
        @friends = []
66 67
    end

68 69 70 71 72
    def friends
        @friends
    end

    def slots 
73
        @info[:slots]
74 75
    end

76 77 78 79
    def has_flag?(flag)
        @info[:flags].index(flag)
    end

80
    def to_s
81
        "#{@info[:host]}:#{@info[:port]}"
82 83
    end

84
    def connect(o={})
85
        return if @r
86 87
        print "Connecting to node #{self}: "
        STDOUT.flush
88
        begin
89
            @r = Redis.new(:host => @info[:host], :port => @info[:port])
90
            @r.ping
91
        rescue
92
            xputs "[ERR] Sorry, can't connect to node #{self}"
93 94
            exit 1 if o[:abort]
            @r = nil
95
        end
96
        xputs "OK"
97 98
    end

99 100 101
    def assert_cluster
        info = @r.info
        if !info["cluster_enabled"] || info["cluster_enabled"].to_i == 0
102
            xputs "[ERR] Node #{self} is not configured as a cluster node."
103 104 105 106
            exit 1
        end
    end

107 108 109
    def assert_empty
        if !(@r.cluster("info").split("\r\n").index("cluster_known_nodes:1")) ||
            (@r.info['db0'])
110
            xputs "[ERR] Node #{self} is not empty. Either the node already knows other nodes (check with CLUSTER NODES) or contains some key in database 0."
111 112 113 114
            exit 1
        end
    end

115 116 117 118 119
    def load_info(o={})
        self.connect
        nodes = @r.cluster("nodes").split("\n")
        nodes.each{|n|
            # name addr flags role ping_sent ping_recv link_status slots
120 121 122
            split = n.split
            name,addr,flags,role,ping_sent,ping_recv,link_status = split[0..6]
            slots = split[7..-1]
123 124 125 126 127 128 129 130 131 132
            info = {
                :name => name,
                :addr => addr,
                :flags => flags.split(","),
                :role => role,
                :ping_sent => ping_sent.to_i,
                :ping_recv => ping_recv.to_i,
                :link_status => link_status
            }
            if info[:flags].index("myself")
133 134
                @info = @info.merge(info)
                @info[:slots] = {}
135 136
                slots.each{|s|
                    if s[0..0] == '['
137
                        if s.index("->-") # Migrating
138 139
                            slot,dst = s[1..-1].split("->-")
                            @info[:migrating][slot] = dst
140
                        elsif s.index("-<-") # Importing
141 142 143
                            slot,src = s[1..-1].split("-<-")
                            @info[:importing][slot] = src
                        end
144
                    elsif s.index("-")
145 146 147 148 149
                        start,stop = s.split("-")
                        self.add_slots((start.to_i)..(stop.to_i))
                    else
                        self.add_slots((s.to_i)..(s.to_i))
                    end
150
                } if slots
151
                @dirty = false
152 153 154
                @r.cluster("info").split("\n").each{|e|    
                    k,v=e.split(":")
                    k = k.to_sym
155
                    v.chop!
156 157 158 159 160 161
                    if k != :cluster_state
                        @info[k] = v.to_i
                    else
                        @info[k] = v
                    end
                }
162 163 164 165 166 167
            elsif o[:getfriends]
                @friends << info
            end
        }
    end

168 169
    def add_slots(slots)
        slots.each{|s|
170
            @info[:slots][s] = :new
171 172 173 174 175 176 177
        }
        @dirty = true
    end

    # Push pending (:new) slot assignments to the node with CLUSTER
    # ADDSLOTS. No-op when there are no local changes.
    def flush_node_config
        return if !@dirty
        new = []
        @info[:slots].each{|s,val|
            if val == :new
                new << s
                @info[:slots][s] = true
            end
        }
        @r.cluster("addslots",*new)
        @dirty = false
    end

188
    def info_string
189
        # We want to display the hash slots assigned to this node
A
antirez 已提交
190
        # as ranges, like in: "1-5,8-9,20-25,30"
191 192 193 194 195 196
        #
        # Note: this could be easily written without side effects,
        # we use 'slots' just to split the computation into steps.
        
        # First step: we want an increasing array of integers
        # for instance: [1,2,3,4,5,8,9,20,21,22,23,24,25,30]
197
        slots = @info[:slots].keys.sort
198

G
guiquanz 已提交
199
        # As we want to aggregate adjacent slots we convert all the
200 201
        # slot integers into ranges (with just one element)
        # So we have something like [1..1,2..2, ... and so forth.
A
antirez 已提交
202
        slots.map!{|x| x..x}
203

G
guiquanz 已提交
204
        # Finally we group ranges with adjacent elements.
205 206 207
        slots = slots.reduce([]) {|a,b|
            if !a.empty? && b.first == (a[-1].last)+1
                a[0..-2] + [(a[-1].first)..(b.last)]
208
            else
209
                a + [b]
210
            end
211 212 213 214 215 216 217
        }

        # Now our task is easy, we just convert ranges with just one
        # element into a number, and a real range into a start-end format.
        # Finally we join the array using the comma as separator.
        slots = slots.map{|x|
            x.count == 1 ? x.first.to_s : "#{x.first}-#{x.last}"
218
        }.join(",")
219

220
        "[#{@info[:cluster_state].upcase} #{(self.info[:flags]-["myself"]).join(",")}] #{self.info[:name]} #{self.to_s} slots:#{slots} (#{self.slots.length} slots)"
221
    end
222

223 224 225 226 227 228 229 230
    # Return a single string representing nodes and associated slots.
    # TODO: remove slaves from config when slaves will be handled
    # by Redis Cluster.
    def get_config_signature
        config = []
        @r.cluster("nodes").each_line{|l|
            s = l.split
            slots = s[7..-1].select {|x| x[0..0] != "["}
231
            next if slots.length == 0
232 233 234 235 236
            config << s[0]+":"+(slots.sort.join(","))
        }
        config.sort.join("|")
    end

237
    def info
238
        @info
239
    end
240 241 242 243 244
    
    def is_dirty?
        @dirty
    end

245 246 247 248 249 250
    def r
        @r
    end
end

class RedisTrib
251 252
    def initialize
        @nodes = []
253 254
        @fix = false
        @errors = []
255 256
    end

257 258 259
    def check_arity(req_args, num_args)
        if ((req_args > 0 and num_args != req_args) ||
           (req_args < 0 and num_args < req_args.abs))
260
           xputs "[ERR] Wrong number of arguments for specified sub command"
261 262 263 264
           exit 1
        end
    end

265 266 267 268
    def add_node(node)
        @nodes << node
    end

269 270
    def cluster_error(msg)
        @errors << msg
271
        xputs msg
272 273
    end

274 275 276 277 278 279 280
    def get_node_by_name(name)
        @nodes.each{|n|
            return n if n.info[:name] == name.downcase
        }
        return nil
    end

281
    def check_cluster
282
        xputs ">>> Performing Cluster Check (using node #{@nodes[0]})"
283
        show_nodes
284
        check_config_consistency
285
        check_open_slots
286 287 288 289 290 291
        check_slots_coverage
    end

    # Merge slots of every known node. If the resulting slots are equal
    # to ClusterHashSlots, then all slots are served.
    def covered_slots
        slots = {}
        @nodes.each{|n|
            slots = slots.merge(n.slots)
        }
        slots
    end

    # Verify that all ClusterHashSlots slots are served by some node;
    # in fix mode try to repair the missing coverage.
    def check_slots_coverage
        xputs ">>> Check slots coverage..."
        slots = covered_slots
        if slots.length == ClusterHashSlots
            xputs "[OK] All #{ClusterHashSlots} slots covered."
        else
            cluster_error \
                "[ERR] Not all #{ClusterHashSlots} slots are covered by nodes."
            fix_slots_coverage if @fix
        end
    end

311
    def check_open_slots
312
        xputs ">>> Check for open slots..."
313 314 315
        open_slots = []
        @nodes.each{|n|
            if n.info[:migrating].size > 0
316 317
                cluster_error \
                    "[WARNING] Node #{n} has slots in migrating state."
318 319
                open_slots += n.info[:migrating].keys
            elsif n.info[:importing].size > 0
320 321
                cluster_error \
                    "[WARNING] Node #{n} has slots in importing state."
322 323 324 325
                open_slots += n.info[:importing].keys
            end
        }
        open_slots.uniq!
326
        if open_slots.length > 0
327
            xputs "[WARNING] The following slots are open: #{open_slots.join(",")}"
328 329 330 331 332 333
        end
        if @fix
            open_slots.each{|slot| fix_open_slot slot}
        end
    end

334 335 336 337 338 339 340 341
    def nodes_with_keys_in_slot(slot)
        nodes = []
        @nodes.each{|n|
            nodes << n if n.r.cluster("getkeysinslot",slot,1).length > 0
        }
        nodes
    end

342
    def fix_slots_coverage
343
        not_covered = (0...ClusterHashSlots).to_a - covered_slots.keys
344 345
        xputs ">>> Fixing slots coverage..."
        xputs "List of not covered slots: " + not_covered.join(",")
346 347 348 349 350 351 352 353 354

        # For every slot, take action depending on the actual condition:
        # 1) No node has keys for this slot.
        # 2) A single node has keys for this slot.
        # 3) Multiple nodes have keys for this slot.
        slots = {}
        not_covered.each{|slot|
            nodes = nodes_with_keys_in_slot(slot)
            slots[slot] = nodes
355
            xputs "Slot #{slot} has keys in #{nodes.length} nodes: #{nodes.join}"
356 357 358 359 360 361
        }

        none = slots.select {|k,v| v.length == 0}
        single = slots.select {|k,v| v.length == 1}
        multi = slots.select {|k,v| v.length > 1}

362 363
        # Handle case "1": keys in no node.
        if none.length > 0
364 365
            xputs "The folowing uncovered slots have no keys across the cluster:"
            xputs none.keys.join(",")
366 367 368
            yes_or_die "Fix these slots by covering with a random node?"
            none.each{|slot,nodes|
                node = @nodes.sample
369
                xputs ">>> Covering slot #{slot} with #{node}"
370 371 372 373 374 375
                node.r.cluster("addslots",slot)
            }
        end

        # Handle case "2": keys only in one node.
        if single.length > 0
376
            xputs "The folowing uncovered slots have keys in just one node:"
377
            puts single.keys.join(",")
378
            yes_or_die "Fix these slots by covering with those nodes?"
379
            single.each{|slot,nodes|
380
                xputs ">>> Covering slot #{slot} with #{nodes[0]}"
381 382 383
                nodes[0].r.cluster("addslots",slot)
            }
        end
384 385 386

        # Handle case "3": keys in multiple nodes.
        if multi.length > 0
387 388
            xputs "The folowing uncovered slots have keys in multiple nodes:"
            xputs multi.keys.join(",")
389 390
            yes_or_die "Fix these slots by moving keys into a single node?"
            multi.each{|slot,nodes|
391
                xputs ">>> Covering slot #{slot} moving keys to #{nodes[0]}"
392 393 394 395 396 397 398 399 400
                # TODO
                # 1) Set all nodes as "MIGRATING" for this slot, so that we
                # can access keys in the hash slot using ASKING.
                # 2) Move everything to node[0]
                # 3) Clear MIGRATING from nodes, and ADDSLOTS the slot to
                # node[0].
                raise "TODO: Work in progress"
            }
        end
401 402
    end

403 404 405 406 407 408 409 410 411 412 413 414 415
    # Slot 'slot' was found to be in importing or migrating state in one or
    # more nodes. This function fixes this condition by migrating keys where
    # it seems more sensible.
    def fix_open_slot(slot)
        migrating = []
        importing = []
        @nodes.each{|n|
            next if n.has_flag? "slave"
            if n.info[:migrating][slot]
                migrating << n
            elsif n.info[:importing][slot]
                importing << n
            elsif n.r.cluster("countkeysinslot",slot) > 0
416
                xputs "*** Found keys about slot #{slot} in node #{n}!"
417 418
            end
        }
419
        puts ">>> Fixing open slot #{slot}"
420 421 422 423 424 425
        puts "Set as migrating in: #{migrating.join(",")}"
        puts "Set as importing in: #{importing.join(",")}"

        # Case 1: The slot is in migrating state in one slot, and in
        #         importing state in 1 slot. That's trivial to address.
        if migrating.length == 1 && importing.length == 1
A
antirez 已提交
426
            move_slot(migrating[0],importing[0],slot,:verbose=>true)
427
        else
428
            xputs "[ERR] Sorry, Redis-trib can't fix this slot yet (work in progress)"
429 430 431
        end
    end

432 433 434 435 436 437 438
    # Check if all the nodes agree about the cluster configuration
    def check_config_consistency
        signatures=[]
        @nodes.each{|n|
            signatures << n.get_config_signature
        }
        if signatures.uniq.length != 1
439
            cluster_error "[ERR] Nodes don't agree about configuration!"
440
        else
441
            xputs "[OK] All nodes agree about slots configuration."
442 443 444
        end
    end

445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464
    def alloc_slots
        slots_per_node = ClusterHashSlots/@nodes.length
        i = 0
        @nodes.each{|n|
            first = i*slots_per_node
            last = first+slots_per_node-1
            last = ClusterHashSlots-1 if i == @nodes.length-1
            n.add_slots first..last
            i += 1
        }
    end

    # Flush pending slot assignments of every node to the servers.
    def flush_nodes_config
        @nodes.each{|n|
            n.flush_node_config
        }
    end

    # Print a one-line summary for every known node.
    def show_nodes
        @nodes.each{|n|
            xputs n.info_string
        }
    end

    def join_cluster
        # We use a brute force approach to make sure the node will meet
        # each other, that is, sending CLUSTER MEET messages to all the nodes
        # about the very same node.
        # Thanks to gossip this information should propagate across all the
        # cluster in a matter of seconds.
        first = false
        @nodes.each{|n|
            if !first then first = n.info; next; end # Skip the first node
            n.r.cluster("meet",first[:host],first[:port])
        }
    end

    # Ask the user for confirmation; anything but a literal 'yes'
    # terminates the program.
    def yes_or_die(msg)
        print "#{msg} (type 'yes' to accept): "
        STDOUT.flush
        if !(STDIN.gets.chomp.downcase == "yes")
            xputs "*** Aborting..."
            exit 1
        end
    end

491
    def load_cluster_info_from_node(nodeaddr)
492
        node = ClusterNode.new(nodeaddr)
493 494
        node.connect(:abort => true)
        node.assert_cluster
495
        node.load_info(:getfriends => true)
496
        add_node(node)
497
        node.friends.each{|f|
498 499 500
            next if f[:flags].index("noaddr") ||
                    f[:flags].index("disconnected") ||
                    f[:flags].index("fail")
501 502 503 504 505
            fnode = ClusterNode.new(f[:addr])
            fnode.connect()
            fnode.load_info()
            add_node(fnode)
        }
506 507
    end

508 509 510 511 512
    # Given a list of source nodes return a "resharding plan"
    # with what slots to move in order to move "numslots" slots to another
    # instance.
    def compute_reshard_table(sources,numslots)
        moved = []
513
        # Sort from bigger to smaller instance, for two reasons:
514 515 516 517 518 519
        # 1) If we take less slots than instances it is better to start
        #    getting from the biggest instances.
        # 2) We take one slot more from the first instance in the case of not
        #    perfect divisibility. Like we have 3 nodes and need to get 10
        #    slots, we take 4 from the first, and 3 from the rest. So the
        #    biggest is always the first.
520
        sources = sources.sort{|a,b| b.slots.length <=> a.slots.length}
521 522 523
        source_tot_slots = sources.inject(0) {|sum,source|
            sum+source.slots.length
        }
524
        sources.each_with_index{|s,i|
525 526
            # Every node will provide a number of slots proportional to the
            # slots it has assigned.
527
            n = (numslots.to_f/source_tot_slots*s.slots.length)
528 529 530 531 532
            if i == 0
                n = n.ceil
            else
                n = n.floor
            end
533 534 535 536 537 538 539 540 541 542 543
            s.slots.keys.sort[(0...n)].each{|slot|
                if moved.length < numslots
                    moved << {:source => s, :slot => slot}
                end
            }
        }
        return moved
    end

    # Print one line per planned slot move (see compute_reshard_table).
    def show_reshard_table(table)
        table.each{|e|
            puts "    Moving slot #{e[:slot]} from #{e[:source].info[:name]}"
        }
    end

548
    def move_slot(source,target,slot,o={})
549 550
        # We start marking the slot as importing in the destination node,
        # and the slot as migrating in the target host. Note that the order of
551 552
        # the operations is important, as otherwise a client may be redirected
        # to the target node that does not yet know it is importing this slot.
553
        print "Moving slot #{slot} from #{source} to #{target}: "; STDOUT.flush
554
        target.r.cluster("setslot",slot,"importing",source.info[:name])
555
        source.r.cluster("setslot",slot,"migrating",target.info[:name])
556
        # Migrate all the keys from source to target using the MIGRATE command
557 558 559 560
        while true
            keys = source.r.cluster("getkeysinslot",slot,10)
            break if keys.length == 0
            keys.each{|key|
561
                source.r.migrate(target.info[:host],target.info[:port],key,0,1000)
562 563 564 565 566 567 568 569 570
                print "." if o[:verbose]
                STDOUT.flush
            }
        end
        puts
        # Set the new node as the owner of the slot in all the known nodes.
        @nodes.each{|n|
            n.r.cluster("setslot",slot,"node",target.info[:name])
        }
571 572
    end

573 574
    # redis-trib subcommands implementations

575 576 577 578 579 580 581
    def check_cluster_cmd
        load_cluster_info_from_node(ARGV[1])
        check_cluster
    end

    # Like the check subcommand, but with fix mode enabled so that
    # detected problems are repaired interactively.
    def fix_cluster_cmd
        @fix = true
        load_cluster_info_from_node(ARGV[1])
        check_cluster
    end

586 587
    def reshard_cluster_cmd
        load_cluster_info_from_node(ARGV[1])
588 589
        check_cluster
        if @errors.length != 0
590
            puts "*** Please fix your cluster problems before resharding"
591 592
            exit 1
        end
593
        numslots = 0
594 595
        while numslots <= 0 or numslots > ClusterHashSlots
            print "How many slots do you want to move (from 1 to #{ClusterHashSlots})? "
596 597 598 599 600 601
            numslots = STDIN.gets.to_i
        end
        target = nil
        while not target
            print "What is the receiving node ID? "
            target = get_node_by_name(STDIN.gets.chop)
602
            if !target || target.has_flag?("slave")
603
                xputs "*** The specified node is not known or not a master, please retry."
604
                target = nil
605 606 607
            end
        end
        sources = []
608 609 610
        xputs "Please enter all the source node IDs."
        xputs "  Type 'all' to use all the nodes as source nodes for the hash slots."
        xputs "  Type 'done' once you entered all the source nodes IDs."
611 612 613 614 615 616 617 618 619 620 621 622 623 624
        while true
            print "Source node ##{sources.length+1}:"
            line = STDIN.gets.chop
            src = get_node_by_name(line)
            if line == "done"
                if sources.length == 0
                    puts "No source nodes given, operation aborted"
                    exit 1
                else
                    break
                end
            elsif line == "all"
                @nodes.each{|n|
                    next if n.info[:name] == target.info[:name]
625
                    next if n.has_flag?("slave")
626 627 628
                    sources << n
                }
                break
629
            elsif !src || src.has_flag?("slave")
630
                xputs "*** The specified node is not known or is not a master, please retry."
631
            elsif src.info[:name] == target.info[:name]
632
                xputs "*** It is not possible to use the target node as source node."
633 634 635
            else
                sources << src
            end
636
        end
637 638 639 640 641 642
        puts "\nReady to move #{numslots} slots."
        puts "  Source nodes:"
        sources.each{|s| puts "    "+s.info_string}
        puts "  Destination node:"
        puts "    #{target.info_string}"
        reshard_table = compute_reshard_table(sources,numslots)
643
        puts "  Resharding plan:"
644
        show_reshard_table(reshard_table)
645 646 647 648
        print "Do you want to proceed with the proposed reshard plan (yes/no)? "
        yesno = STDIN.gets.chop
        exit(1) if (yesno != "yes")
        reshard_table.each{|e|
649
            move_slot(e[:source],target,e[:slot],:verbose=>true)
650
        }
651 652
    end

653
    def create_cluster_cmd
654
        xputs ">>> Creating cluster"
655 656 657 658
        ARGV[1..-1].each{|n|
            node = ClusterNode.new(n)
            node.connect(:abort => true)
            node.assert_cluster
659
            node.load_info
660 661 662
            node.assert_empty
            add_node(node)
        }
663
        xputs ">>> Performing hash slots allocation on #{@nodes.length} nodes..."
664 665 666 667
        alloc_slots
        show_nodes
        yes_or_die "Can I set the above configuration?"
        flush_nodes_config
668 669
        xputs ">>> Nodes configuration updated"
        xputs ">>> Sending CLUSTER MEET messages to join the cluster"
670 671 672
        join_cluster
        check_cluster
    end
673 674

    def addnode_cluster_cmd
675
        xputs ">>> Adding node #{ARGV[1]} to cluster #{ARGV[2]}"
676 677 678 679 680 681 682 683 684 685 686 687 688 689

        # Check the existing cluster
        load_cluster_info_from_node(ARGV[2])
        check_cluster

        # Add the new node
        new = ClusterNode.new(ARGV[1])
        new.connect(:abort => true)
        new.assert_cluster
        new.load_info
        new.assert_empty
        first = @nodes.first.info

        # Send CLUSTER MEET command to the new node
690
        xputs ">>> Send CLUSTER MEET to node #{new} to make it join the cluster."
691 692
        new.r.cluster("meet",first[:host],first[:port])
    end
A
antirez 已提交
693 694 695 696 697

    def help_cluster_cmd
        show_help
        exit 0
    end
698 699 700
end

# Subcommand table: name => [method to invoke, arity spec for
# check_arity (negative = minimum), usage string].
COMMANDS={
    "create"  => ["create_cluster_cmd", -2, "host1:port1 ... hostN:portN"],
    "check"   => ["check_cluster_cmd", 2, "host:port"],
    "fix"     => ["fix_cluster_cmd", 2, "host:port"],
    "reshard" => ["reshard_cluster_cmd", 2, "host:port"],
    "addnode" => ["addnode_cluster_cmd", 3, "new_host:new_port existing_host:existing_port"],
    "help"    => ["help_cluster_cmd", 1, "(show this help)"]
}

# Print the usage screen listing every subcommand and its arguments.
def show_help
    puts "Usage: redis-trib <command> <arguments ...>"
    puts
    COMMANDS.each{|k,v|
        puts "  #{k.ljust(10)} #{v[2]}"
    }
    puts
end

# Sanity check
if ARGV.length == 0
    show_help
    exit 1
end

rt = RedisTrib.new
cmd_spec = COMMANDS[ARGV[0].downcase]
if !cmd_spec
    puts "Unknown redis-trib subcommand '#{ARGV[0]}'"
    exit 1
end
rt.check_arity(cmd_spec[1],ARGV.length)

# Dispatch
rt.send(cmd_spec[0])