module ActiveRecord
  module Batches
    # Looping through a collection of records from the database
    # (using the +all+ method, for example) is very inefficient
    # since it will try to instantiate all the objects at once.
    #
    # In that case, batch processing methods allow you to work
    # with the records in batches, thereby greatly reducing memory consumption.
    #
    # The #find_each method uses #find_in_batches with a batch size of 1000 (or as
    # specified by the +:batch_size+ option).
    #
    #   Person.find_each do |person|
    #     person.do_awesome_stuff
    #   end
    #
    #   Person.where("age > 21").find_each do |person|
    #     person.party_all_night!
    #   end
    #
    # If you do not provide a block to #find_each, it will return an Enumerator
    # for chaining with other methods:
    #
    #   Person.find_each.with_index do |person, index|
    #     person.award_trophy(index + 1)
    #   end
    #
    # ==== Options
    # * <tt>:batch_size</tt> - Specifies the size of the batch. Defaults to 1000.
    # * <tt>:start</tt> - Specifies the primary key value to start from.
    # This is especially useful if you want multiple workers dealing with
    # the same processing queue. You can make worker 1 handle all the records
    # between id 0 and 10,000 and worker 2 handle from 10,000 and beyond
    # (by setting the +:start+ option on that worker).
    #
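    #   # As an illustrative sketch of that multi-worker split, worker 2 could
    #   # simply start where worker 1's range ends:
    #   Person.find_each(start: 10_000) do |person|
    #     person.party_all_night!
    #   end
    #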
    #   # Let's process records in batches of 2000, starting from primary key 2000
    #   Person.find_each(start: 2000, batch_size: 2000) do |person|
    #     person.party_all_night!
    #   end
    #
    # NOTE: It's not possible to set the order. That is automatically set to
    # ascending on the primary key ("id ASC") to make the batch ordering
    # work. This also means that this method only works when the primary key is
    # orderable (e.g. an integer or string).
    #
    # NOTE: You can't set the limit either, that's used to control
    # the batch sizes.
    def find_each(options = {})
      if block_given?
        find_in_batches(options) do |records|
          records.each { |record| yield record }
        end
      else
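        # Without a block, return a lazy Enumerator; the block passed to
        # enum_for supplies its size (the count of records from :start onward)
        # without loading them.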
        enum_for :find_each, options do
          options[:start] ? where(table[primary_key].gteq(options[:start])).size : size
        end
      end
    end

    # Yields each batch of records that was found by the find +options+ as
    # an array.
    #
    #   Person.where("age > 21").find_in_batches do |group|
    #     sleep(50) # Make sure it doesn't get too crowded in there!
    #     group.each { |person| person.party_all_night! }
    #   end
    #
    # If you do not provide a block to #find_in_batches, it will return an Enumerator
    # for chaining with other methods:
    #
    #   Person.find_in_batches.with_index do |group, batch|
    #     puts "Processing group ##{batch}"
    #     group.each(&:recover_from_last_night!)
    #   end
    #
    # To have each record yielded one by one, use #find_each instead.
    #
    # ==== Options
    # * <tt>:batch_size</tt> - Specifies the size of the batch. Defaults to 1000.
    # * <tt>:start</tt> - Specifies the primary key value to start from.
    # This is especially useful if you want multiple workers dealing with
    # the same processing queue. You can make worker 1 handle all the records
    # between id 0 and 10,000 and worker 2 handle from 10,000 and beyond
    # (by setting the +:start+ option on that worker).
    #
    #   # Let's process groups of 2000 records, starting from primary key 2000
    #   Person.find_in_batches(start: 2000, batch_size: 2000) do |group|
    #     group.each { |person| person.party_all_night! }
    #   end
    #
    # NOTE: It's not possible to set the order. That is automatically set to
    # ascending on the primary key ("id ASC") to make the batch ordering
    # work. This also means that this method only works when the primary key is
    # orderable (e.g. an integer or string).
    #
    # NOTE: You can't set the limit either, that's used to control
    # the batch sizes.
    def find_in_batches(options = {})
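      # Only :start and :batch_size are recognized options.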
      options.assert_valid_keys(:start, :batch_size)

      relation = self
      start = options[:start]
      batch_size = options[:batch_size] || 1000

      unless block_given?
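        # The Enumerator's size is the number of batches, computed lazily from
        # the record count (from :start onward when given).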
        return to_enum(:find_in_batches, options) do
          total = start ? where(table[primary_key].gteq(start)).size : size
          (total - 1).div(batch_size) + 1
        end
      end

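      # An existing order or limit on the relation would break batching, so warn
      # before overriding them below.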
      if logger && (arel.orders.present? || arel.taken.present?)
        logger.warn("Scoped order and limit are ignored, it's forced to be batch order and batch size")
      end

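      # Force ascending primary-key order, cap each query at batch_size rows,
      # and load the first batch (from :start when given).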
      relation = relation.reorder(batch_order).limit(batch_size)
      records = start ? relation.where(table[primary_key].gteq(start)).to_a : relation.to_a
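      # Walk the table in primary-key order: after yielding a batch, fetch the
      # next one keyed strictly after the last record just processed.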

      while records.any?
        records_size = records.size
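        # Batching paginates on the primary key, so a custom select clause must
        # include it.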
        primary_key_offset = records.last.id
        raise "Primary key not included in the custom select clause" unless primary_key_offset

        yield records

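        # A batch smaller than batch_size means it was the last one.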
        break if records_size < batch_size

        records = relation.where(table[primary_key].gt(primary_key_offset)).to_a
      end
    end

    private

    def batch_order
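      # Batches are always read in ascending primary-key order.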
      "#{quoted_table_name}.#{quoted_primary_key} ASC"
    end
  end
end