/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "utilities/taskqueue.hpp"

// Inline functions for G1CollectedHeap

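// Returns the HeapRegion that contains addr, or NULL if addr is outside
// the regions G1 manages (e.g. in the perm gen). For a "continues
// humongous" region the corresponding humongous start region is returned
// instead, so callers see the region that holds the object's start.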
inline HeapRegion*
G1CollectedHeap::heap_region_containing(const void* addr) const {
  HeapRegion* hr = _hrs->addr_to_region(addr);
  // hr can be null if addr in perm_gen
  if (hr != NULL && hr->continuesHumongous()) {
    hr = hr->humongous_start_region();
  }
  return hr;
}

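// A faster variant of heap_region_containing() that requires addr to lie
// within the reserved G1 heap: it computes the region index directly from
// the address and, unlike the version above, does not redirect "continues
// humongous" regions to their start region.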
inline HeapRegion*
G1CollectedHeap::heap_region_containing_raw(const void* addr) const {
  assert(_g1_reserved.contains(addr), "invariant");
  size_t index = pointer_delta(addr, _g1_reserved.start(), 1)
                                        >> HeapRegion::LogOfHRGrainBytes;

  HeapRegion* res = _hrs->at(index);
  assert(res == _hrs->addr_to_region(addr), "sanity");
  return res;
}

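// Returns true iff obj lies in a region that is currently part of the
// collection set.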
inline bool G1CollectedHeap::obj_in_cs(oop obj) {
  HeapRegion* r = _hrs->addr_to_region(obj);
  return r != NULL && r->in_collection_set();
}

// See the comment in the .hpp file about the locking protocol and
// assumptions of this method (and other related ones).
inline HeapWord*
G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
                                                size_t word_size,
                                                bool with_heap_lock) {
  assert_not_at_safepoint();
  assert(with_heap_lock == Heap_lock->owned_by_self(),
         "with_heap_lock and Heap_lock->owned_by_self() should be a tautology");
  assert(cur_alloc_region != NULL, "pre-condition of the method");
  assert(cur_alloc_region->is_young(),
         "we only support young current alloc regions");
  assert(!isHumongous(word_size), "allocate_from_cur_alloc_region() "
         "should not be used for humongous allocations");
  assert(!cur_alloc_region->isHumongous(), "Catch a regression of this bug.");

  assert(!cur_alloc_region->is_empty(),
         err_msg("region ["PTR_FORMAT","PTR_FORMAT"] should not be empty",
                 cur_alloc_region->bottom(), cur_alloc_region->end()));
  HeapWord* result = cur_alloc_region->par_allocate_no_bot_updates(word_size);
  if (result != NULL) {
    assert(is_in(result), "result should be in the heap");

    if (with_heap_lock) {
      Heap_lock->unlock();
    }
    assert_heap_not_locked();
    // Do the dirtying after we release the Heap_lock.
    dirty_young_block(result, word_size);
    return result;
  }

  if (with_heap_lock) {
    assert_heap_locked();
  } else {
    assert_heap_not_locked();
  }
  return NULL;
}

// See the comment in the .hpp file about the locking protocol and
// assumptions of this method (and other related ones).
inline HeapWord*
G1CollectedHeap::attempt_allocation(size_t word_size) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation() should not be called "
         "for humongous allocation requests");

  HeapRegion* cur_alloc_region = _cur_alloc_region;
  if (cur_alloc_region != NULL) {
    HeapWord* result = allocate_from_cur_alloc_region(cur_alloc_region,
                                                   word_size,
                                                   false /* with_heap_lock */);
    assert_heap_not_locked();
    if (result != NULL) {
      return result;
    }
  }

  // Our attempt to allocate lock-free failed as the current
  // allocation region is either NULL or full. So, we'll now take the
  // Heap_lock and retry.
  Heap_lock->lock();

  HeapWord* result = attempt_allocation_locked(word_size);
  if (result != NULL) {
    assert_heap_not_locked();
    return result;
  }

  assert_heap_locked();
  return NULL;
}

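// Common retirement code for the (young) current alloc region: it is added
// to the left-hand side of the incremental collection set, its used bytes
// are folded into _summary_bytes_used, and _cur_alloc_region is cleared.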
inline void
G1CollectedHeap::retire_cur_alloc_region_common(HeapRegion* cur_alloc_region) {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  assert(cur_alloc_region != NULL && cur_alloc_region == _cur_alloc_region,
         "pre-condition of the call");
  assert(cur_alloc_region->is_young(),
         "we only support young current alloc regions");

  // The region is guaranteed to be young
  g1_policy()->add_region_to_incremental_cset_lhs(cur_alloc_region);
  _summary_bytes_used += cur_alloc_region->used();
  _cur_alloc_region = NULL;
}

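// Slower path of attempt_allocation(): called with the Heap_lock held after
// the lock-free attempt has failed. On success the lock has been released;
// on failure the caller still holds it.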
inline HeapWord*
G1CollectedHeap::attempt_allocation_locked(size_t word_size) {
  assert_heap_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation_locked() "
         "should not be called for humongous allocation requests");

  // First, reread the current alloc region and retry the allocation
  // in case somebody replaced it while we were waiting to get the
  // Heap_lock.
  HeapRegion* cur_alloc_region = _cur_alloc_region;
  if (cur_alloc_region != NULL) {
    HeapWord* result = allocate_from_cur_alloc_region(
                                                  cur_alloc_region, word_size,
                                                  true /* with_heap_lock */);
    if (result != NULL) {
      assert_heap_not_locked();
      return result;
    }

    // We failed to allocate out of the current alloc region, so let's
    // retire it before getting a new one.
    retire_cur_alloc_region(cur_alloc_region);
  }

  assert_heap_locked();
  // Try to get a new region and allocate out of it
  HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
                                                     false, /* at_safepoint */
                                                     true,  /* do_dirtying */
                                                     false  /* can_expand */);
  if (result != NULL) {
    assert_heap_not_locked();
    return result;
  }

  assert_heap_locked();
  return NULL;
}

// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing_raw() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
  assert(containing_hr != NULL && start != NULL && word_size > 0,
         "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->isHumongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
}

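// Returns worker i's queue of references to be scanned (RefToScanQueue).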
inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
  return _task_queues->queue(i);
}

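// Liveness queries against the concurrent marking bitmaps: "prev" is the
// bitmap from the last completed marking cycle, "next" is the one being
// built by the marking cycle currently in progress.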
inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
  return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
}

inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
  return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP