/*
2
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
#include "utilities/taskqueue.hpp"

// Inline functions for G1CollectedHeap

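// Returns the heap region containing the given address, or NULL if
// the address is not covered by a region (e.g. it is in the perm
// gen). For an address inside a humongous object this returns the
// "starts humongous" region, not the "continues humongous" one.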
inline HeapRegion*
G1CollectedHeap::heap_region_containing(const void* addr) const {
  HeapRegion* hr = _hrs->addr_to_region(addr);
  // hr can be NULL if addr is in the perm gen
  if (hr != NULL && hr->continuesHumongous()) {
    hr = hr->humongous_start_region();
  }
  return hr;
}

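// A faster version of heap_region_containing() that assumes the
// address is covered by the G1 reserved space: it indexes straight
// into the region sequence and does not remap "continues humongous"
// regions to their corresponding "starts humongous" region.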
inline HeapRegion*
G1CollectedHeap::heap_region_containing_raw(const void* addr) const {
  assert(_g1_reserved.contains(addr), "invariant");
  size_t index = pointer_delta(addr, _g1_reserved.start(), 1)
                                        >> HeapRegion::LogOfHRGrainBytes;

  HeapRegion* res = _hrs->at(index);
  assert(res == _hrs->addr_to_region(addr), "sanity");
  return res;
}

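// Returns true iff obj is in a region that is currently part of the
// collection set.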
inline bool G1CollectedHeap::obj_in_cs(oop obj) {
  HeapRegion* r = _hrs->addr_to_region(obj);
  return r != NULL && r->in_collection_set();
}

// See the comment in the .hpp file about the locking protocol and
// assumptions of this method (and other related ones).
inline HeapWord*
G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
                                                size_t word_size) {
  assert_heap_locked_and_not_at_safepoint();
  assert(cur_alloc_region != NULL, "pre-condition of the method");
  assert(cur_alloc_region == _cur_alloc_region, "pre-condition of the method");
  assert(cur_alloc_region->is_young(),
         "we only support young current alloc regions");
  assert(!isHumongous(word_size), "allocate_from_cur_alloc_region() "
         "should not be used for humongous allocations");
  assert(!cur_alloc_region->isHumongous(),
         "the current alloc region should never be humongous");

  assert(!cur_alloc_region->is_empty(),
         err_msg("region ["PTR_FORMAT","PTR_FORMAT"] should not be empty",
                 cur_alloc_region->bottom(), cur_alloc_region->end()));
  // This allocate method does BOT updates and we don't need them in
  // the young generation. This will be fixed in the near future by
  // CR 6994297.
  HeapWord* result = cur_alloc_region->allocate(word_size);
  if (result != NULL) {
    assert(is_in(result), "result should be in the heap");
    Heap_lock->unlock();

    // Do the dirtying after we release the Heap_lock.
    dirty_young_block(result, word_size);
    return result;
  }

  assert_heap_locked();
  return NULL;
}

// See the comment in the .hpp file about the locking protocol and
// assumptions of this method (and other related ones).
inline HeapWord*
G1CollectedHeap::attempt_allocation(size_t word_size) {
  assert_heap_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation() should not be called "
         "for humongous allocation requests");

  HeapRegion* cur_alloc_region = _cur_alloc_region;
  if (cur_alloc_region != NULL) {
    HeapWord* result = allocate_from_cur_alloc_region(cur_alloc_region,
                                                      word_size);
    if (result != NULL) {
      assert_heap_not_locked();
      return result;
    }

    assert_heap_locked();

    // Since we couldn't successfully allocate into it, retire the
    // current alloc region.
    retire_cur_alloc_region(cur_alloc_region);
  }

  // Try to get a new region and allocate out of it
  HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
                                                     false, /* at_safepoint */
                                                     true,  /* do_dirtying */
                                                     false  /* can_expand */);
  if (result != NULL) {
    assert_heap_not_locked();
    return result;
  }

  assert_heap_locked();
  return NULL;
}

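// Common retirement work for the current (young) alloc region: add
// it to the left-hand side of the incremental collection set,
// account for its used bytes, and clear _cur_alloc_region.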
inline void
G1CollectedHeap::retire_cur_alloc_region_common(HeapRegion* cur_alloc_region) {
  assert_heap_locked_or_at_safepoint();
  assert(cur_alloc_region != NULL && cur_alloc_region == _cur_alloc_region,
         "pre-condition of the call");
  assert(cur_alloc_region->is_young(),
         "we only support young current alloc regions");

  // The region is guaranteed to be young
  g1_policy()->add_region_to_incremental_cset_lhs(cur_alloc_region);
  _summary_bytes_used += cur_alloc_region->used();
  _cur_alloc_region = NULL;
}

// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing_raw() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
  assert(containing_hr != NULL && start != NULL && word_size > 0,
         "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->isHumongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
}

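// Returns the reference-to-scan task queue for worker i.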
inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
  return _task_queues->queue(i);
}

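// Returns true iff obj is marked in the "previous" marking bitmap.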
inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
  return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
}

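// Returns true iff obj is marked in the "next" marking bitmap.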
inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
  return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP