/*
 * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/javaClasses.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/oopFactory.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceRefKlass.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/preserveException.hpp"
#ifndef SERIALGC
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#endif
42

D
dcubed 已提交
43
#ifndef USDT2
D
duke 已提交
44 45
HS_DTRACE_PROBE_DECL1(hotspot, gc__begin, bool);
HS_DTRACE_PROBE_DECL(hotspot, gc__end);
D
dcubed 已提交
46
#endif /* !USDT2 */
D
duke 已提交
47 48 49 50 51

// The same dtrace probe can't be inserted in two different files, so we
// have to call it here, so it's only in one file.  Can't create new probes
// for the other file anymore.   The dtrace probes have to remain stable.
void VM_GC_Operation::notify_gc_begin(bool full) {
D
dcubed 已提交
52
#ifndef USDT2
D
duke 已提交
53
  HS_DTRACE_PROBE1(hotspot, gc__begin, full);
54
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
D
dcubed 已提交
55 56 57 58
#else /* USDT2 */
  HOTSPOT_GC_BEGIN(
                   full);
#endif /* USDT2 */
D
duke 已提交
59 60 61
}

void VM_GC_Operation::notify_gc_end() {
D
dcubed 已提交
62
#ifndef USDT2
D
duke 已提交
63
  HS_DTRACE_PROBE(hotspot, gc__end);
64
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
D
dcubed 已提交
65 66 67 68
#else /* USDT2 */
  HOTSPOT_GC_END(
);
#endif /* USDT2 */
D
duke 已提交
69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101
}

void VM_GC_Operation::acquire_pending_list_lock() {
  // we may enter this with pending exception set
  instanceRefKlass::acquire_pending_list_lock(&_pending_list_basic_lock);
}


void VM_GC_Operation::release_and_notify_pending_list_lock() {

  instanceRefKlass::release_and_notify_pending_list_lock(&_pending_list_basic_lock);
}

// Allocations may fail in several threads at about the same time,
// resulting in multiple gc requests.  We only want to do one of them.
// In case a GC locker is active and the need for a GC is already signalled,
// we want to skip this GC attempt altogether, without doing a futile
// safepoint operation.
bool VM_GC_Operation::skip_operation() const {
  bool skip = (_gc_count_before != Universe::heap()->total_collections());
  if (_full && skip) {
    skip = (_full_gc_count_before != Universe::heap()->total_full_collections());
  }
  if (!skip && GC_locker::is_active_and_needs_gc()) {
    skip = Universe::heap()->is_maximal_no_gc();
    assert(!(skip && (_gc_cause == GCCause::_gc_locker)),
           "GC_locker cannot be active when initiating GC");
  }
  return skip;
}

bool VM_GC_Operation::doit_prologue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
B
brutisso 已提交
102 103
  assert(((_gc_cause != GCCause::_no_gc) &&
          (_gc_cause != GCCause::_no_cause_specified)), "Illegal GCCause");
D
duke 已提交
104 105 106 107 108

  acquire_pending_list_lock();
  // If the GC count has changed someone beat us to the collection
  // Get the Heap_lock after the pending_list_lock.
  Heap_lock->lock();
109

D
duke 已提交
110 111 112 113 114 115 116 117
  // Check invocations
  if (skip_operation()) {
    // skip collection
    Heap_lock->unlock();
    release_and_notify_pending_list_lock();
    _prologue_succeeded = false;
  } else {
    _prologue_succeeded = true;
118 119
    SharedHeap* sh = SharedHeap::heap();
    if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = true;
D
duke 已提交
120 121 122 123 124 125 126 127
  }
  return _prologue_succeeded;
}


void VM_GC_Operation::doit_epilogue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  // Release the Heap_lock first.
128 129
  SharedHeap* sh = SharedHeap::heap();
  if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = false;
D
duke 已提交
130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149
  Heap_lock->unlock();
  release_and_notify_pending_list_lock();
}

bool VM_GC_HeapInspection::doit_prologue() {
  if (Universe::heap()->supports_heap_inspection()) {
    return VM_GC_Operation::doit_prologue();
  } else {
    return false;
  }
}

bool VM_GC_HeapInspection::skip_operation() const {
  assert(Universe::heap()->supports_heap_inspection(), "huh?");
  return false;
}

void VM_GC_HeapInspection::doit() {
  HandleMark hm;
  CollectedHeap* ch = Universe::heap();
150 151
  ch->ensure_parsability(false); // must happen, even if collection does
                                 // not happen (e.g. due to GC_locker)
D
duke 已提交
152
  if (_full_gc) {
153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168
    // The collection attempt below would be skipped anyway if
    // the gc locker is held. The following dump may then be a tad
    // misleading to someone expecting only live objects to show
    // up in the dump (see CR 6944195). Just issue a suitable warning
    // in that case and do not attempt to do a collection.
    // The latter is a subtle point, because even a failed attempt
    // to GC will, in fact, induce one in the future, which we
    // probably want to avoid in this case because the GC that we may
    // be about to attempt holds value for us only
    // if it happens now and not if it happens in the eventual
    // future.
    if (GC_locker::is_active()) {
      warning("GC locker is held; pre-dump GC was skipped");
    } else {
      ch->collect_as_vm_thread(GCCause::_heap_inspection);
    }
D
duke 已提交
169
  }
170
  HeapInspection::heap_inspection(_out, _need_prologue /* need_prologue */);
D
duke 已提交
171 172 173 174
}


void VM_GenCollectForAllocation::doit() {
175
  SvcGCMarker sgcm(SvcGCMarker::MINOR);
D
duke 已提交
176 177 178 179 180 181 182 183 184 185 186 187

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  _res = gch->satisfy_failed_allocation(_size, _tlab);
  assert(gch->is_in_reserved_or_null(_res), "result not in heap");

  if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

void VM_GenCollectFull::doit() {
188
  SvcGCMarker sgcm(SvcGCMarker::FULL);
D
duke 已提交
189 190 191 192 193

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
}
void VM_CollectForMetadataAllocation::doit() {
196 197
  SvcGCMarker sgcm(SvcGCMarker::FULL);

198
  CollectedHeap* heap = Universe::heap();
199
  GCCauseSetter gccs(heap, _gc_cause);
200 201 202 203 204 205 206 207

  bool do_cms_concurrent = false;

  // Check again if the space is available.  Another thread
  // may have similarly failed a metadata allocation and induced
  // a GC that freed space for the allocation.
  if (!MetadataAllocationFailALot) {
    _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
208
    }
209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225

  if (_result == NULL) {
    if (!UseConcMarkSweepGC) {
      // Don't clear the soft refs the first time.
      heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
      _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
      // Don't do this for now
      // This seems too costly to do a second full GC
      // Let the metaspace grow instead
      // if (_result == NULL) {
      //  // If allocation fails again, clear soft refs
      //  heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
      //  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
      // }
    } else {
      MetaspaceGC::set_should_concurrent_collect(true);
      do_cms_concurrent = true;
226
    }
227 228 229 230 231 232 233 234 235 236 237 238 239
    if (_result == NULL) {
      // If still failing, allow the Metaspace to expand.
      // See delta_capacity_until_GC() for explanation of the
      // amount of the expansion.
      // This should work unless there really is no more space
      // or a MaxMetaspaceSize has been specified on the command line.
      MetaspaceGC::set_expand_after_GC(true);
      size_t before_inc = MetaspaceGC::capacity_until_GC();
      size_t delta_words = MetaspaceGC::delta_capacity_until_GC(_size);
      MetaspaceGC::inc_capacity_until_GC(delta_words);
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
          " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC());
240
  }
241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257
      _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
      if (do_cms_concurrent && _result == NULL) {
        // Rather than fail with a metaspace out-of-memory, do a full
        // GC for CMS.
        heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
        _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
      }
      if (_result == NULL) {
        if (PrintGCDetails) {
          gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
                                 SIZE_FORMAT, _size);
        }
      }
    }
  }

  if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
258 259 260
    set_gc_locked();
  }
}