/*
2
 * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/javaClasses.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/oopFactory.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceRefKlass.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/preserveException.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#endif // INCLUDE_ALL_GCS

#ifndef USDT2
HS_DTRACE_PROBE_DECL1(hotspot, gc__begin, bool);
HS_DTRACE_PROBE_DECL(hotspot, gc__end);
#endif /* !USDT2 */

// The same dtrace probe can't be inserted in two different files, so we
// have to call it here so that it exists in only one file. New probes can't
// be created for the other file because the dtrace probes have to remain stable.
void VM_GC_Operation::notify_gc_begin(bool full) {
#ifndef USDT2
  HS_DTRACE_PROBE1(hotspot, gc__begin, full);
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
#else /* USDT2 */
  HOTSPOT_GC_BEGIN(
                   full);
#endif /* USDT2 */
}

void VM_GC_Operation::notify_gc_end() {
#ifndef USDT2
  HS_DTRACE_PROBE(hotspot, gc__end);
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
#else /* USDT2 */
  HOTSPOT_GC_END(
);
#endif /* USDT2 */
}

void VM_GC_Operation::acquire_pending_list_lock() {
  // We may enter this with a pending exception set.
  InstanceRefKlass::acquire_pending_list_lock(&_pending_list_basic_lock);
}


void VM_GC_Operation::release_and_notify_pending_list_lock() {

  InstanceRefKlass::release_and_notify_pending_list_lock(&_pending_list_basic_lock);
}

// Allocations may fail in several threads at about the same time,
// resulting in multiple gc requests.  We only want to do one of them.
// In case a GC locker is active and the need for a GC is already signalled,
// we want to skip this GC attempt altogether, without doing a futile
// safepoint operation.
bool VM_GC_Operation::skip_operation() const {
  bool skip = (_gc_count_before != Universe::heap()->total_collections());
  if (_full && skip) {
    skip = (_full_gc_count_before != Universe::heap()->total_full_collections());
  }
  if (!skip && GC_locker::is_active_and_needs_gc()) {
    skip = Universe::heap()->is_maximal_no_gc();
    assert(!(skip && (_gc_cause == GCCause::_gc_locker)),
           "GC_locker cannot be active when initiating GC");
  }
  return skip;
}

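// Prologue run on the requesting Java thread: take the pending list lock and
// the Heap_lock, then bail out if another thread has already performed the
// collection we were about to request.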
bool VM_GC_Operation::doit_prologue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  assert(((_gc_cause != GCCause::_no_gc) &&
          (_gc_cause != GCCause::_no_cause_specified)), "Illegal GCCause");

  // To be able to handle a GC, the VM initialization needs to be completed.
  if (!is_init_completed()) {
    vm_exit_during_initialization(
      err_msg("GC triggered before VM initialization completed. Try increasing "
              "NewSize, current value " UINTX_FORMAT "%s.",
              byte_size_in_proper_unit(NewSize),
              proper_unit_for_byte_size(NewSize)));
  }

  acquire_pending_list_lock();
  // If the GC count has changed, someone beat us to the collection.
  // Get the Heap_lock after the pending_list_lock.
  Heap_lock->lock();

  // Check invocations
  if (skip_operation()) {
    // skip collection
    Heap_lock->unlock();
    release_and_notify_pending_list_lock();
    _prologue_succeeded = false;
  } else {
    _prologue_succeeded = true;
    SharedHeap* sh = SharedHeap::heap();
    if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = true;
  }
  return _prologue_succeeded;
}


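// Epilogue: release the locks taken in doit_prologue and notify any waiters
// on the pending list lock.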
void VM_GC_Operation::doit_epilogue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  // Release the Heap_lock first.
  SharedHeap* sh = SharedHeap::heap();
  if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = false;
  Heap_lock->unlock();
  release_and_notify_pending_list_lock();
}

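// Heap inspection only makes sense on heaps that support it; refuse the
// operation up front otherwise.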
bool VM_GC_HeapInspection::doit_prologue() {
  if (Universe::heap()->supports_heap_inspection()) {
    return VM_GC_Operation::doit_prologue();
  } else {
    return false;
  }
}

bool VM_GC_HeapInspection::skip_operation() const {
  assert(Universe::heap()->supports_heap_inspection(), "huh?");
  return false;
}

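// Trigger a full collection so that the subsequent inspection mostly sees
// live objects. Returns false if the GC locker prevents the collection.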
bool VM_GC_HeapInspection::collect() {
  if (GC_locker::is_active()) {
    return false;
  }
  Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
  return true;
}

void VM_GC_HeapInspection::doit() {
  HandleMark hm;
  Universe::heap()->ensure_parsability(false); // must happen, even if a collection
                                               // does not happen (e.g. due to
                                               // GC_locker or _full_gc being false)
  if (_full_gc) {
    if (!collect()) {
      // The collection attempt was skipped because the gc locker is held.
      // The following dump may then be a tad misleading to someone expecting
      // only live objects to show up in the dump (see CR 6944195). Just issue
      // a suitable warning in that case and do not attempt to do a collection.
      // The latter is a subtle point, because even a failed attempt
      // to GC will, in fact, induce one in the future, which we
      // probably want to avoid in this case because the GC that we may
      // be about to attempt holds value for us only
      // if it happens now and not if it happens in the eventual
      // future.
      warning("GC locker is held; pre-dump GC was skipped");
    }
  }
  HeapInspection inspect(_csv_format, _print_help, _print_class_stats,
                         _columns);
  inspect.heap_inspection(_out);
}


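// Retry, at a safepoint, the allocation that failed on the Java thread. If it
// still fails while the GC locker is blocking collections, mark the operation
// as gc_locked so the caller can stall and retry later.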
void VM_GenCollectForAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::MINOR);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  _result = gch->satisfy_failed_allocation(_word_size, _tlab);
  assert(gch->is_in_reserved_or_null(_result), "result not in heap");

  if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

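// Perform an explicit full collection of all generations up to _max_level.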
void VM_GenCollectFull::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
}

// Returns true iff a concurrent GC unloads metadata.
bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
#if INCLUDE_ALL_GCS
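  // CMS unloads classes concurrently when CMSClassUnloadingEnabled is set, so
  // it is enough to ask MetaspaceGC to schedule a concurrent collection.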
  if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
    MetaspaceGC::set_should_concurrent_collect(true);
    return true;
  }

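  // G1 unloads classes at the end of a concurrent mark cycle, so request an
  // initial-mark pause to start one unless a cycle is already in progress.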
  if (UseG1GC && ClassUnloadingWithConcurrentMark) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    g1h->g1_policy()->set_initiate_conc_mark_if_possible();

    GCCauseSetter x(g1h, _gc_cause);

    // At this point we are supposed to start a concurrent cycle. We
    // will do so if one is not already in progress.
    bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);

    if (should_start) {
      double pause_target = g1h->g1_policy()->max_pause_time_ms();
      g1h->do_collection_pause_at_safepoint(pause_target);
    }
    return true;
  }
#endif

  return false;
}

static void log_metaspace_alloc_failure_for_concurrent_GC() {
  if (Verbose && PrintGCDetails) {
    if (UseConcMarkSweepGC) {
      gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
    } else if (UseG1GC) {
      gclog_or_tty->print_cr("\nG1 full GC for Metaspace");
    }
  }
}

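// Satisfy a failed metadata allocation, escalating step by step: retry the
// allocation, start a concurrent GC (CMS/G1), do a full GC, expand the
// metaspace, and finally run a last-ditch collection that clears soft refs.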
void VM_CollectForMetadataAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  CollectedHeap* heap = Universe::heap();
  GCCauseSetter gccs(heap, _gc_cause);

  // Check again if the space is available.  Another thread
  // may have similarly failed a metadata allocation and induced
  // a GC that freed space for the allocation.
  if (!MetadataAllocationFailALot) {
    _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
    if (_result != NULL) {
      return;
    }
  }

  if (initiate_concurrent_GC()) {
    // For CMS and G1 expand since the collection is going to be concurrent.
    _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
    if (_result != NULL) {
      return;
    }

    log_metaspace_alloc_failure_for_concurrent_GC();
  }

  // Don't clear the soft refs yet.
  heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
  // After a GC, try to allocate without expanding; this could fail,
  // in which case expansion is tried below.
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  // If still failing, allow the Metaspace to expand.
  // See delta_capacity_until_GC() for explanation of the
  // amount of the expansion.
  // This should work unless there really is no more space
  // or a MaxMetaspaceSize has been specified on the command line.
  _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  // If expansion failed, do a last-ditch collection and try allocating
  // again.  A last-ditch collection will clear softrefs.  This
  // behavior is similar to the last-ditch collection done for perm
  // gen when it was full and a collection for failed allocation
  // did not free perm gen space.
  heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
                           SIZE_FORMAT, _size);
  }

  if (GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}