/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/g1/g1MarkSweep.hpp"
#include "memory/gcLocker.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/modRefBarrierSet.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/space.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/aprofiler.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"

class HeapRegion;

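// G1 full collections are performed here as a serial mark-compact over the
// whole heap at a safepoint, reusing GenMarkSweep's machinery in four phases:
// mark live objects, compute new addresses, adjust pointers, compact.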
void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                      bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  SharedHeap* sh = SharedHeap::heap();
#ifdef ASSERT
  if (sh->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif
  // hook up weak ref data so it can be used during Mark-Sweep
  assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  GenMarkSweep::_ref_processor = rp;
  rp->setup_policy(clear_all_softrefs);

  // When collecting the permanent generation methodOops may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();
  Threads::gc_prologue();

  // Increment the invocation count for the permanent generation, since it is
  // implicitly collected whenever we do a full mark sweep collection.
  sh->perm_gen()->stat_record()->invocations++;

  bool marked_for_unloading = false;

  allocate_stacks();

  // We should save the marks of the currently locked biased monitors.
  // The marking doesn't preserve the marks of biased objects.
  BiasedLocking::preserve_marks();

  mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);

  if (VerifyDuringGC) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    g1h->checkConcurrentMark();
  }

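  // Phases 2-4: compute the new addresses of live objects, adjust all
  // pointers to those addresses, then move the objects.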
  mark_sweep_phase2();

  // Don't add any more derived pointers during phase3
  COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

  mark_sweep_phase3();

  mark_sweep_phase4();

  GenMarkSweep::restore_marks();
  BiasedLocking::restore_marks();
  GenMarkSweep::deallocate_stacks();

  // We must invalidate the perm-gen rs, so that it gets rebuilt.
  GenRemSet* rs = sh->rem_set();
  rs->invalidate(sh->perm_gen()->used_region(), true /*whole_heap*/);

  // "free at last gc" is calculated from these.
  // CHF: cheating for now!!!
  //  Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
  //  Universe::set_heap_used_at_last_gc(Universe::heap()->used());

  Threads::gc_epilogue();
  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();

  // refs processing: clean slate
  GenMarkSweep::_ref_processor = NULL;
}


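// G1 does not set aside a pre-allocated preserved-marks area for full GC;
// the GenMarkSweep bookkeeping is simply reset, so any marks that need
// preserving end up on GenMarkSweep's growable preserved-mark stacks.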
void G1MarkSweep::allocate_stacks() {
  GenMarkSweep::_preserved_count_max = 0;
  GenMarkSweep::_preserved_marks = NULL;
  GenMarkSweep::_preserved_count = 0;
}

void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                    bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  EventMark m("1 mark object");
  TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace(" 1");

  SharedHeap* sh = SharedHeap::heap();

  sh->process_strong_roots(true,  // activate StrongRootsScope
                           true,  // Collecting permanent generation.
                           SharedHeap::SO_SystemClasses,
                           &GenMarkSweep::follow_root_closure,
                           &GenMarkSweep::follow_code_root_closure,
                           &GenMarkSweep::follow_root_closure);

  // Process reference objects found during marking
  ReferenceProcessor* rp = GenMarkSweep::ref_processor();
  rp->setup_policy(clear_all_softrefs);
  rp->process_discovered_references(&GenMarkSweep::is_alive,
                                    &GenMarkSweep::keep_alive,
                                    &GenMarkSweep::follow_stack_closure,
                                    NULL);

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
  assert(GenMarkSweep::_marking_stack.is_empty(),
         "stack should be empty by now");

  // Follow code cache roots (has to be done after system dictionary,
  // assumes all live klasses are marked)
  CodeCache::do_unloading(&GenMarkSweep::is_alive,
                          &GenMarkSweep::keep_alive,
                          purged_class);
  GenMarkSweep::follow_stack();

  // Update subklass/sibling/implementor links of live klasses
  GenMarkSweep::follow_weak_klass_links();
  assert(GenMarkSweep::_marking_stack.is_empty(),
         "stack should be empty by now");

  // Visit memoized MDO's and clear any unmarked weak refs
  GenMarkSweep::follow_mdo_weak_refs();
  assert(GenMarkSweep::_marking_stack.is_empty(), "just drained");


  // Visit interned string tables and delete unmarked oops
  StringTable::unlink(&GenMarkSweep::is_alive);
  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

  assert(GenMarkSweep::_marking_stack.is_empty(),
         "stack should be empty by now");
}

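// Per-region closure used in phase 2: live humongous objects are left in
// place (forwarded to themselves), dead humongous regions are freed, and
// all other regions have forwarding pointers installed for their live
// objects via prepare_for_compaction().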
class G1PrepareCompactClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ModRefBarrierSet* _mrbs;
  CompactPoint _cp;
  size_t _pre_used;
  FreeRegionList _free_list;
  HumongousRegionSet _humongous_proxy_set;

  void free_humongous_region(HeapRegion* hr) {
    HeapWord* end = hr->end();
    assert(hr->startsHumongous(),
           "Only the start of a humongous region should be freed.");
    _g1h->free_humongous_region(hr, &_pre_used, &_free_list,
                                &_humongous_proxy_set, false /* par */);
    // Do we also need to do this for the continues humongous regions
    // we just collapsed?
    hr->prepare_for_compaction(&_cp);
    // Also clear the part of the card table that will be unused after
    // compaction.
    _mrbs->clear(MemRegion(hr->compaction_top(), end));
  }

public:
  G1PrepareCompactClosure(CompactibleSpace* cs)
  : _g1h(G1CollectedHeap::heap()),
    _mrbs(G1CollectedHeap::heap()->mr_bs()),
    _cp(NULL, cs, cs->initialize_threshold()),
    _pre_used(0),
    _free_list("Local Free List for G1MarkSweep"),
    _humongous_proxy_set("G1MarkSweep Humongous Proxy Set") { }

  void update_sets() {
    // We'll recalculate total used bytes and recreate the free list
    // at the end of the GC, so no point in updating those values here.
    _g1h->update_sets_after_freeing_regions(0, /* pre_used */
                                            NULL, /* free_list */
                                            &_humongous_proxy_set,
                                            false /* par */);
    _free_list.remove_all();
  }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->isHumongous()) {
      if (hr->startsHumongous()) {
        oop obj = oop(hr->bottom());
        if (obj->is_gc_marked()) {
          obj->forward_to(obj);
        } else  {
          free_humongous_region(hr);
        }
      } else {
        assert(hr->continuesHumongous(), "Invalid humongous.");
      }
    } else {
      hr->prepare_for_compaction(&_cp);
      // Also clear the part of the card table that will be unused after
      // compaction.
      _mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
    }
    return false;
  }
};

// Finds the first HeapRegion.
class FindFirstRegionClosure: public HeapRegionClosure {
  HeapRegion* _a_region;
public:
  FindFirstRegionClosure() : _a_region(NULL) {}
  bool doHeapRegion(HeapRegion* r) {
    _a_region = r;
    return true;
  }
  HeapRegion* result() { return _a_region; }
};

void G1MarkSweep::mark_sweep_phase2() {
  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed, a range of dead objects may get overwritten by a dead int
  // array. If perm_gen is not traversed last a klassOop may get
  // overwritten. This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  EventMark m("2 compute new addresses");
  TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("2");

  FindFirstRegionClosure cl;
  g1h->heap_region_iterate(&cl);
  HeapRegion *r = cl.result();
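  // If the first region holds a live humongous object it will not be moved,
  // so compaction starts filling the next compaction space instead.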
  CompactibleSpace* sp = r;
  if (r->isHumongous() && oop(r->bottom())->is_gc_marked()) {
    sp = r->next_compaction_space();
  }

  G1PrepareCompactClosure blk(sp);
  g1h->heap_region_iterate(&blk);
  blk.update_sets();

  CompactPoint perm_cp(pg, NULL, NULL);
  pg->prepare_for_compaction(&perm_cp);
}

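// Per-region closure used in phase 3: updates the oops in each region to
// point at the forwarding addresses installed in phase 2. For humongous
// objects only the start region needs adjusting.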
class G1AdjustPointersClosure: public HeapRegionClosure {
 public:
  bool doHeapRegion(HeapRegion* r) {
    if (r->isHumongous()) {
      if (r->startsHumongous()) {
        // We must adjust the pointers on the single H object.
        oop obj = oop(r->bottom());
        debug_only(GenMarkSweep::track_interior_pointers(obj));
        // point all the oops to the new location
        obj->adjust_pointers();
        debug_only(GenMarkSweep::check_interior_pointers());
      }
    } else {
      // This really ought to be "as_CompactibleSpace"...
      r->adjust_pointers();
    }
    return false;
  }
};

void G1MarkSweep::mark_sweep_phase3() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  // Adjust the pointers to reflect the new locations
  EventMark m("3 adjust pointers");
  TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("3");

  SharedHeap* sh = SharedHeap::heap();

  sh->process_strong_roots(true,  // activate StrongRootsScope
                           true,  // Collecting permanent generation.
                           SharedHeap::SO_AllClasses,
                           &GenMarkSweep::adjust_root_pointer_closure,
                           NULL,  // do not touch code cache here
                           &GenMarkSweep::adjust_pointer_closure);

  g1h->ref_processor()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  g1h->g1_process_weak_roots(&GenMarkSweep::adjust_root_pointer_closure,
                             &GenMarkSweep::adjust_pointer_closure);

  GenMarkSweep::adjust_marks();

  G1AdjustPointersClosure blk;
  g1h->heap_region_iterate(&blk);
  pg->adjust_pointers();
}

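// Per-region closure used in phase 4: a live humongous object stays where it
// is and only has its mark word reinitialized; for all other regions,
// compact() copies each live object to its forwarding address.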
class G1SpaceCompactClosure: public HeapRegionClosure {
public:
  G1SpaceCompactClosure() {}

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->isHumongous()) {
      if (hr->startsHumongous()) {
        oop obj = oop(hr->bottom());
        if (obj->is_gc_marked()) {
          obj->init_mark();
        } else {
          assert(hr->is_empty(), "Should have been cleared in phase 2.");
        }
        hr->reset_during_compaction();
      }
    } else {
      hr->compact();
    }
    return false;
  }
};

void G1MarkSweep::mark_sweep_phase4() {
  // All pointers are now adjusted, move objects accordingly

  // It is imperative that we traverse perm_gen first in phase4. All
  // classes must be allocated earlier than their instances, and traversing
  // perm_gen first makes sure that all klassOops have moved to their new
  // location before any instance does a dispatch through its klass!

  // The ValidateMarkSweep live oops tracking expects us to traverse spaces
  // in the same order in phase2, phase3 and phase4. We don't quite do that
  // here (perm_gen first rather than last), so we tell the validate code
  // to use a higher index (saved from phase2) when verifying perm_gen.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  EventMark m("4 compact heap");
  TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("4");

  pg->compact();

  G1SpaceCompactClosure blk;
  g1h->heap_region_iterate(&blk);

}

// Local Variables: ***
// c-indentation-style: gnu ***
// End: ***