diff --git a/src/cpu/zero/vm/shark_globals_zero.hpp b/src/cpu/zero/vm/shark_globals_zero.hpp
index dae7249005859739e95856dc4a00fb0e13d578c1..1d17143761abc77b0fa03fe84d2b0cfb87e3cb39 100644
--- a/src/cpu/zero/vm/shark_globals_zero.hpp
+++ b/src/cpu/zero/vm/shark_globals_zero.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -51,7 +51,7 @@ define_pd_global(intx, Tier4BackEdgeThreshold, 100000);
 define_pd_global(intx, OnStackReplacePercentage, 933 );
 define_pd_global(intx, FreqInlineSize, 325 );
 define_pd_global(intx, InlineSmallCode, 1000 );
-define_pd_global(intx, NewRatio, 12 );
+define_pd_global(uintx, NewRatio, 12 );
 define_pd_global(intx, NewSizeThreadIncrease, 4*K );
 define_pd_global(intx, InitialCodeCacheSize, 160*K);
 define_pd_global(intx, ReservedCodeCacheSize, 32*M );
diff --git a/src/os_cpu/bsd_x86/vm/globals_bsd_x86.hpp b/src/os_cpu/bsd_x86/vm/globals_bsd_x86.hpp
index 3a8d42ab68ef65db45261b5aef68461377091dd9..1144115bc5ed9cf19c7205f180c443b7ce8b7b49 100644
--- a/src/os_cpu/bsd_x86/vm/globals_bsd_x86.hpp
+++ b/src/os_cpu/bsd_x86/vm/globals_bsd_x86.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,7 @@ define_pd_global(intx, VMThreadStackSize, 512);
 #endif // AMD64
 
 define_pd_global(intx, CompilerThreadStackSize, 0);
 
-define_pd_global(intx, SurvivorRatio, 8);
+define_pd_global(uintx, SurvivorRatio, 8);
 
 define_pd_global(uintx, JVMInvokeMethodSlack, 8192);
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
index 6c1de182b744d23d96fd49f424e7f20cbe7a0c6e..a9b2714c2131137428270888176fcc5dde777cea 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
@@ -274,8 +274,8 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
 // end of a collection, we let CMSTriggerRatio of the (purported) free
 // space be allocated before initiating a new collection cycle.
 //
-void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
-  assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
+void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
+  assert(io <= 100 && tr <= 100, "Check the arguments");
   if (io >= 0) {
     _initiating_occupancy = (double)io / 100.0;
   } else {
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
index abc9be62fe9ae4d881af2e6efbcc7ed44931f069..b688be735ae820c1466abaedc4e17de39d678047 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1093,7 +1093,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
 
   // getter and initializer for _initiating_occupancy field.
   double initiating_occupancy() const { return _initiating_occupancy; }
-  void init_initiating_occupancy(intx io, intx tr);
+  void init_initiating_occupancy(intx io, uintx tr);
 
  public:
   ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
diff --git a/src/share/vm/gc_implementation/g1/concurrentMark.cpp b/src/share/vm/gc_implementation/g1/concurrentMark.cpp
index 28c733c150f11a8b2a3ed90fc4da29b6642943cc..cc2a611f0f01266dd157fa3d79182753ac8f4791 100644
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp
@@ -4062,15 +4062,36 @@ void CMTask::do_marking_step(double time_target_ms,
     if (_cm->verbose_low()) {
       gclog_or_tty->print_cr("[%u] we're scanning part "
                              "["PTR_FORMAT", "PTR_FORMAT") "
-                             "of region "PTR_FORMAT,
-                             _worker_id, _finger, _region_limit, _curr_region);
+                             "of region "HR_FORMAT,
+                             _worker_id, _finger, _region_limit,
+                             HR_FORMAT_PARAMS(_curr_region));
     }
 
-    // Let's iterate over the bitmap of the part of the
-    // region that is left.
-    if (mr.is_empty() || _nextMarkBitMap->iterate(&bitmap_closure, mr)) {
-      // We successfully completed iterating over the region. Now,
-      // let's give up the region.
+    assert(!_curr_region->isHumongous() || mr.start() == _curr_region->bottom(),
+           "humongous regions should go around loop once only");
+
+    // Some special cases:
+    // If the memory region is empty, we can just give up the region.
+    // If the current region is humongous then we only need to check
+    // the bitmap for the bit associated with the start of the object,
+    // scan the object if it's live, and give up the region.
+    // Otherwise, let's iterate over the bitmap of the part of the region
+    // that is left.
+    // If the iteration is successful, give up the region.
+    if (mr.is_empty()) {
+      giveup_current_region();
+      regular_clock_call();
+    } else if (_curr_region->isHumongous() && mr.start() == _curr_region->bottom()) {
+      if (_nextMarkBitMap->isMarked(mr.start())) {
+        // The object is marked - apply the closure
+        BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
+        bitmap_closure.do_bit(offset);
+      }
+      // Even if this task aborted while scanning the humongous object
+      // we can (and should) give up the current region.
+      giveup_current_region();
+      regular_clock_call();
+    } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
       giveup_current_region();
       regular_clock_call();
     } else {
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index cc11754c9a0d053ee3ffb47d7d29f71fdbfd0c5e..64647979045e5fe9ef241b7a2dc7b75b8c060a05 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -1893,7 +1893,6 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
   _ref_processor_stw(NULL),
   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
   _bot_shared(NULL),
-  _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
   _evac_failure_scan_stack(NULL) ,
   _mark_in_progress(false),
   _cg1r(NULL), _summary_bytes_used(0),
@@ -4215,22 +4214,15 @@ void G1CollectedHeap::remove_self_forwarding_pointers() {
   assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
 
   // Now restore saved marks, if any.
-  if (_objs_with_preserved_marks != NULL) {
-    assert(_preserved_marks_of_objs != NULL, "Both or none.");
-    guarantee(_objs_with_preserved_marks->length() ==
-              _preserved_marks_of_objs->length(), "Both or none.");
-    for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
-      oop obj = _objs_with_preserved_marks->at(i);
-      markOop m = _preserved_marks_of_objs->at(i);
-      obj->set_mark(m);
-    }
-
-    // Delete the preserved marks growable arrays (allocated on the C heap).
-    delete _objs_with_preserved_marks;
-    delete _preserved_marks_of_objs;
-    _objs_with_preserved_marks = NULL;
-    _preserved_marks_of_objs = NULL;
+  assert(_objs_with_preserved_marks.size() ==
+         _preserved_marks_of_objs.size(), "Both or none.");
+  while (!_objs_with_preserved_marks.is_empty()) {
+    oop obj = _objs_with_preserved_marks.pop();
+    markOop m = _preserved_marks_of_objs.pop();
+    obj->set_mark(m);
   }
+  _objs_with_preserved_marks.clear(true);
+  _preserved_marks_of_objs.clear(true);
 }
 
 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
@@ -4313,15 +4305,8 @@ void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
   // We want to call the "for_promotion_failure" version only in the
   // case of a promotion failure.
   if (m->must_be_preserved_for_promotion_failure(obj)) {
-    if (_objs_with_preserved_marks == NULL) {
-      assert(_preserved_marks_of_objs == NULL, "Both or none.");
-      _objs_with_preserved_marks =
-        new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true);
-      _preserved_marks_of_objs =
-        new (ResourceObj::C_HEAP, mtGC) GrowableArray<markOop>(40, true);
-    }
-    _objs_with_preserved_marks->push(obj);
-    _preserved_marks_of_objs->push(m);
+    _objs_with_preserved_marks.push(obj);
+    _preserved_marks_of_objs.push(m);
   }
 }
 
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
index 816d4f253ac008a94af312dca955c9fcdfcce858..7dc5bd83047fa90b37c0955b0ff59c3f24a3d64b 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
@@ -37,6 +37,7 @@
 #include "memory/barrierSet.hpp"
 #include "memory/memRegion.hpp"
 #include "memory/sharedHeap.hpp"
+#include "utilities/stack.hpp"
 
 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
 // It uses the "Garbage First" heap organization and algorithm, which
@@ -877,10 +878,9 @@ protected:
   // forwarding pointers to themselves. Reset them.
   void remove_self_forwarding_pointers();
 
-  // When one is non-null, so is the other. Together, they each pair is
-  // an object with a preserved mark, and its mark value.
-  GrowableArray<oop>* _objs_with_preserved_marks;
-  GrowableArray<markOop>* _preserved_marks_of_objs;
+  // Together, these store an object with a preserved mark, and its mark value.
+  Stack<oop, mtGC> _objs_with_preserved_marks;
+  Stack<markOop, mtGC> _preserved_marks_of_objs;
 
   // Preserve the mark of "obj", if necessary, in preparation for its mark
   // word being overwritten with a self-forwarding-pointer.
diff --git a/src/share/vm/gc_implementation/g1/g1_globals.hpp b/src/share/vm/gc_implementation/g1/g1_globals.hpp
index d362956ea802b8570cc0eb75ced4c08c0ca370c1..1111cecfa4c4b5b498c6267766a762932d208bd7 100644
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp
@@ -101,9 +101,6 @@
           "to-space, we will allow regions whose survival rate is up to " \
           "S + (1 - S)*X, where X is this parameter (as a fraction.)") \
           \
-  develop(intx, G1InitYoungSurvRatio, 50, \
-          "Expected Survival Rate for newly allocated bytes") \
-          \
   develop(bool, G1SATBPrintStubs, false, \
           "If true, print generated stubs for the SATB barrier") \
           \
diff --git a/src/share/vm/memory/metaspace.cpp b/src/share/vm/memory/metaspace.cpp
index c629e175f09e6042173beb17494d8cd193d5e4a5..b0891150d9c133dec8d3fc2186825677491d0515 100644
--- a/src/share/vm/memory/metaspace.cpp
+++ b/src/share/vm/memory/metaspace.cpp
@@ -1064,11 +1064,11 @@ bool VirtualSpaceList::contains(const void *ptr) {
 //
 // After the GC the compute_new_size() for MetaspaceGC is called to
 // resize the capacity of the metaspaces. The current implementation
-// is based on the flags MinHeapFreeRatio and MaxHeapFreeRatio used
+// is based on the flags MinMetaspaceFreeRatio and MaxHeapFreeRatio used
 // to resize the Java heap by some GC's. New flags can be implemented
 // if really needed. MinHeapFreeRatio is used to calculate how much
 // free space is desirable in the metaspace capacity to decide how much
-// to increase the HWM. MaxHeapFreeRatio is used to decide how much
+// to increase the HWM. MaxMetaspaceFreeRatio is used to decide how much
 // free space is desirable in the metaspace capacity before decreasing
 // the HWM.
 
@@ -1166,7 +1166,7 @@ void MetaspaceGC::compute_new_size() {
   size_t capacity_until_GC = vsl->capacity_bytes_sum();
   size_t free_after_gc = capacity_until_GC - used_after_gc;
 
-  const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
+  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
 
   const double min_tmp = used_after_gc / maximum_used_percentage;
@@ -1232,8 +1232,8 @@ void MetaspaceGC::compute_new_size() {
                   max_shrink_words));
 
   // Should shrinking be considered?
-  if (MaxHeapFreeRatio < 100) {
-    const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
+  if (MaxMetaspaceFreeRatio < 100) {
+    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
     const double max_tmp = used_after_gc / minimum_used_percentage;
     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
diff --git a/src/share/vm/runtime/arguments.cpp b/src/share/vm/runtime/arguments.cpp
index de596c1a8ba166795d17904a8497341849b2cbd6..db33df97c64d04bda4f798654d3ad04688f5c38f 100644
--- a/src/share/vm/runtime/arguments.cpp
+++ b/src/share/vm/runtime/arguments.cpp
@@ -1257,7 +1257,7 @@ void Arguments::set_cms_and_parnew_gc_flags() {
   // prefer minuscule survivor spaces so as not to waste
   // space for (non-existent) survivors
   if (FLAG_IS_DEFAULT(SurvivorRatio) && MaxTenuringThreshold == 0) {
-    FLAG_SET_ERGO(intx, SurvivorRatio, MAX2((intx)1024, SurvivorRatio));
+    FLAG_SET_ERGO(uintx, SurvivorRatio, MAX2((uintx)1024, SurvivorRatio));
   }
   // If OldPLABSize is set and CMSParPromoteBlocksToClaim is not,
   // set CMSParPromoteBlocksToClaim equal to OldPLABSize.
@@ -1897,6 +1897,24 @@ bool Arguments::check_vm_args_consistency() {
   // Keeping the heap 100% free is hard ;-) so limit it to 99%.
   MinHeapFreeRatio = MIN2(MinHeapFreeRatio, (uintx) 99);
 
+  // Min/MaxMetaspaceFreeRatio
+  status = status && verify_percentage(MinMetaspaceFreeRatio, "MinMetaspaceFreeRatio");
+  status = status && verify_percentage(MaxMetaspaceFreeRatio, "MaxMetaspaceFreeRatio");
+
+  if (MinMetaspaceFreeRatio > MaxMetaspaceFreeRatio) {
+    jio_fprintf(defaultStream::error_stream(),
+                "MinMetaspaceFreeRatio (%s" UINTX_FORMAT ") must be less than or "
+                "equal to MaxMetaspaceFreeRatio (%s" UINTX_FORMAT ")\n",
+                FLAG_IS_DEFAULT(MinMetaspaceFreeRatio) ? "Default: " : "",
+                MinMetaspaceFreeRatio,
+                FLAG_IS_DEFAULT(MaxMetaspaceFreeRatio) ? "Default: " : "",
+                MaxMetaspaceFreeRatio);
+    status = false;
+  }
+
+  // Trying to keep 100% free is not practical
+  MinMetaspaceFreeRatio = MIN2(MinMetaspaceFreeRatio, (uintx) 99);
+
   if (FullGCALot && FLAG_IS_DEFAULT(MarkSweepAlwaysCompactCount)) {
     MarkSweepAlwaysCompactCount = 1; // Move objects every gc.
   }
@@ -1904,7 +1922,7 @@ bool Arguments::check_vm_args_consistency() {
   if (UseParallelOldGC && ParallelOldGCSplitALot) {
     // Settings to encourage splitting.
     if (!FLAG_IS_CMDLINE(NewRatio)) {
-      FLAG_SET_CMDLINE(intx, NewRatio, 2);
+      FLAG_SET_CMDLINE(uintx, NewRatio, 2);
     }
     if (!FLAG_IS_CMDLINE(ScavengeBeforeFullGC)) {
       FLAG_SET_CMDLINE(bool, ScavengeBeforeFullGC, false);
diff --git a/src/share/vm/runtime/globals.hpp b/src/share/vm/runtime/globals.hpp
index 7db81d805bd39be7f42fb40700f63cd49f8e1d96..8a0be8f8a4fb7b8e2204b61bde31216e0658b72a 100644
--- a/src/share/vm/runtime/globals.hpp
+++ b/src/share/vm/runtime/globals.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1802,7 +1802,7 @@ class CommandLineFlags {
   product(bool, ParallelRefProcBalancingEnabled, true, \
           "Enable balancing of reference processing queues") \
           \
-  product(intx, CMSTriggerRatio, 80, \
+  product(uintx, CMSTriggerRatio, 80, \
           "Percentage of MinHeapFreeRatio in CMS generation that is " \
           "allocated before a CMS collection cycle commences") \
           \
@@ -1816,7 +1816,7 @@ class CommandLineFlags {
           \
   product(uintx, InitiatingHeapOccupancyPercent, 45, \
           "Percentage of the (entire) heap occupancy to start a " \
-          "concurrent GC cycle. It us used by GCs that trigger a " \
+          "concurrent GC cycle. It is used by GCs that trigger a " \
           "concurrent GC cycle based on the occupancy of the entire heap, " \
           "not just one of the generations (e.g., G1). A value of 0 " \
           "denotes 'do constant GC cycles'.") \
@@ -2977,10 +2977,10 @@ class CommandLineFlags {
   product(uintx, TLABWasteIncrement, 4, \
           "Increment allowed waste at slow allocation") \
           \
-  product(intx, SurvivorRatio, 8, \
+  product(uintx, SurvivorRatio, 8, \
           "Ratio of eden/survivor space size") \
           \
-  product(intx, NewRatio, 2, \
+  product(uintx, NewRatio, 2, \
           "Ratio of new/old generation sizes") \
           \
   product_pd(uintx, NewSizeThreadIncrease, \
@@ -3010,10 +3010,16 @@ class CommandLineFlags {
           "Min change in heap space due to GC (in bytes)") \
           \
   product(uintx, MinMetaspaceExpansion, ScaleForWordSize(256*K), \
-          "Min expansion of permanent heap (in bytes)") \
+          "Min expansion of Metaspace (in bytes)") \
+          \
+  product(uintx, MinMetaspaceFreeRatio, 40, \
+          "Min percentage of Metaspace free after GC to avoid expansion") \
+          \
+  product(uintx, MaxMetaspaceFreeRatio, 70, \
+          "Max percentage of Metaspace free after GC to avoid shrinking") \
           \
   product(uintx, MaxMetaspaceExpansion, ScaleForWordSize(4*M), \
-          "Max expansion of permanent heap without full GC (in bytes)") \
+          "Max expansion of Metaspace without full GC (in bytes)") \
           \
   product(intx, QueuedAllocationWarningCount, 0, \
           "Number of times an allocation that queues behind a GC " \
@@ -3031,7 +3037,7 @@ class CommandLineFlags {
   product(uintx, InitialTenuringThreshold, 7, \
           "Initial value for tenuring threshold") \
           \
-  product(intx, TargetSurvivorRatio, 50, \
+  product(uintx, TargetSurvivorRatio, 50, \
           "Desired percentage of survivor space used after scavenge") \
          \
   product(uintx, MarkSweepDeadRatio, 5, \
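
Reviewer note (not part of the patch): the init_initiating_occupancy() change only drops the now-redundant "tr >= 0" half of the assert, since tr is fed from CMSTriggerRatio and becomes unsigned here. Below is a minimal standalone sketch of how an explicit occupancy (io) or, failing that, MinHeapFreeRatio plus the trigger ratio could be turned into a starting threshold. The io < 0 branch is an assumption based on the CMS comment quoted in the hunk, and intptr_t/uintptr_t stand in for the VM's intx/uintx; consult the actual source for the authoritative expression.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Standalone illustration, not HotSpot code.
static double init_initiating_occupancy(intptr_t io, uintptr_t tr,
                                        uintptr_t min_heap_free_ratio) {
  // With an unsigned tr, only the upper bound needs checking.
  assert(io <= 100 && tr <= 100 && min_heap_free_ratio <= 100);
  if (io >= 0) {
    return (double)io / 100.0;  // an explicit percentage wins
  }
  // Otherwise let tr percent of the "free" portion implied by
  // MinHeapFreeRatio be consumed before a cycle is triggered (assumed
  // formula, mirroring the comment about CMSTriggerRatio).
  return ((100 - min_heap_free_ratio) +
          (double)(tr * min_heap_free_ratio) / 100.0) / 100.0;
}

int main() {
  // io < 0, MinHeapFreeRatio = 40, CMSTriggerRatio = 80:
  // occupancy = (60 + 0.8 * 40) / 100 = 0.92
  std::printf("%.2f\n", init_initiating_occupancy(-1, 80, 40));
  return 0;
}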
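Reviewer note (not part of the patch): the do_marking_step() hunk special-cases humongous regions so that only the mark bit at the region's bottom is probed, instead of iterating the remaining bitmap range. A simplified, self-contained illustration of that control flow follows; all types and names are stand-ins, not G1's HeapRegion/CMBitMap API.

#include <cstddef>
#include <cstdio>
#include <vector>

// Simplified stand-in for a heap region and its mark bitmap slice.
struct Region {
  bool humongous;
  size_t bottom, limit;         // [bottom, limit) words left to scan
  std::vector<bool> mark_bits;  // one bit per word, indexed from bottom
};

template <typename Closure>
void scan_remaining(Region& r, size_t finger, Closure apply) {
  if (finger >= r.limit) {
    // Empty remainder: nothing to scan, just give up the region.
  } else if (r.humongous && finger == r.bottom) {
    // A humongous region holds a single object starting at bottom:
    // check that one bit and scan the object only if it is live.
    if (r.mark_bits[0]) {
      apply(r.bottom);
    }
  } else {
    // Ordinary region: walk the bitmap for the part that is left.
    for (size_t w = finger; w < r.limit; ++w) {
      if (r.mark_bits[w - r.bottom]) {
        apply(w);
      }
    }
  }
  // In every branch the task then gives up the current region.
}

int main() {
  Region hum_region{true, 0, 1024, std::vector<bool>(1024, false)};
  hum_region.mark_bits[0] = true;  // the humongous object is marked
  scan_remaining(hum_region, 0,
                 [](size_t w) { std::printf("live object at word %zu\n", w); });
  return 0;
}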
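Reviewer note (not part of the patch): with the new flags wired into compute_new_size(), the metaspace high-water mark is kept between two bounds derived from the bytes still in use after a GC: expand while less than MinMetaspaceFreeRatio percent would be free, and consider shrinking only while more than MaxMetaspaceFreeRatio percent would be free. A standalone sketch of that arithmetic, with plain types and illustrative names rather than the VM's MetaspaceGC code:

#include <cstddef>
#include <cstdio>

struct CapacityBounds {
  size_t minimum_desired;  // expand the HWM up to at least this capacity
  size_t maximum_desired;  // shrinking only makes sense above this capacity
};

static CapacityBounds desired_capacity(size_t used_after_gc,
                                       unsigned min_free_ratio,   // e.g. 40
                                       unsigned max_free_ratio) { // e.g. 70
  const double max_used_pct = 1.0 - min_free_ratio / 100.0;
  const double min_used_pct = 1.0 - max_free_ratio / 100.0;
  CapacityBounds b;
  b.minimum_desired = (size_t)(used_after_gc / max_used_pct);
  b.maximum_desired = (min_used_pct > 0.0)
                          ? (size_t)(used_after_gc / min_used_pct)
                          : (size_t)-1;  // ratio of 100: never shrink
  return b;
}

int main() {
  CapacityBounds b = desired_capacity(120u * 1024 * 1024, 40, 70);
  // 120M used -> grow until capacity >= 200M, shrink only above 400M.
  std::printf("min %zu, max %zu\n", b.minimum_desired, b.maximum_desired);
  return 0;
}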
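Reviewer note (not part of the patch): the block added to check_vm_args_consistency() enforces that both new flags are valid percentages, that the minimum does not exceed the maximum, and that the minimum is capped at 99. A self-contained sketch of the same rule; the helper names below are illustrative and do not reuse the VM's verify_percentage()/jio_fprintf().

#include <algorithm>
#include <cstdio>

// Illustrative percentage check, mirroring what the patch relies on.
static bool percentage_ok(unsigned long value, const char* name) {
  if (value > 100) {
    std::fprintf(stderr, "%s of %lu is invalid; must be between 0 and 100\n",
                 name, value);
    return false;
  }
  return true;
}

static bool check_metaspace_free_ratios(unsigned long& min_ratio,
                                        unsigned long max_ratio) {
  bool status = percentage_ok(min_ratio, "MinMetaspaceFreeRatio");
  status = percentage_ok(max_ratio, "MaxMetaspaceFreeRatio") && status;
  if (status && min_ratio > max_ratio) {
    std::fprintf(stderr,
                 "MinMetaspaceFreeRatio (%lu) must be less than or equal to "
                 "MaxMetaspaceFreeRatio (%lu)\n", min_ratio, max_ratio);
    status = false;
  }
  // Keeping 100% free is not practical, so cap the minimum at 99.
  min_ratio = std::min(min_ratio, 99ul);
  return status;
}

int main() {
  unsigned long min_ok = 40, min_bad = 80;
  // The defaults from the patch pass; a reversed pair is rejected.
  std::printf("%d %d\n", check_metaspace_free_ratios(min_ok, 70),
              check_metaspace_free_ratios(min_bad, 30));
  return 0;
}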