diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index 2439b41e6932634046a7cdecff271523975740f1..c22293c5e8c72cbba5cb7d201ef8dfb33d1c0274 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -2336,25 +2336,6 @@ size_t G1CollectedHeap::recalculate_used() const {
   return blk.result();
 }
 
-size_t G1CollectedHeap::unsafe_max_alloc() {
-  if (num_free_regions() > 0) return HeapRegion::GrainBytes;
-  // otherwise, is there space in the current allocation region?
-
-  // We need to store the current allocation region in a local variable
-  // here. The problem is that this method doesn't take any locks and
-  // there may be other threads which overwrite the current allocation
-  // region field. attempt_allocation(), for example, sets it to NULL
-  // and this can happen *after* the NULL check here but before the call
-  // to free(), resulting in a SIGSEGV. Note that this doesn't appear
-  // to be a problem in the optimized build, since the two loads of the
-  // current allocation region field are optimized away.
-  HeapRegion* hr = _mutator_alloc_region.get();
-  if (hr == NULL) {
-    return 0;
-  }
-  return hr->free();
-}
-
 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
   switch (cause) {
     case GCCause::_gc_locker: return GCLockerInvokesConcurrent;
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
index 9f6a93e4558b5a05ff6a14f74014dea2aa7a3fed..4d884a5f58200bd56033bd109a807e2a47756d17 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
@@ -1170,15 +1170,6 @@ public:
   // end fields defining the extent of the contiguous allocation region.)
   // But G1CollectedHeap doesn't yet support this.
 
-  // Return an estimate of the maximum allocation that could be performed
-  // without triggering any collection or expansion activity. In a
-  // generational collector, for example, this is probably the largest
-  // allocation that could be supported (without expansion) in the youngest
-  // generation. It is "unsafe" because no locks are taken; the result
-  // should be treated as an approximation, not a guarantee, for use in
-  // heuristic resizing decisions.
-  virtual size_t unsafe_max_alloc();
-
   virtual bool is_maximal_no_gc() const {
     return _hrs.available() == 0;
   }
diff --git a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
index bbc3800a2b05952f9f87d18eb970c8809cfd57ed..7a780537974db6b88be9d3deacee25cb10572851 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
@@ -485,10 +485,6 @@ void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
   young_gen()->eden_space()->ensure_parsability();
 }
 
-size_t ParallelScavengeHeap::unsafe_max_alloc() {
-  return young_gen()->eden_space()->free_in_bytes();
-}
-
 size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
   return young_gen()->eden_space()->tlab_capacity(thr);
 }
diff --git a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp
index e24936fb319788a1ae829cd102167795ad19ce2c..5173ff94ece5ea422d84d5d41af90e2965cbf65e 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp
@@ -184,8 +184,6 @@ class ParallelScavengeHeap : public CollectedHeap {
   void accumulate_statistics_all_tlabs();
   void resize_all_tlabs();
 
-  size_t unsafe_max_alloc();
-
   bool supports_tlab_allocation() const { return true; }
 
   size_t tlab_capacity(Thread* thr) const;
diff --git a/src/share/vm/gc_interface/collectedHeap.hpp b/src/share/vm/gc_interface/collectedHeap.hpp
index 0c4dd65a0069a47f7b444ea70b14c0b1eb08d97f..d4fa7ffc0ec2e4f5de08ff08d73a5f9f7d89b1ad 100644
--- a/src/share/vm/gc_interface/collectedHeap.hpp
+++ b/src/share/vm/gc_interface/collectedHeap.hpp
@@ -395,15 +395,6 @@ class CollectedHeap : public CHeapObj {
   // allocation from them and necessitating allocation of new TLABs.
   virtual void ensure_parsability(bool retire_tlabs);
 
-  // Return an estimate of the maximum allocation that could be performed
-  // without triggering any collection or expansion activity. In a
-  // generational collector, for example, this is probably the largest
-  // allocation that could be supported (without expansion) in the youngest
-  // generation. It is "unsafe" because no locks are taken; the result
-  // should be treated as an approximation, not a guarantee, for use in
-  // heuristic resizing decisions.
-  virtual size_t unsafe_max_alloc() = 0;
-
   // Section on thread-local allocation buffers (TLABs)
   // If the heap supports thread-local allocation buffers, it should override
   // the following methods:
diff --git a/src/share/vm/memory/genCollectedHeap.cpp b/src/share/vm/memory/genCollectedHeap.cpp
index 1c651204ed8728166bf62f971acec9e47f801ab7..dcb6a1fdec1bacd50b07ce81bcb1be85658f444e 100644
--- a/src/share/vm/memory/genCollectedHeap.cpp
+++ b/src/share/vm/memory/genCollectedHeap.cpp
@@ -704,10 +704,6 @@ HeapWord** GenCollectedHeap::end_addr() const {
   return _gens[0]->end_addr();
 }
 
-size_t GenCollectedHeap::unsafe_max_alloc() {
-  return _gens[0]->unsafe_max_alloc_nogc();
-}
-
 // public collection interfaces
 
 void GenCollectedHeap::collect(GCCause::Cause cause) {
diff --git a/src/share/vm/memory/genCollectedHeap.hpp b/src/share/vm/memory/genCollectedHeap.hpp
index d5d4c87e45afe00711bfcb6b013ef3330f1aa1a5..77ae3ad88dda8cb2fc42ad76528fdf005f8d95b6 100644
--- a/src/share/vm/memory/genCollectedHeap.hpp
+++ b/src/share/vm/memory/genCollectedHeap.hpp
@@ -166,14 +166,6 @@ public:
   HeapWord** top_addr() const;
   HeapWord** end_addr() const;
 
-  // Return an estimate of the maximum allocation that could be performed
-  // without triggering any collection activity. In a generational
-  // collector, for example, this is probably the largest allocation that
-  // could be supported in the youngest generation. It is "unsafe" because
-  // no locks are taken; the result should be treated as an approximation,
-  // not a guarantee.
-  size_t unsafe_max_alloc();
-
   // Does this heap support heap inspection? (+PrintClassHistogram)
   virtual bool supports_heap_inspection() const { return true; }
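The comment deleted from G1CollectedHeap::unsafe_max_alloc() documents the hazard that made the method "unsafe": it takes no locks, so the current allocation region field can be cleared by another thread (e.g. via attempt_allocation()) after a NULL check but before the dereference, and the only defense is to load the field into a local exactly once. The following standalone C++ sketch illustrates that pattern outside HotSpot; the Region type, free_bytes field, and g_mutator_alloc_region variable are illustrative stand-ins, not HotSpot code, and the sketch assumes the region object itself stays valid after retirement.

// Illustrative sketch of the lock-free reader pattern described in the
// removed G1 comment (names are hypothetical, not HotSpot identifiers).
#include <atomic>
#include <cstddef>

struct Region {
  size_t free_bytes;                        // hypothetical payload
  size_t free() const { return free_bytes; }
};

// Shared "current allocation region"; another thread may set it to nullptr.
static std::atomic<Region*> g_mutator_alloc_region{nullptr};

// Racy variant: the shared field is loaded twice, so a concurrent writer can
// clear it between the check and the dereference -> crash (SIGSEGV).
size_t unsafe_estimate_racy() {
  if (g_mutator_alloc_region.load() == nullptr) return 0;
  return g_mutator_alloc_region.load()->free();  // second load may be nullptr
}

// Pattern from the removed comment: load once into a local, then use the
// local for both the NULL check and the dereference. The result is still
// only an estimate, but the NULL-dereference race is gone (assuming the
// Region object is not deallocated concurrently).
size_t unsafe_estimate() {
  Region* r = g_mutator_alloc_region.load();
  if (r == nullptr) return 0;
  return r->free();
}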