Commit b8fe0ca1 authored by tonyp

6994322: Remove the is_tlab and is_noref / is_large_noref parameters from the CollectedHeap

Summary: Remove two unused parameters from the mem_allocate() method and update its uses accordingly.
Reviewed-by: stefank, johnc

Parent fe786d27
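
Before the change, every caller of CollectedHeap::mem_allocate() had to thread two flags
through the call even though no collector still used them. A minimal before/after sketch
of a call site (the surrounding code is hypothetical; the signatures are the ones this
commit changes, as the diffs below show):

    // Before: two dead flags accompany every request.
    bool gc_overhead_limit_was_exceeded = false;
    HeapWord* result = Universe::heap()->mem_allocate(size,
                                                      false /* is_noref */,
                                                      false /* is_tlab */,
                                                      &gc_overhead_limit_was_exceeded);

    // After: only the size and the overhead-limit out-parameter remain;
    // TLAB refills must go through allocate_new_tlab() instead.
    HeapWord* result = Universe::heap()->mem_allocate(size,
                                                      &gc_overhead_limit_was_exceeded);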
@@ -829,12 +829,8 @@ HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
 HeapWord*
 G1CollectedHeap::mem_allocate(size_t word_size,
-                              bool   is_noref,
-                              bool   is_tlab,
                               bool*  gc_overhead_limit_was_exceeded) {
   assert_heap_not_locked_and_not_at_safepoint();
-  assert(!is_tlab, "mem_allocate() this should not be called directly "
-         "to allocate TLABs");
 
   // Loop until the allocation is satisfied, or unsatisfied after GC.
   for (int try_count = 1; /* we'll return */; try_count += 1) {
@@ -2622,11 +2618,6 @@ size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
   }
 }
 
-size_t G1CollectedHeap::large_typearray_limit() {
-  // FIXME
-  return HeapRegion::GrainBytes/HeapWordSize;
-}
-
 size_t G1CollectedHeap::max_capacity() const {
   return _g1_reserved.byte_size();
 }
...
@@ -446,8 +446,7 @@ protected:
   // * All allocation requests for new TLABs should go to
   //   allocate_new_tlab().
   //
-  // * All non-TLAB allocation requests should go to mem_allocate()
-  //   and mem_allocate() should never be called with is_tlab == true.
+  // * All non-TLAB allocation requests should go to mem_allocate().
   //
   // * If either call cannot satisfy the allocation request using the
   //   current allocating region, they will try to get a new one. If
@@ -467,8 +466,6 @@ protected:
   virtual HeapWord* allocate_new_tlab(size_t word_size);
 
   virtual HeapWord* mem_allocate(size_t word_size,
-                                 bool   is_noref,
-                                 bool   is_tlab, /* expected to be false */
                                  bool*  gc_overhead_limit_was_exceeded);
 
   // The following three methods take a gc_count_before_ret
@@ -1304,10 +1301,6 @@ public:
     return true;
   }
 
-  // The boundary between a "large" and "small" array of primitives, in
-  // words.
-  virtual size_t large_typearray_limit();
-
   // Returns "true" iff the given word_size is "very large".
   static bool isHumongous(size_t word_size) {
     // Note this has to be strictly greater-than as the TLABs
...
@@ -386,8 +386,6 @@ bool ParallelScavengeHeap::is_in_partial_collection(const void *p) {
 // we rely on the size_policy object to force a bail out.
 HeapWord* ParallelScavengeHeap::mem_allocate(
                                      size_t size,
-                                     bool is_noref,
-                                     bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded) {
   assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
   assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
@@ -398,7 +396,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
   // limit is being exceeded as checked below.
   *gc_overhead_limit_was_exceeded = false;
 
-  HeapWord* result = young_gen()->allocate(size, is_tlab);
+  HeapWord* result = young_gen()->allocate(size);
 
   uint loop_count = 0;
   uint gc_count = 0;
@@ -419,7 +417,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
       MutexLocker ml(Heap_lock);
       gc_count = Universe::heap()->total_collections();
 
-      result = young_gen()->allocate(size, is_tlab);
+      result = young_gen()->allocate(size);
 
       // (1) If the requested object is too large to easily fit in the
       //     young_gen, or
@@ -433,21 +431,13 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
       if (result != NULL) {
         return result;
       }
 
-      if (!is_tlab &&
-          size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
-        result = old_gen()->allocate(size, is_tlab);
+      if (size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
+        result = old_gen()->allocate(size);
         if (result != NULL) {
           return result;
         }
       }
 
       if (GC_locker::is_active_and_needs_gc()) {
-        // GC is locked out. If this is a TLAB allocation,
-        // return NULL; the requestor will retry allocation
-        // of an individual object at a time.
-        if (is_tlab) {
-          return NULL;
-        }
-
         // If this thread is not in a jni critical section, we stall
         // the requestor until the critical section has cleared and
         // GC allowed. When the critical section clears, a GC is
@@ -472,7 +462,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
     if (result == NULL) {
 
       // Generate a VM operation
-      VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count);
+      VM_ParallelGCFailedAllocation op(size, gc_count);
       VMThread::execute(&op);
 
       // Did the VM operation execute? If so, return the result directly.
@@ -526,7 +516,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
     if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
         (loop_count % QueuedAllocationWarningCount == 0)) {
       warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
-              " size=%d %s", loop_count, size, is_tlab ? "(TLAB)" : "");
+              " size=%d", loop_count, size);
     }
   }
 
@@ -539,7 +529,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
 // time over limit here, that is the responsibility of the heap specific
 // collection methods. This method decides where to attempt allocations,
 // and when to attempt collections, but no collection specific policy.
-HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size, bool is_tlab) {
+HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
   assert(!Universe::heap()->is_gc_active(), "not reentrant");
@@ -553,7 +543,7 @@ HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size, bool is_tlab) {
   // First level allocation failure, scavenge and allocate in young gen.
   GCCauseSetter gccs(this, GCCause::_allocation_failure);
   PSScavenge::invoke();
-  HeapWord* result = young_gen()->allocate(size, is_tlab);
+  HeapWord* result = young_gen()->allocate(size);
 
   // Second level allocation failure.
   //   Mark sweep and allocate in young generation.
@@ -562,28 +552,28 @@ HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size, bool is_tlab) {
     // Don't mark sweep twice if so.
     if (mark_sweep_invocation_count == total_invocations()) {
       invoke_full_gc(false);
-      result = young_gen()->allocate(size, is_tlab);
+      result = young_gen()->allocate(size);
     }
   }
 
   // Third level allocation failure.
   //   After mark sweep and young generation allocation failure,
   //   allocate in old generation.
-  if (result == NULL && !is_tlab) {
-    result = old_gen()->allocate(size, is_tlab);
+  if (result == NULL) {
+    result = old_gen()->allocate(size);
   }
 
   // Fourth level allocation failure. We're running out of memory.
   //   More complete mark sweep and allocate in young generation.
   if (result == NULL) {
     invoke_full_gc(true);
-    result = young_gen()->allocate(size, is_tlab);
+    result = young_gen()->allocate(size);
   }
 
   // Fifth level allocation failure.
   //   After more complete mark sweep, allocate in old generation.
-  if (result == NULL && !is_tlab) {
-    result = old_gen()->allocate(size, is_tlab);
+  if (result == NULL) {
+    result = old_gen()->allocate(size);
   }
 
   return result;
@@ -761,7 +751,7 @@ size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
 }
 
 HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
-  return young_gen()->allocate(size, true);
+  return young_gen()->allocate(size);
 }
...
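
With the !is_tlab guards gone, the failed_mem_allocate() retry ladder above reduces to a
uniform five-level cascade. A condensed sketch of the control flow after this change
(bodies abbreviated; the full hunks are above):

    // Runs at a safepoint, in the VM thread, after PSScavenge::invoke():
    HeapWord* result = young_gen()->allocate(size);          // 1: post-scavenge young gen
    if (result == NULL &&
        mark_sweep_invocation_count == total_invocations()) {
      invoke_full_gc(false);                                 // 2: mark sweep, then young gen
      result = young_gen()->allocate(size);
    }
    if (result == NULL) {
      result = old_gen()->allocate(size);                    // 3: old gen
    }
    if (result == NULL) {
      invoke_full_gc(true);                                  // 4: max compaction, young gen
      result = young_gen()->allocate(size);
    }
    if (result == NULL) {
      result = old_gen()->allocate(size);                    // 5: old gen again
    }
    return result;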
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -165,12 +165,13 @@ CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector
   // an excessive amount of time is being spent doing collections
   // and caused a NULL to be returned. If a NULL is not returned,
   // "gc_time_limit_was_exceeded" has an undefined meaning.
   HeapWord* mem_allocate(size_t size,
-                         bool is_noref,
-                         bool is_tlab,
                          bool* gc_overhead_limit_was_exceeded);
-  HeapWord* failed_mem_allocate(size_t size, bool is_tlab);
+
+  // Allocation attempt(s) during a safepoint. It should never be called
+  // to allocate a new TLAB as this allocation might be satisfied out
+  // of the old generation.
+  HeapWord* failed_mem_allocate(size_t size);
 
   HeapWord* permanent_mem_allocate(size_t size);
   HeapWord* failed_permanent_mem_allocate(size_t size);
@@ -194,8 +195,6 @@ CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector
   inline void invoke_scavenge();
   inline void invoke_full_gc(bool maximum_compaction);
 
-  size_t large_typearray_limit() { return FastAllocateSizeLimit; }
-
   bool supports_inline_contig_alloc() const { return !UseNUMA; }
   HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
...
@@ -182,12 +182,12 @@ size_t PSOldGen::contiguous_available() const {
 
 // Allocation. We report all successful allocations to the size policy
 // Note that the perm gen does not use this method, and should not!
-HeapWord* PSOldGen::allocate(size_t word_size, bool is_tlab) {
+HeapWord* PSOldGen::allocate(size_t word_size) {
   assert_locked_or_safepoint(Heap_lock);
-  HeapWord* res = allocate_noexpand(word_size, is_tlab);
+  HeapWord* res = allocate_noexpand(word_size);
 
   if (res == NULL) {
-    res = expand_and_allocate(word_size, is_tlab);
+    res = expand_and_allocate(word_size);
   }
 
   // Allocations in the old generation need to be reported
@@ -199,13 +199,12 @@ HeapWord* PSOldGen::allocate(size_t word_size, bool is_tlab) {
   return res;
 }
 
-HeapWord* PSOldGen::expand_and_allocate(size_t word_size, bool is_tlab) {
-  assert(!is_tlab, "TLAB's are not supported in PSOldGen");
+HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
   expand(word_size*HeapWordSize);
   if (GCExpandToAllocateDelayMillis > 0) {
     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
   }
-  return allocate_noexpand(word_size, is_tlab);
+  return allocate_noexpand(word_size);
 }
 
 HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
...
@@ -60,9 +60,8 @@ class PSOldGen : public CHeapObj {
   // Used when initializing the _name field.
   static inline const char* select_name();
 
-  HeapWord* allocate_noexpand(size_t word_size, bool is_tlab) {
+  HeapWord* allocate_noexpand(size_t word_size) {
     // We assume the heap lock is held here.
-    assert(!is_tlab, "Does not support TLAB allocation");
     assert_locked_or_safepoint(Heap_lock);
     HeapWord* res = object_space()->allocate(word_size);
     if (res != NULL) {
@@ -89,7 +88,7 @@ class PSOldGen : public CHeapObj {
     return (res == NULL) ? expand_and_cas_allocate(word_size) : res;
   }
 
-  HeapWord* expand_and_allocate(size_t word_size, bool is_tlab);
+  HeapWord* expand_and_allocate(size_t word_size);
   HeapWord* expand_and_cas_allocate(size_t word_size);
   void expand(size_t bytes);
   bool expand_by(size_t bytes);
@@ -164,7 +163,7 @@ class PSOldGen : public CHeapObj {
   // Allocation. We report all successful allocations to the size policy
   // Note that the perm gen does not use this method, and should not!
-  HeapWord* allocate(size_t word_size, bool is_tlab);
+  HeapWord* allocate(size_t word_size);
 
   // Iteration.
   void oop_iterate(OopClosure* cl) { object_space()->oop_iterate(cl); }
...
@@ -46,10 +46,10 @@ PSPermGen::PSPermGen(ReservedSpace rs, size_t alignment,
 
 HeapWord* PSPermGen::allocate_permanent(size_t size) {
   assert_locked_or_safepoint(Heap_lock);
-  HeapWord* obj = allocate_noexpand(size, false);
+  HeapWord* obj = allocate_noexpand(size);
 
   if (obj == NULL) {
-    obj = expand_and_allocate(size, false);
+    obj = expand_and_allocate(size);
   }
 
   return obj;
...
@@ -157,7 +157,7 @@ class PSYoungGen : public CHeapObj {
   }
 
   // Allocation
-  HeapWord* allocate(size_t word_size, bool is_tlab) {
+  HeapWord* allocate(size_t word_size) {
     HeapWord* result = eden_space()->cas_allocate(word_size);
     return result;
   }
...
@@ -33,10 +33,9 @@
 // The following methods are used by the parallel scavenge collector
 VM_ParallelGCFailedAllocation::VM_ParallelGCFailedAllocation(size_t size,
-                                           bool is_tlab, unsigned int gc_count) :
+                                           unsigned int gc_count) :
   VM_GC_Operation(gc_count, GCCause::_allocation_failure),
   _size(size),
-  _is_tlab(is_tlab),
   _result(NULL)
 {
 }
@@ -48,7 +47,7 @@ void VM_ParallelGCFailedAllocation::doit() {
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap");
 
   GCCauseSetter gccs(heap, _gc_cause);
-  _result = heap->failed_mem_allocate(_size, _is_tlab);
+  _result = heap->failed_mem_allocate(_size);
 
   if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
...
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,12 +32,10 @@
 class VM_ParallelGCFailedAllocation: public VM_GC_Operation {
  private:
   size_t    _size;
-  bool      _is_tlab;
   HeapWord* _result;
 
  public:
-  VM_ParallelGCFailedAllocation(size_t size, bool is_tlab,
-                                unsigned int gc_count);
+  VM_ParallelGCFailedAllocation(size_t size, unsigned int gc_count);
 
   virtual VMOp_Type type() const {
     return VMOp_ParallelGCFailedAllocation;
...
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -93,7 +93,7 @@ class CollectedHeap : public CHeapObj {
   // pure virtual.
   void pre_initialize();
 
-  // Create a new tlab
+  // Create a new tlab. All TLAB allocations must go through this.
   virtual HeapWord* allocate_new_tlab(size_t size);
 
   // Accumulate statistics on all tlabs.
@@ -109,11 +109,11 @@ class CollectedHeap : public CHeapObj {
   // Allocate an uninitialized block of the given size, or returns NULL if
   // this is impossible.
-  inline static HeapWord* common_mem_allocate_noinit(size_t size, bool is_noref, TRAPS);
+  inline static HeapWord* common_mem_allocate_noinit(size_t size, TRAPS);
 
   // Like allocate_init, but the block returned by a successful allocation
   // is guaranteed initialized to zeros.
-  inline static HeapWord* common_mem_allocate_init(size_t size, bool is_noref, TRAPS);
+  inline static HeapWord* common_mem_allocate_init(size_t size, TRAPS);
 
   // Same as common_mem version, except memory is allocated in the permanent area
   // If there is no permanent area, revert to common_mem_allocate_noinit
@@ -322,7 +322,6 @@ class CollectedHeap : public CHeapObj {
   // General obj/array allocation facilities.
   inline static oop obj_allocate(KlassHandle klass, int size, TRAPS);
   inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS);
-  inline static oop large_typearray_allocate(KlassHandle klass, int size, int length, TRAPS);
 
   // Special obj/array allocation facilities.
   // Some heaps may want to manage "permanent" data uniquely. These default
@@ -345,16 +344,12 @@ class CollectedHeap : public CHeapObj {
   // Raw memory allocation facilities
   // The obj and array allocate methods are covers for these methods.
   // The permanent allocation method should default to mem_allocate if
-  // permanent memory isn't supported.
+  // permanent memory isn't supported. mem_allocate() should never be
+  // called to allocate TLABs, only individual objects.
   virtual HeapWord* mem_allocate(size_t size,
-                                 bool is_noref,
-                                 bool is_tlab,
                                  bool* gc_overhead_limit_was_exceeded) = 0;
   virtual HeapWord* permanent_mem_allocate(size_t size) = 0;
 
-  // The boundary between a "large" and "small" array of primitives, in words.
-  virtual size_t large_typearray_limit() = 0;
-
   // Utilities for turning raw memory into filler objects.
   //
   // min_fill_size() is the smallest region that can be filled.
...
@@ -122,7 +122,7 @@ void CollectedHeap::post_allocation_setup_array(KlassHandle klass,
   post_allocation_notify(klass, (oop)obj);
 }
 
-HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, bool is_noref, TRAPS) {
+HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, TRAPS) {
 
   // Clear unhandled oops for memory allocation. Memory allocation might
   // not take out a lock if from tlab, so clear here.
@@ -133,7 +133,6 @@ HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, bool is_noref,
     return NULL;  // caller does a CHECK_0 too
   }
 
-  // We may want to update this, is_noref objects might not be allocated in TLABs.
   HeapWord* result = NULL;
   if (UseTLAB) {
     result = CollectedHeap::allocate_from_tlab(THREAD, size);
@@ -145,8 +144,6 @@ HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, bool is_noref,
   }
   bool gc_overhead_limit_was_exceeded = false;
   result = Universe::heap()->mem_allocate(size,
-                                          is_noref,
-                                          false,
                                           &gc_overhead_limit_was_exceeded);
   if (result != NULL) {
     NOT_PRODUCT(Universe::heap()->
@@ -183,8 +180,8 @@ HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, bool is_noref,
   }
 }
 
-HeapWord* CollectedHeap::common_mem_allocate_init(size_t size, bool is_noref, TRAPS) {
-  HeapWord* obj = common_mem_allocate_noinit(size, is_noref, CHECK_NULL);
+HeapWord* CollectedHeap::common_mem_allocate_init(size_t size, TRAPS) {
+  HeapWord* obj = common_mem_allocate_noinit(size, CHECK_NULL);
   init_obj(obj, size);
   return obj;
 }
@@ -255,7 +252,7 @@ oop CollectedHeap::obj_allocate(KlassHandle klass, int size, TRAPS) {
   debug_only(check_for_valid_allocation_state());
   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
   assert(size >= 0, "int won't convert to size_t");
-  HeapWord* obj = common_mem_allocate_init(size, false, CHECK_NULL);
+  HeapWord* obj = common_mem_allocate_init(size, CHECK_NULL);
   post_allocation_setup_obj(klass, obj, size);
   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
   return (oop)obj;
@@ -268,20 +265,7 @@ oop CollectedHeap::array_allocate(KlassHandle klass,
   debug_only(check_for_valid_allocation_state());
   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
   assert(size >= 0, "int won't convert to size_t");
-  HeapWord* obj = common_mem_allocate_init(size, false, CHECK_NULL);
-  post_allocation_setup_array(klass, obj, size, length);
-  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
-  return (oop)obj;
-}
-
-oop CollectedHeap::large_typearray_allocate(KlassHandle klass,
-                                            int size,
-                                            int length,
-                                            TRAPS) {
-  debug_only(check_for_valid_allocation_state());
-  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
-  assert(size >= 0, "int won't convert to size_t");
-  HeapWord* obj = common_mem_allocate_init(size, true, CHECK_NULL);
+  HeapWord* obj = common_mem_allocate_init(size, CHECK_NULL);
   post_allocation_setup_array(klass, obj, size, length);
   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
   return (oop)obj;
...
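
After the cleanup, the shared slow path in common_mem_allocate_noinit() is easy to state:
try the current thread's TLAB, then fall back to the heap. A condensed sketch of the
surviving logic (retry and out-of-memory handling elided; see the hunks above):

    HeapWord* result = NULL;
    if (UseTLAB) {
      // Fast path: bump-pointer allocation in the thread-local allocation buffer.
      result = CollectedHeap::allocate_from_tlab(THREAD, size);
    }
    if (result == NULL) {
      // Slow path: ask the heap directly; this may trigger a GC.
      bool gc_overhead_limit_was_exceeded = false;
      result = Universe::heap()->mem_allocate(size,
                                              &gc_overhead_limit_was_exceeded);
    }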
@@ -750,10 +750,6 @@ HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
   return NULL;
 }
 
-size_t GenCollectorPolicy::large_typearray_limit() {
-  return FastAllocateSizeLimit;
-}
-
 // Return true if any of the following is true:
 // . the allocation won't fit into the current young gen heap
 // . gc locker is occupied (jni critical section)
...
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -280,9 +280,6 @@ class GenCollectorPolicy : public CollectorPolicy {
   HeapWord *satisfy_failed_allocation(size_t size, bool is_tlab);
 
-  // The size that defines a "large array".
-  virtual size_t large_typearray_limit();
-
   // Adaptive size policy
   virtual void initialize_size_policy(size_t init_eden_size,
                                       size_t init_promo_size,
...
@@ -434,11 +434,9 @@ HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
 }
 
 HeapWord* GenCollectedHeap::mem_allocate(size_t size,
-                                         bool is_large_noref,
-                                         bool is_tlab,
                                          bool* gc_overhead_limit_was_exceeded) {
   return collector_policy()->mem_allocate_work(size,
-                                               is_tlab,
+                                               false /* is_tlab */,
                                                gc_overhead_limit_was_exceeded);
 }
 
@@ -1120,11 +1118,9 @@ size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
 HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
   bool gc_overhead_limit_was_exceeded;
-  HeapWord* result = mem_allocate(size   /* size */,
-                                  false  /* is_large_noref */,
-                                  true   /* is_tlab */,
-                                  &gc_overhead_limit_was_exceeded);
-  return result;
+  return collector_policy()->mem_allocate_work(size /* size */,
+                                               true /* is_tlab */,
+                                               &gc_overhead_limit_was_exceeded);
 }
 
 // Requires "*prev_ptr" to be non-NULL. Deletes and a block of minimal size
@@ -1179,10 +1175,6 @@ void GenCollectedHeap::release_scratch() {
   }
 }
 
-size_t GenCollectedHeap::large_typearray_limit() {
-  return gen_policy()->large_typearray_limit();
-}
-
 class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
   void do_generation(Generation* gen) {
     gen->prepare_for_verify();
...
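
Note the design choice in the second hunk above: rather than keeping a TLAB flag on
mem_allocate(), GenCollectedHeap::allocate_new_tlab() now calls the collector policy
directly, so the is_tlab distinction survives in exactly two call sites. A condensed view
(signatures as in the diff):

    // Ordinary object allocation: never a TLAB.
    return collector_policy()->mem_allocate_work(size,
                                                 false /* is_tlab */,
                                                 &gc_overhead_limit_was_exceeded);

    // TLAB refill: the only caller that passes true.
    return collector_policy()->mem_allocate_work(size,
                                                 true /* is_tlab */,
                                                 &gc_overhead_limit_was_exceeded);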
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -161,8 +161,6 @@ public:
   size_t max_capacity() const;
 
   HeapWord* mem_allocate(size_t size,
-                         bool is_large_noref,
-                         bool is_tlab,
                          bool* gc_overhead_limit_was_exceeded);
 
   // We may support a shared contiguous allocation area, if the youngest
@@ -315,8 +313,6 @@ public:
   // contributed as it needs.
   void release_scratch();
 
-  size_t large_typearray_limit();
-
   // Ensure parsability: override
   virtual void ensure_parsability(bool retire_tlabs);
...
@@ -84,11 +84,7 @@ typeArrayOop typeArrayKlass::allocate(int length, TRAPS) {
     KlassHandle h_k(THREAD, as_klassOop());
     typeArrayOop t;
     CollectedHeap* ch = Universe::heap();
-    if (size < ch->large_typearray_limit()) {
-      t = (typeArrayOop)CollectedHeap::array_allocate(h_k, (int)size, length, CHECK_NULL);
-    } else {
-      t = (typeArrayOop)CollectedHeap::large_typearray_allocate(h_k, (int)size, length, CHECK_NULL);
-    }
+    t = (typeArrayOop)CollectedHeap::array_allocate(h_k, (int)size, length, CHECK_NULL);
     assert(t->is_parsable(), "Don't publish unless parsable");
     return t;
   } else {
...
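
typeArrayKlass::allocate() was the last client of the large/small split; with
large_typearray_limit() gone, every primitive array takes the single array_allocate()
path regardless of size. A hypothetical illustration (the klass lookup is for exposition
only and assumes a TRAPS context):

    // Both a 16-element and a 16M-element int[] now go through the same
    // CollectedHeap::array_allocate() path; there is no size-based dispatch.
    typeArrayKlass* tak = typeArrayKlass::cast(Universe::intArrayKlassObj());
    typeArrayOop small_array = tak->allocate(16, CHECK_NULL);
    typeArrayOop large_array = tak->allocate(16 * 1024 * 1024, CHECK_NULL);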