Commit 80a47f09 authored by brutisso

Merge

@@ -467,7 +467,7 @@ public class ObjectHeap {
             liveRegions.add(tlab.start());
             liveRegions.add(tlab.start());
             liveRegions.add(tlab.top());
-            liveRegions.add(tlab.end());
+            liveRegions.add(tlab.hardEnd());
           }
         }
       }
......
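Note: the hunk above widens the last live-region boundary the serviceability agent records for a TLAB from tlab.end() to tlab.hardEnd(). As the ThreadLocalAllocBuffer change below shows, a TLAB keeps an alignment reserve past end() (room for a filler object and for allocation prefetch), so end() understates the TLAB's true extent; see the sketch after that hunk.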
@@ -27,6 +27,7 @@ package sun.jvm.hotspot.runtime;
 import java.io.*;
 import java.util.*;
 import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.oops.*;
 import sun.jvm.hotspot.types.*;

 /** <P> ThreadLocalAllocBuffer: a descriptor for thread-local storage
@@ -62,9 +63,22 @@ public class ThreadLocalAllocBuffer extends VMObject {
     super(addr);
   }

-  public Address start()   { return startField.getValue(addr); }
-  public Address end()     { return endField.getValue(addr); }
-  public Address top()     { return topField.getValue(addr); }
+  public Address start()   { return startField.getValue(addr); }
+  public Address end()     { return endField.getValue(addr); }
+  public Address top()     { return topField.getValue(addr); }
+  public Address hardEnd() { return end().addOffsetTo(alignmentReserve()); }
+
+  private long alignmentReserve() {
+    return Oop.alignObjectSize(endReserve());
+  }
+
+  private long endReserve() {
+    long minFillerArraySize = Array.baseOffsetInBytes(BasicType.T_INT);
+    long reserveForAllocationPrefetch = VM.getVM().getReserveForAllocationPrefetch();
+    long heapWordSize = VM.getVM().getHeapWordSize();
+
+    return Math.max(minFillerArraySize, reserveForAllocationPrefetch * heapWordSize);
+  }

   /** Support for iteration over heap -- not sure how this will
       interact with GC in reflective system, but necessary for the
......
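To make the new arithmetic concrete, here is a minimal, standalone sketch of endReserve()/hardEnd() with assumed example values (a real run would pull the filler-array minimum, the prefetch reserve, and the heap word size from the target VM, as the code above does; the object-size alignment applied by alignmentReserve() is omitted):

    public class TlabReserveSketch {
        public static void main(String[] args) {
            // Assumed values, stand-ins for what the SA reads from the debuggee:
            long minFillerArraySize = 16;             // Array.baseOffsetInBytes(T_INT) on a 64-bit VM
            long reserveForAllocationPrefetch = 256;  // _reserve_for_allocation_prefetch, in heap words
            long heapWordSize = 8;                    // 64-bit heap word

            // endReserve(): whichever reservation is larger, in bytes
            long endReserve = Math.max(minFillerArraySize,
                                       reserveForAllocationPrefetch * heapWordSize);

            // hardEnd() = end() + alignmentReserve(); with these inputs the
            // TLAB's usable limit sits 2048 bytes before its hard end.
            System.out.println("hardEnd() - end() = " + endReserve + " bytes");
        }
    }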
@@ -114,6 +114,7 @@ public class VM {
   private int          invalidOSREntryBCI;
   private ReversePtrs  revPtrs;
   private VMRegImpl    vmregImpl;
+  private int          reserveForAllocationPrefetch;

   // System.getProperties from debuggee VM
   private Properties   sysProps;
@@ -293,6 +294,10 @@ public class VM {
       vmRelease = CStringUtilities.getString(releaseAddr);
       Address vmInternalInfoAddr = vmVersion.getAddressField("_s_internal_vm_info_string").getValue();
       vmInternalInfo = CStringUtilities.getString(vmInternalInfoAddr);
+
+      CIntegerType intType = (CIntegerType) db.lookupType("int");
+      CIntegerField reserveForAllocationPrefetchField = vmVersion.getCIntegerField("_reserve_for_allocation_prefetch");
+      reserveForAllocationPrefetch = (int)reserveForAllocationPrefetchField.getCInteger(intType);
     } catch (Exception exp) {
       throw new RuntimeException("can't determine target's VM version : " + exp.getMessage());
     }
@@ -778,6 +783,10 @@ public class VM {
     return vmInternalInfo;
   }

+  public int getReserveForAllocationPrefetch() {
+    return reserveForAllocationPrefetch;
+  }
+
   public boolean isSharingEnabled() {
     if (sharingEnabled == null) {
       Flag flag = getCommandLineFlag("UseSharedSpaces");
......
@@ -371,8 +371,8 @@ class ConcurrentMark: public CHeapObj<mtGC> {
   friend class CalcLiveObjectsClosure;
   friend class G1CMRefProcTaskProxy;
   friend class G1CMRefProcTaskExecutor;
-  friend class G1CMParKeepAliveAndDrainClosure;
-  friend class G1CMParDrainMarkingStackClosure;
+  friend class G1CMKeepAliveAndDrainClosure;
+  friend class G1CMDrainMarkingStackClosure;

 protected:
   ConcurrentMarkThread* _cmThread;   // the thread doing the work
@@ -499,17 +499,26 @@ protected:
   }

   // accessor methods
-  uint parallel_marking_threads() { return _parallel_marking_threads; }
-  uint max_parallel_marking_threads() { return _max_parallel_marking_threads;}
-  double sleep_factor()             { return _sleep_factor; }
-  double marking_task_overhead()    { return _marking_task_overhead;}
-  double cleanup_sleep_factor()     { return _cleanup_sleep_factor; }
-  double cleanup_task_overhead()    { return _cleanup_task_overhead;}
-  HeapWord*               finger()  { return _finger; }
-  bool                 concurrent() { return _concurrent; }
-  uint             active_tasks()   { return _active_tasks; }
-  ParallelTaskTerminator* terminator() { return &_terminator; }
+  uint parallel_marking_threads() const     { return _parallel_marking_threads; }
+  uint max_parallel_marking_threads() const { return _max_parallel_marking_threads;}
+  double sleep_factor()                     { return _sleep_factor; }
+  double marking_task_overhead()            { return _marking_task_overhead;}
+  double cleanup_sleep_factor()             { return _cleanup_sleep_factor; }
+  double cleanup_task_overhead()            { return _cleanup_task_overhead;}
+
+  bool use_parallel_marking_threads() const {
+    assert(parallel_marking_threads() <=
+           max_parallel_marking_threads(), "sanity");
+    assert((_parallel_workers == NULL && parallel_marking_threads() == 0) ||
+           parallel_marking_threads() > 0,
+           "parallel workers not set up correctly");
+    return _parallel_workers != NULL;
+  }
+
+  HeapWord*               finger()          { return _finger; }
+  bool                    concurrent()      { return _concurrent; }
+  uint                    active_tasks()    { return _active_tasks; }
+  ParallelTaskTerminator* terminator()      { return &_terminator; }

   // It claims the next available region to be scanned by a marking
   // task/thread. It might return NULL if the next region is empty or
......
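Note: the new use_parallel_marking_threads() gives ConcurrentMark a single, guarded way to ask whether a parallel worker gang exists, and in debug builds it asserts the intended invariant: the gang is non-NULL exactly when the configured number of parallel marking threads is non-zero, and that number never exceeds the maximum.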
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -235,6 +235,18 @@ void TwoGenerationCollectorPolicy::initialize_flags() {
   if (NewSize + OldSize > MaxHeapSize) {
     MaxHeapSize = NewSize + OldSize;
   }
+
+  if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(NewSize)) {
+    // NewRatio will be used later to set the young generation size so we use
+    // it to calculate how big the heap should be based on the requested OldSize
+    // and NewRatio.
+    assert(NewRatio > 0, "NewRatio should have been set up earlier");
+    size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);
+
+    calculated_heapsize = align_size_up(calculated_heapsize, max_alignment());
+    MaxHeapSize = calculated_heapsize;
+    InitialHeapSize = calculated_heapsize;
+  }
   MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());

   always_do_update_barrier = UseConcMarkSweepGC;
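A worked example of the new sizing rule, as a standalone sketch with assumed flag values (the numbers are illustrative, not read from a real VM): with -XX:OldSize=1g on the command line, NewSize left at its default, and NewRatio=2, the old generation is meant to be NewRatio times the young generation, so the whole heap comes out at OldSize + OldSize/NewRatio = 1.5g:

    public class HeapSizeFromOldSizeSketch {
        public static void main(String[] args) {
            long oldSize      = 1024L * 1024 * 1024; // assumed -XX:OldSize=1g
            long newRatio     = 2;                   // assumed NewRatio default
            long maxAlignment = 1024 * 1024;         // assumed max_alignment() of 1 MB

            // heap = old + young = old + old / NewRatio = (old / NewRatio) * (NewRatio + 1)
            long calculatedHeapSize = (oldSize / newRatio) * (newRatio + 1);

            // align_size_up() equivalent for a power-of-two alignment
            calculatedHeapSize = (calculatedHeapSize + maxAlignment - 1) & ~(maxAlignment - 1);

            // Prints 1610612736, i.e. 1536m = 1g old + 512m young
            System.out.println("MaxHeapSize = InitialHeapSize = " + calculatedHeapSize);
        }
    }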
@@ -384,14 +396,15 @@ void GenCollectorPolicy::initialize_size_info() {
 // keeping it simple also seems a worthwhile goal.
 bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
                                                      size_t* gen1_size_ptr,
-                                                     size_t heap_size,
-                                                     size_t min_gen0_size) {
+                                                     const size_t heap_size,
+                                                     const size_t min_gen1_size) {
   bool result = false;
+
   if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) {
-    if (((*gen0_size_ptr + OldSize) > heap_size) &&
-        (heap_size - min_gen0_size) >= min_alignment()) {
-      // Adjust gen0 down to accomodate OldSize
-      *gen0_size_ptr = heap_size - min_gen0_size;
+    if ((heap_size < (*gen0_size_ptr + min_gen1_size)) &&
+        (heap_size >= min_gen1_size + min_alignment())) {
+      // Adjust gen0 down to accommodate min_gen1_size
+      *gen0_size_ptr = heap_size - min_gen1_size;
       *gen0_size_ptr =
         MAX2((uintx)align_size_down(*gen0_size_ptr, min_alignment()),
              min_alignment());
......
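The corrected branch can be traced with a small standalone sketch (assumed sizes, chosen so the condition fires): a 96m heap cannot hold a 64m gen0 plus the 48m minimum gen1, so gen0 is cut back to heap_size - min_gen1_size and aligned down:

    public class AdjustGen0Sketch {
        public static void main(String[] args) {
            long minAlignment = 64 * 1024;           // assumed min_alignment()
            long heapSize     = 96L * 1024 * 1024;   // constrained heap: 96m
            long gen0Size     = 64L * 1024 * 1024;   // requested gen0: 64m
            long minGen1Size  = 48L * 1024 * 1024;   // required minimum gen1: 48m

            // Mirrors the corrected condition: gen0 + min_gen1 overflow the heap,
            // but the heap can still hold min_gen1 plus one aligned unit of gen0.
            if (heapSize < gen0Size + minGen1Size
                    && heapSize >= minGen1Size + minAlignment) {
                gen0Size = heapSize - minGen1Size;                    // 48m
                gen0Size = Math.max(gen0Size & ~(minAlignment - 1),   // align_size_down
                                    minAlignment);
            }
            System.out.println("adjusted gen0 = " + gen0Size); // 50331648 (48m)
        }
    }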
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -321,7 +321,7 @@ class TwoGenerationCollectorPolicy : public GenCollectorPolicy {
   // Returns true is gen0 sizes were adjusted
   bool adjust_gen0_sizes(size_t* gen0_size_ptr, size_t* gen1_size_ptr,
-                         size_t heap_size, size_t min_gen1_size);
+                         const size_t heap_size, const size_t min_gen1_size);
 };

 class MarkSweepPolicy : public TwoGenerationCollectorPolicy {
......
@@ -1737,10 +1737,10 @@ void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
       *class_chunk_word_size = ClassSmallChunk;
       break;
   }
-  assert(chunk_word_size != 0 && class_chunk_word_size != 0,
+  assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
     err_msg("Initial chunks sizes bad: data  " SIZE_FORMAT
             " class " SIZE_FORMAT,
-            chunk_word_size, class_chunk_word_size));
+            *chunk_word_size, *class_chunk_word_size));
 }

 size_t SpaceManager::sum_free_in_chunks_in_use() const {
@@ -2040,7 +2040,7 @@ SpaceManager::~SpaceManager() {
            align_size_up(humongous_chunks->word_size(),
                          HumongousChunkGranularity),
            err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
-                   " granularity " SIZE_FORMAT,
+                   " granularity %d",
                    humongous_chunks->word_size(), HumongousChunkGranularity));
     Metachunk* next_humongous_chunks = humongous_chunks->next();
     chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks);
@@ -2264,7 +2264,8 @@ void SpaceManager::verify_allocation_total() {
   }
   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
   assert(allocation_total() == sum_used_in_chunks_in_use(),
-    err_msg("allocation total is not consistent %d vs %d",
+    err_msg("allocation total is not consistent " SIZE_FORMAT
+            " vs " SIZE_FORMAT,
            allocation_total(), sum_used_in_chunks_in_use()));
 }
@@ -2578,7 +2579,8 @@ void Metaspace::global_initialize() {
 // argument passed in is at the top of the compressed space
 void Metaspace::initialize_class_space(ReservedSpace rs) {
   // The reserved space size may be bigger because of alignment, esp with UseLargePages
-  assert(rs.size() >= ClassMetaspaceSize, err_msg("%d != %d", rs.size(), ClassMetaspaceSize));
+  assert(rs.size() >= ClassMetaspaceSize,
+         err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
   _class_space_list = new VirtualSpaceList(rs);
 }
......
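The three metaspace assertion fixes are all of the same kind: err_msg() takes printf-style formats, and on LP64 a size_t argument is 64 bits wide, so printing it with %d reads the wrong width and garbles the message. SIZE_FORMAT and UINTX_FORMAT expand to specifiers that match size_t and uintx; conversely, HumongousChunkGranularity is evidently an int-valued constant, which is why its specifier moves the other way, from SIZE_FORMAT to %d. The first hunk additionally dereferences the chunk_word_size out-parameters, which were previously being asserted and printed as pointers rather than as the sizes they point to.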
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1816,7 +1816,7 @@ class CommandLineFlags {
                                                                             \
   product(uintx, InitiatingHeapOccupancyPercent, 45,                       \
           "Percentage of the (entire) heap occupancy to start a "          \
-          "concurrent GC cycle. It us used by GCs that trigger a "         \
+          "concurrent GC cycle. It is used by GCs that trigger a "         \
           "concurrent GC cycle based on the occupancy of the entire heap, "\
           "not just one of the generations (e.g., G1). A value of 0 "      \
           "denotes 'do constant GC cycles'.")                              \
......
@@ -1500,7 +1500,7 @@ JavaThread::JavaThread(bool is_attaching_via_jni) :
   } else {
     _jni_attach_state = _not_attaching_via_jni;
   }
-  assert(_deferred_card_mark.is_empty(), "Default MemRegion ctor");
+  assert(deferred_card_mark().is_empty(), "Default MemRegion ctor");
   _safepoint_visible = false;
 }
@@ -1896,9 +1896,16 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
     JvmtiExport::cleanup_thread(this);
   }

-#ifndef SERIALGC
-  // We must flush G1-related buffers before removing a thread from
+  // We must flush any deferred card marks before removing a thread from
   // the list of active threads.
+  Universe::heap()->flush_deferred_store_barrier(this);
+  assert(deferred_card_mark().is_empty(), "Should have been flushed");
+
+#ifndef SERIALGC
+  // We must flush the G1-related buffers before removing a thread
+  // from the list of active threads. We must do this after any deferred
+  // card marks have been flushed (above) so that any entries that are
+  // added to the thread's dirty card queue as a result are not lost.
   if (UseG1GC) {
     flush_barrier_queues();
   }
......
@@ -1161,6 +1161,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
   static_field(Abstract_VM_Version,            _vm_major_version,                int)         \
   static_field(Abstract_VM_Version,            _vm_minor_version,                int)         \
   static_field(Abstract_VM_Version,            _vm_build_number,                 int)         \
+  static_field(Abstract_VM_Version,            _reserve_for_allocation_prefetch, int)         \
                                                                                               \
   static_field(JDK_Version,                    _current,                         JDK_Version) \
   nonstatic_field(JDK_Version,                 _partially_initialized,           bool)        \
......
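The vmStructs entry is what makes the new VM.java code above work: exporting _reserve_for_allocation_prefetch as a static int field of Abstract_VM_Version lets the serviceability agent look the value up by name in the debuggee, which is exactly what vmVersion.getCIntegerField("_reserve_for_allocation_prefetch") does.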