Commit bfb1829b authored by: T tschatzl

Merge

@@ -2767,7 +2767,19 @@ void os::numa_make_global(char *addr, size_t bytes) {
   Linux::numa_interleave_memory(addr, bytes);
 }
 
+// Define for numa_set_bind_policy(int). Setting the argument to 0 will set the
+// bind policy to MPOL_PREFERRED for the current thread.
+#define USE_MPOL_PREFERRED 0
+
 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
+  // To make NUMA and large pages more robust when both enabled, we need to ease
+  // the requirements on where the memory should be allocated. MPOL_BIND is the
+  // default policy and it will force memory to be allocated on the specified
+  // node. Changing this to MPOL_PREFERRED will prefer to allocate the memory on
+  // the specified node, but will not force it. Using this policy will prevent
+  // getting SIGBUS when trying to allocate large pages on NUMA nodes with no
+  // free large pages.
+  Linux::numa_set_bind_policy(USE_MPOL_PREFERRED);
   Linux::numa_tonode_memory(addr, bytes, lgrp_hint);
 }
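The comment above captures the whole fix: prefer, rather than force, node-local placement. As a standalone illustration (not VM code; the node id and allocation size are made up, and it assumes a Linux box with libnuma installed, built with e.g. `g++ demo.cpp -lnuma`), the same pattern looks like this:

```cpp
#include <numa.h>      // numa_available, numa_set_bind_policy, numa_tonode_memory
#include <sys/mman.h>  // mmap
#include <cstddef>
#include <cstdio>

int main() {
  if (numa_available() == -1) {
    std::fprintf(stderr, "libnuma not usable on this system\n");
    return 1;
  }
  const size_t bytes = 64 * 1024 * 1024;  // arbitrary demo size
  char* addr = static_cast<char*>(mmap(NULL, bytes, PROT_READ | PROT_WRITE,
                                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
  if (addr == MAP_FAILED) return 1;
  // 0 selects the non-strict MPOL_PREFERRED policy (what USE_MPOL_PREFERRED
  // encodes in the hunk above); 1 would select strict MPOL_BIND, which can
  // SIGBUS when the target node has no free (large) pages.
  numa_set_bind_policy(0);
  numa_tonode_memory(addr, bytes, 0 /* hypothetical node id */);
  addr[0] = 1;  // first touch: pages are placed now, preferring node 0
  return 0;
}
```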
@@ -2869,6 +2881,8 @@ bool os::Linux::libnuma_init() {
       set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t,
                                             libnuma_dlsym(handle, "numa_tonode_memory")));
       set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
                                                 libnuma_dlsym(handle, "numa_interleave_memory")));
+      set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t,
+                                              libnuma_dlsym(handle, "numa_set_bind_policy")));
       if (numa_available() != -1) {
@@ -2935,6 +2949,7 @@ os::Linux::numa_max_node_func_t os::Linux::_numa_max_node;
 os::Linux::numa_available_func_t os::Linux::_numa_available;
 os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
 os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
+os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;
 unsigned long* os::Linux::_numa_all_nodes;
 
 bool os::pd_uncommit_memory(char* addr, size_t size) {
......
@@ -235,6 +235,7 @@ private:
   typedef int (*numa_available_func_t)(void);
   typedef int (*numa_tonode_memory_func_t)(void *start, size_t size, int node);
   typedef void (*numa_interleave_memory_func_t)(void *start, size_t size, unsigned long *nodemask);
+  typedef void (*numa_set_bind_policy_func_t)(int policy);
 
   static sched_getcpu_func_t _sched_getcpu;
   static numa_node_to_cpus_func_t _numa_node_to_cpus;
@@ -242,6 +243,7 @@ private:
   static numa_available_func_t _numa_available;
   static numa_tonode_memory_func_t _numa_tonode_memory;
   static numa_interleave_memory_func_t _numa_interleave_memory;
+  static numa_set_bind_policy_func_t _numa_set_bind_policy;
   static unsigned long* _numa_all_nodes;
 
   static void set_sched_getcpu(sched_getcpu_func_t func) { _sched_getcpu = func; }
@@ -250,6 +252,7 @@ private:
   static void set_numa_available(numa_available_func_t func) { _numa_available = func; }
   static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; }
   static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; }
+  static void set_numa_set_bind_policy(numa_set_bind_policy_func_t func) { _numa_set_bind_policy = func; }
   static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
   static int sched_getcpu_syscall(void);
 public:
@@ -267,6 +270,11 @@ public:
       _numa_interleave_memory(start, size, _numa_all_nodes);
     }
   }
+  static void numa_set_bind_policy(int policy) {
+    if (_numa_set_bind_policy != NULL) {
+      _numa_set_bind_policy(policy);
+    }
+  }
   static int get_node_by_cpu(int cpu_id);
 };
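The new accessor follows the existing libnuma_init() convention: the symbol is looked up with dlsym() and may legitimately come back NULL on older libnuma versions, so the public wrapper NULL-checks before calling. A simplified standalone sketch of that pattern (function and type names borrowed from the patch; the `libnuma.so.1` name and the error handling are assumptions):

```cpp
#include <dlfcn.h>
#include <cstddef>

typedef void (*numa_set_bind_policy_func_t)(int policy);
static numa_set_bind_policy_func_t _numa_set_bind_policy = NULL;

bool bind_numa_set_bind_policy() {
  // RTLD_LAZY: resolve remaining symbols only as they are first used.
  void* handle = dlopen("libnuma.so.1", RTLD_LAZY);
  if (handle == NULL) {
    return false;  // no libnuma at all: NUMA support stays off
  }
  // dlsym() returns NULL for a missing symbol; that is fine here, it simply
  // means the installed libnuma predates numa_set_bind_policy().
  _numa_set_bind_policy = reinterpret_cast<numa_set_bind_policy_func_t>(
      dlsym(handle, "numa_set_bind_policy"));
  return true;
}

// NULL-checked wrapper, mirroring os::Linux::numa_set_bind_policy() above:
// calling it is a harmless no-op when the symbol was not found.
void numa_set_bind_policy(int policy) {
  if (_numa_set_bind_policy != NULL) {
    _numa_set_bind_policy(policy);
  }
}
```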
......
@@ -44,6 +44,6 @@ define_pd_global(uintx,JVMInvokeMethodSlack, 10*K);
 define_pd_global(intx, CompilerThreadStackSize, 0);
 
 // Used on 64 bit platforms for UseCompressedOops base address
-define_pd_global(uintx,HeapBaseMinAddress, 256*M);
+define_pd_global(uintx,HeapBaseMinAddress, 2*G);
 
 #endif // OS_CPU_SOLARIS_X86_VM_GLOBALS_SOLARIS_X86_HPP
@@ -2493,11 +2493,11 @@ void G1CollectedHeap::register_concurrent_cycle_start(jlong start_time) {
 void G1CollectedHeap::register_concurrent_cycle_end() {
   if (_concurrent_cycle_started) {
-    _gc_timer_cm->register_gc_end(os::elapsed_counter());
-
     if (_cm->has_aborted()) {
       _gc_tracer_cm->report_concurrent_mode_failure();
     }
+
+    _gc_timer_cm->register_gc_end(os::elapsed_counter());
     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
 
     _concurrent_cycle_started = false;
@@ -168,7 +168,15 @@ G1CollectorPolicy::G1CollectorPolicy() :
   // Set up the region size and associated fields. Given that the
   // policy is created before the heap, we have to set this up here,
   // so it's done as soon as possible.
-  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
+
+  // It would have been natural to pass initial_heap_byte_size() and
+  // max_heap_byte_size() to setup_heap_region_size() but those have
+  // not been set up at this point since they should be aligned with
+  // the region size. So, there is a circular dependency here. We base
+  // the region size on the heap size, but the heap size should be
+  // aligned with the region size. To get around this we use the
+  // unaligned values for the heap.
+  HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
   HeapRegionRemSet::setup_remset_size();
 
   G1ErgoVerbose::initialize();
......
@@ -149,18 +149,11 @@ void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
 // many regions in the heap (based on the min heap size).
 #define TARGET_REGION_NUMBER          2048
 
-void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
-  // region_size in bytes
+void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
   uintx region_size = G1HeapRegionSize;
   if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
-    // We base the automatic calculation on the min heap size. This
-    // can be problematic if the spread between min and max is quite
-    // wide, imagine -Xms128m -Xmx32g. But, if we decided it based on
-    // the max size, the region size might be way too large for the
-    // min size. Either way, some users might have to set the region
-    // size manually for some -Xms / -Xmx combos.
-    region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER,
+    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
+    region_size = MAX2(average_heap_size / TARGET_REGION_NUMBER,
                        (uintx) MIN_REGION_SIZE);
   }
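For a feel of what the averaging rule changes, here is a small self-contained calculation using the -Xms128m -Xmx32g example from the removed comment. MIN_REGION_SIZE is assumed to be 1M, a 64-bit size_t is assumed, and the real function goes on to round the result to a power of two, which is omitted here:

```cpp
#include <cstdio>
#include <cstddef>

#define TARGET_REGION_NUMBER 2048
static const size_t M = 1024 * 1024;
static const size_t G = 1024 * M;
static const size_t MIN_REGION_SIZE = 1 * M;  // assumed value of the constant

// Old rule: based on the min heap size alone, clamped up to the minimum.
static size_t region_size_old(size_t min_heap_size) {
  size_t rs = min_heap_size / TARGET_REGION_NUMBER;
  return rs < MIN_REGION_SIZE ? MIN_REGION_SIZE : rs;
}

// New rule: based on the average of initial and max heap size.
static size_t region_size_new(size_t initial_heap_size, size_t max_heap_size) {
  size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
  size_t rs = average_heap_size / TARGET_REGION_NUMBER;
  return rs < MIN_REGION_SIZE ? MIN_REGION_SIZE : rs;
}

int main() {
  size_t xms = 128 * M, xmx = 32 * G;  // the -Xms128m -Xmx32g combination
  // Old: 128M / 2048 = 64K, clamped to 1M. New: ~16G / 2048 = ~8M, so a
  // large max heap now gets far fewer, larger regions.
  std::printf("old rule: %zu KB\n", region_size_old(xms) / 1024);    // 1024 KB
  std::printf("new rule: %zu MB\n", region_size_new(xms, xmx) / M);  // 8 MB
  return 0;
}
```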
......
@@ -361,7 +361,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
   // CardsPerRegion). All those fields are considered constant
   // throughout the JVM's execution, therefore they should only be set
   // up once during initialization time.
-  static void setup_heap_region_size(uintx min_heap_size);
+  static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size);
 
   enum ClaimValues {
     InitialClaimValue = 0,
......
@@ -28,6 +28,7 @@
 #include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/gcWhen.hpp"
 #include "gc_implementation/shared/copyFailedInfo.hpp"
+#include "runtime/os.hpp"
 #include "trace/tracing.hpp"
 #include "trace/traceBackend.hpp"
 #if INCLUDE_ALL_GCS
@@ -54,11 +55,12 @@ void GCTracer::send_garbage_collection_event() const {
 }
 
 void GCTracer::send_reference_stats_event(ReferenceType type, size_t count) const {
-  EventGCReferenceStatistics e;
+  EventGCReferenceStatistics e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.id());
     e.set_type((u1)type);
     e.set_count(count);
+    e.set_endtime(os::elapsed_counter());
     e.commit();
   }
 }
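The UNTIMED pattern repeats in every sender below: construct the event without an implicit start timestamp, then stamp it once, explicitly, right before commit. A hedged sketch of why that matters (the Event class here is a minimal stand-in, not the real Trace backend type):

```cpp
#include <chrono>
#include <cstdint>

// Stand-in for os::elapsed_counter(): any monotonic tick source works.
static int64_t elapsed_counter() {
  return static_cast<int64_t>(
      std::chrono::steady_clock::now().time_since_epoch().count());
}

enum EventStartTime { TIMED, UNTIMED };

class Event {
  int64_t _starttime;
  int64_t _endtime;
 public:
  // A TIMED event stamps its start eagerly in the constructor. That is wrong
  // for senders like the ones above, which assemble their events after the
  // work they describe has already finished.
  explicit Event(EventStartTime timing = TIMED)
      : _starttime(timing == TIMED ? elapsed_counter() : 0), _endtime(0) {}
  bool should_commit() const { return true; }  // stands in for the enabled check
  void set_endtime(int64_t t) { _endtime = t; }
  void commit() { /* serialize _starttime, _endtime and the payload */ }
};

void send_some_event() {
  Event e(UNTIMED);                    // no implicit timestamp at construction
  if (e.should_commit()) {
    // ... populate payload fields ...
    e.set_endtime(elapsed_counter());  // one explicit stamp at send time
    e.commit();
  }
}
```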
@@ -105,20 +107,22 @@ static TraceStructCopyFailed to_trace_struct(const CopyFailedInfo& cf_info) {
 }
 
 void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_info) const {
-  EventPromotionFailed e;
+  EventPromotionFailed e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.id());
     e.set_data(to_trace_struct(pf_info));
     e.set_thread(pf_info.thread()->thread_id());
+    e.set_endtime(os::elapsed_counter());
     e.commit();
   }
 }
 
 // Common to CMS and G1
 void OldGCTracer::send_concurrent_mode_failure_event() {
-  EventConcurrentModeFailure e;
+  EventConcurrentModeFailure e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.id());
+    e.set_endtime(os::elapsed_counter());
     e.commit();
   }
 }
@@ -136,7 +140,7 @@ void G1NewTracer::send_g1_young_gc_event() {
 }
 
 void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) {
-  EventEvacuationInfo e;
+  EventEvacuationInfo e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.id());
     e.set_cSetRegions(info->collectionset_regions());
@@ -147,15 +151,17 @@ void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) {
     e.set_allocRegionsUsedAfter(info->alloc_regions_used_before() + info->bytes_copied());
     e.set_bytesCopied(info->bytes_copied());
     e.set_regionsFreed(info->regions_freed());
+    e.set_endtime(os::elapsed_counter());
     e.commit();
   }
 }
 
 void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const {
-  EventEvacuationFailed e;
+  EventEvacuationFailed e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.id());
     e.set_data(to_trace_struct(ef_info));
+    e.set_endtime(os::elapsed_counter());
     e.commit();
   }
 }
@@ -189,12 +195,13 @@ class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
   void visit(const GCHeapSummary* heap_summary) const {
     const VirtualSpaceSummary& heap_space = heap_summary->heap();
 
-    EventGCHeapSummary e;
+    EventGCHeapSummary e(UNTIMED);
     if (e.should_commit()) {
       e.set_gcId(_id);
       e.set_when((u1)_when);
       e.set_heapSpace(to_trace_struct(heap_space));
       e.set_heapUsed(heap_summary->used());
+      e.set_endtime(os::elapsed_counter());
       e.commit();
     }
   }
@@ -209,7 +216,7 @@ class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
     const SpaceSummary& from_space = ps_heap_summary->from();
     const SpaceSummary& to_space = ps_heap_summary->to();
 
-    EventPSHeapSummary e;
+    EventPSHeapSummary e(UNTIMED);
     if (e.should_commit()) {
       e.set_gcId(_id);
       e.set_when((u1)_when);
@@ -220,6 +227,7 @@ class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
       e.set_edenSpace(to_trace_struct(ps_heap_summary->eden()));
       e.set_fromSpace(to_trace_struct(ps_heap_summary->from()));
       e.set_toSpace(to_trace_struct(ps_heap_summary->to()));
+      e.set_endtime(os::elapsed_counter());
       e.commit();
     }
   }
@@ -241,13 +249,14 @@ static TraceStructMetaspaceSizes to_trace_struct(const MetaspaceSizes& sizes) {
 }
 
 void GCTracer::send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const {
-  EventMetaspaceSummary e;
+  EventMetaspaceSummary e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.id());
     e.set_when((u1) when);
     e.set_metaspace(to_trace_struct(meta_space_summary.meta_space()));
     e.set_dataSpace(to_trace_struct(meta_space_summary.data_space()));
     e.set_classSpace(to_trace_struct(meta_space_summary.class_space()));
+    e.set_endtime(os::elapsed_counter());
     e.commit();
   }
 }
@@ -282,8 +291,6 @@ class PhaseSender : public PhaseVisitor {
       default: /* Ignore sending this phase */ break;
     }
   }
-
-#undef send_phase
 };
 
 void GCTracer::send_phase_events(TimePartitions* time_partitions) const {
......
@@ -1605,17 +1605,6 @@ julong Arguments::limit_by_allocatable_memory(julong limit) {
   return result;
 }
 
-void Arguments::set_heap_base_min_address() {
-  if (FLAG_IS_DEFAULT(HeapBaseMinAddress) && UseG1GC && HeapBaseMinAddress < 1*G) {
-    // By default HeapBaseMinAddress is 2G on all platforms except Solaris x86.
-    // G1 currently needs a lot of C-heap, so on Solaris we have to give G1
-    // some extra space for the C-heap compared to other collectors.
-    // Use FLAG_SET_DEFAULT here rather than FLAG_SET_ERGO to make sure that
-    // code that checks for default values work correctly.
-    FLAG_SET_DEFAULT(HeapBaseMinAddress, 1*G);
-  }
-}
-
 void Arguments::set_heap_size() {
   if (!FLAG_IS_DEFAULT(DefaultMaxRAMFraction)) {
     // Deprecated flag
@@ -3537,8 +3526,6 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
     }
   }
 
-  set_heap_base_min_address();
-
   // Set heap size based on available physical memory
   set_heap_size();
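The removed comment leans on a subtlety of HotSpot's flag machinery: FLAG_SET_DEFAULT changes a flag's value while leaving its recorded origin untouched, whereas FLAG_SET_ERGO also marks the flag as ergonomically set, so FLAG_IS_DEFAULT would start returning false. A sketch of that distinction (the enum and struct here are illustrative, not the real types from runtime/globals.hpp):

```cpp
#include <cstdint>

// One flag cell with origin tracking.
enum FlagOrigin { ORIGIN_DEFAULT, ORIGIN_ERGONOMIC, ORIGIN_COMMAND_LINE };

struct UintxFlag {
  uint64_t   value;
  FlagOrigin origin;
};

static bool flag_is_default(const UintxFlag& f) {
  return f.origin == ORIGIN_DEFAULT;
}

// FLAG_SET_DEFAULT-like update: new value, origin stays DEFAULT, so any
// later flag_is_default() check still succeeds.
static void flag_set_default(UintxFlag& f, uint64_t v) {
  f.value = v;
}

// FLAG_SET_ERGO-like update: new value and a new origin, so the flag no
// longer looks untouched.
static void flag_set_ergo(UintxFlag& f, uint64_t v) {
  f.value = v;
  f.origin = ORIGIN_ERGONOMIC;
}
```

With the Solaris x86 default raised to 2*G in globals_solaris_x86.hpp, the special case is no longer needed, so the whole helper and its call site go away.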
......
@@ -334,8 +334,6 @@ class Arguments : AllStatic {
   // limits the given memory size by the maximum amount of memory this process is
   // currently allowed to allocate or reserve.
   static julong limit_by_allocatable_memory(julong size);
-  // Setup HeapBaseMinAddress
-  static void set_heap_base_min_address();
   // Setup heap size
   static void set_heap_size();
   // Based on automatic selection criteria, should the
......