Commit 77658a1e authored by: D dcubed

Merge

@@ -4104,7 +4104,7 @@ void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case
   ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t1);
   sub(top, t1, t1); // size of tlab's allocated portion
-  incr_allocated_bytes(t1, 0, t2);
+  incr_allocated_bytes(t1, t2, t3);
   // refill the tlab with an eden allocation
   bind(do_refill);
@@ -4138,19 +4138,14 @@ void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case
   delayed()->nop();
 }
-void MacroAssembler::incr_allocated_bytes(Register var_size_in_bytes,
-                                          int con_size_in_bytes,
-                                          Register t1) {
+void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes,
+                                          Register t1, Register t2) {
   // Bump total bytes allocated by this thread
   assert(t1->is_global(), "must be global reg");  // so all 64 bits are saved on a context switch
-  assert_different_registers(var_size_in_bytes, t1);
+  assert_different_registers(size_in_bytes.register_or_noreg(), t1, t2);
   // v8 support has gone the way of the dodo
   ldx(G2_thread, in_bytes(JavaThread::allocated_bytes_offset()), t1);
-  if (var_size_in_bytes->is_valid()) {
-    add(t1, var_size_in_bytes, t1);
-  } else {
-    add(t1, con_size_in_bytes, t1);
-  }
+  add(t1, ensure_simm13_or_reg(size_in_bytes, t2), t1);
   stx(t1, G2_thread, in_bytes(JavaThread::allocated_bytes_offset()));
 }
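Note on the new interface (not part of the commit itself): RegisterOrConstant lets a single entry point accept either a register holding the size or a compile-time constant, and ensure_simm13_or_reg() is the SPARC helper expected to encode a constant as an immediate when it fits in simm13 and otherwise materialize it in the temp register. A minimal, hypothetical call-site sketch (example_bump is not a real HotSpot method):

    // Hypothetical illustration only; both forms convert to RegisterOrConstant.
    void MacroAssembler::example_bump(Register size_reg, int fixed_size,
                                      Register t1, Register t2) {
      incr_allocated_bytes(size_reg,   t1, t2);  // variable size held in a register
      incr_allocated_bytes(fixed_size, t1, t2);  // constant size, folded through RegisterOrConstant
    }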
......
@@ -2399,7 +2399,8 @@ public:
     Label&   slow_case                 // continuation point if fast allocation fails
   );
   void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case);
-  void incr_allocated_bytes(Register var_size_in_bytes, int con_size_in_bytes, Register t1);
+  void incr_allocated_bytes(RegisterOrConstant size_in_bytes,
+                            Register t1, Register t2);
   // interface method calling
   void lookup_interface_method(Register recv_klass,
......
@@ -170,11 +170,13 @@ void C1_MacroAssembler::try_allocate(
   Register t2,       // temp register
   Label&   slow_case // continuation point if fast allocation fails
 ) {
+  RegisterOrConstant size_in_bytes = var_size_in_bytes->is_valid()
+    ? RegisterOrConstant(var_size_in_bytes) : RegisterOrConstant(con_size_in_bytes);
   if (UseTLAB) {
     tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
   } else {
     eden_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
-    incr_allocated_bytes(var_size_in_bytes, con_size_in_bytes, t1);
+    incr_allocated_bytes(size_in_bytes, t1, t2);
   }
 }
......
@@ -461,7 +461,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
   // get the instance size
   __ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size);
   __ eden_allocate(O0_obj, G1_obj_size, 0, G3_t1, G4_t2, slow_path);
-  __ incr_allocated_bytes(G1_obj_size, 0, G3_t1);
+  __ incr_allocated_bytes(G1_obj_size, G3_t1, G4_t2);
   __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2);
   __ verify_oop(O0_obj);
@@ -577,7 +577,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
   __ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size);
   __ eden_allocate(O0_obj, G1_arr_size, 0, G3_t1, O1_t2, slow_path);  // preserves G1_arr_size
-  __ incr_allocated_bytes(G1_arr_size, 0, G3_t1);
+  __ incr_allocated_bytes(G1_arr_size, G3_t1, O1_t2);
   __ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2);
   __ ldub(klass_lh, G3_t1, klass_lh_header_size_offset);
......
@@ -3447,7 +3447,8 @@ void TemplateTable::_new() {
     __ delayed()->nop();
     // bump total bytes allocated by this thread
-    __ incr_allocated_bytes(Roffset, 0, G1_scratch);
+    // RoldTopValue and RtopAddr are dead, so can use G1 and G3
+    __ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch);
   }
   if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
......
@@ -1610,10 +1610,9 @@ int os::current_process_id() {
 const char* os::dll_file_extension() { return ".so"; }
-const char* os::get_temp_directory() {
-  const char *prop = Arguments::get_property("java.io.tmpdir");
-  return prop == NULL ? "/tmp" : prop;
-}
+// This must be hard coded because it's the system's temporary
+// directory not the java application's temp directory, ala java.io.tmpdir.
+const char* os::get_temp_directory() { return "/tmp"; }
 static bool file_exists(const char* filename) {
   struct stat statbuf;
......
@@ -1884,10 +1884,9 @@ void os::set_error_file(const char *logfile) {}
 const char* os::dll_file_extension() { return ".so"; }
-const char* os::get_temp_directory() {
-  const char *prop = Arguments::get_property("java.io.tmpdir");
-  return prop == NULL ? "/tmp" : prop;
-}
+// This must be hard coded because it's the system's temporary
+// directory not the java application's temp directory, ala java.io.tmpdir.
+const char* os::get_temp_directory() { return "/tmp"; }
 static bool file_exists(const char* filename) {
   struct stat statbuf;
......
@@ -1044,9 +1044,9 @@ os::closedir(DIR *dirp)
   return 0;
 }
+// This must be hard coded because it's the system's temporary
+// directory not the java application's temp directory, ala java.io.tmpdir.
 const char* os::get_temp_directory() {
-  const char *prop = Arguments::get_property("java.io.tmpdir");
-  if (prop != 0) return prop;
   static char path_buf[MAX_PATH];
   if (GetTempPath(MAX_PATH, path_buf)>0)
     return path_buf;
......
@@ -1990,9 +1990,8 @@ void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
   LIR_Opr reg = reg = rlock_result(x, x->basic_type());
-  if (x->is_volatile() && os::is_MP()) __ membar_acquire();
   get_Object_unsafe(reg, src.result(), off.result(), type, x->is_volatile());
-  if (x->is_volatile() && os::is_MP()) __ membar();
+  if (x->is_volatile() && os::is_MP()) __ membar_acquire();
 }
@@ -2014,6 +2013,7 @@ void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
   if (x->is_volatile() && os::is_MP()) __ membar_release();
   put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
+  if (x->is_volatile() && os::is_MP()) __ membar();
 }
......
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -172,6 +172,8 @@ class SymbolPropertyTable;
   \
   template(sun_jkernel_DownloadManager_klass, sun_jkernel_DownloadManager, Opt_Kernel) \
   \
+  template(sun_misc_PostVMInitHook_klass, sun_misc_PostVMInitHook, Opt) \
+  \
   /* Preload boxing klasses */ \
   template(Boolean_klass, java_lang_Boolean, Pre) \
   template(Character_klass, java_lang_Character, Pre) \
......
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -111,6 +111,7 @@
   template(sun_jkernel_DownloadManager, "sun/jkernel/DownloadManager") \
   template(getBootClassPathEntryForClass_name, "getBootClassPathEntryForClass") \
   template(setBootClassLoaderHook_name, "setBootClassLoaderHook") \
+  template(sun_misc_PostVMInitHook, "sun/misc/PostVMInitHook") \
   \
   /* class file format tags */ \
   template(tag_source_file, "SourceFile") \
......
@@ -3478,6 +3478,7 @@ void CMSCollector::checkpointRootsInitial(bool asynch) {
   assert(_collectorState == InitialMarking, "Wrong collector state");
   check_correct_thread_executing();
+  TraceCMSMemoryManagerStats tms(_collectorState);
   ReferenceProcessor* rp = ref_processor();
   SpecializationStats::clear();
   assert(_restart_addr == NULL, "Control point invariant");
@@ -5940,11 +5941,6 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
   }
   rp->verify_no_references_recorded();
   assert(!rp->discovery_enabled(), "should have been disabled");
-  // JVMTI object tagging is based on JNI weak refs. If any of these
-  // refs were cleared then JVMTI needs to update its maps and
-  // maybe post ObjectFrees to agents.
-  JvmtiExport::cms_ref_processing_epilogue();
 }
 #ifndef PRODUCT
@@ -6305,6 +6301,7 @@ void CMSCollector::do_CMS_operation(CMS_op_type op) {
   switch (op) {
     case CMS_op_checkpointRootsInitial: {
+      SvcGCMarker sgcm(SvcGCMarker::OTHER);
       checkpointRootsInitial(true);       // asynch
       if (PrintGC) {
         _cmsGen->printOccupancy("initial-mark");
@@ -6312,6 +6309,7 @@ void CMSCollector::do_CMS_operation(CMS_op_type op) {
       break;
     }
     case CMS_op_checkpointRootsFinal: {
+      SvcGCMarker sgcm(SvcGCMarker::OTHER);
       checkpointRootsFinal(true,    // asynch
                            false,   // !clear_all_soft_refs
                            false);  // !init_mark_was_synchronous
......
@@ -31,6 +31,7 @@
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/shared/vmGCOperations.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/referencePolicy.hpp"
 #include "memory/resourceArea.hpp"
@@ -1142,6 +1143,8 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
     return;
   }
+  SvcGCMarker sgcm(SvcGCMarker::OTHER);
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
     gclog_or_tty->print(" VerifyDuringGC:(before)");
......
@@ -1192,7 +1192,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
     return false;
   }
-  DTraceGCProbeMarker gc_probe_marker(true /* full */);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
   ResourceMark rm;
   if (PrintHeapAtGC) {
@@ -3214,7 +3214,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
     return false;
   }
-  DTraceGCProbeMarker gc_probe_marker(false /* full */);
+  SvcGCMarker sgcm(SvcGCMarker::MINOR);
   ResourceMark rm;
   if (PrintHeapAtGC) {
......
@@ -38,7 +38,6 @@ VM_G1CollectForAllocation::VM_G1CollectForAllocation(
 }
 void VM_G1CollectForAllocation::doit() {
-  JvmtiGCForAllocationMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
   assert(_result == NULL || _pause_succeeded,
@@ -46,7 +45,6 @@ void VM_G1CollectForAllocation::doit() {
 }
 void VM_G1CollectFull::doit() {
-  JvmtiGCFullMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   GCCauseSetter x(g1h, _gc_cause);
   g1h->do_full_collection(false /* clear_all_soft_refs */);
@@ -72,7 +70,6 @@ VM_G1IncCollectionPause::VM_G1IncCollectionPause(
 }
 void VM_G1IncCollectionPause::doit() {
-  JvmtiGCForAllocationMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   assert(!_should_initiate_conc_mark ||
       ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
......
@@ -42,8 +42,7 @@ VM_ParallelGCFailedAllocation::VM_ParallelGCFailedAllocation(size_t size,
 }
 void VM_ParallelGCFailedAllocation::doit() {
-  JvmtiGCForAllocationMarker jgcm;
-  notify_gc_begin(false);
+  SvcGCMarker sgcm(SvcGCMarker::MINOR);
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap");
@@ -54,8 +53,6 @@ void VM_ParallelGCFailedAllocation::doit() {
   if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
-  notify_gc_end();
 }
 VM_ParallelGCFailedPermanentAllocation::VM_ParallelGCFailedPermanentAllocation(size_t size,
@@ -67,8 +64,7 @@ VM_ParallelGCFailedPermanentAllocation::VM_ParallelGCFailedPermanentAllocation(s
 }
 void VM_ParallelGCFailedPermanentAllocation::doit() {
-  JvmtiGCFullMarker jgcm;
-  notify_gc_begin(true);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap");
@@ -78,7 +74,6 @@ void VM_ParallelGCFailedPermanentAllocation::doit() {
   if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
-  notify_gc_end();
 }
 // Only used for System.gc() calls
@@ -91,8 +86,7 @@ VM_ParallelGCSystemGC::VM_ParallelGCSystemGC(unsigned int gc_count,
 }
 void VM_ParallelGCSystemGC::doit() {
-  JvmtiGCFullMarker jgcm;
-  notify_gc_begin(true);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
@@ -106,5 +100,4 @@ void VM_ParallelGCSystemGC::doit() {
   } else {
     heap->invoke_full_gc(false);
   }
-  notify_gc_end();
 }
@@ -31,7 +31,6 @@
 #include "memory/oopFactory.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/instanceRefKlass.hpp"
-#include "prims/jvmtiExport.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/init.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -40,6 +39,7 @@
 #ifndef SERIALGC
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #endif
 HS_DTRACE_PROBE_DECL1(hotspot, gc__begin, bool);
 HS_DTRACE_PROBE_DECL(hotspot, gc__end);
@@ -158,8 +158,7 @@ void VM_GC_HeapInspection::doit() {
 void VM_GenCollectForAllocation::doit() {
-  JvmtiGCForAllocationMarker jgcm;
-  notify_gc_begin(false);
+  SvcGCMarker sgcm(SvcGCMarker::MINOR);
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   GCCauseSetter gccs(gch, _gc_cause);
@@ -169,22 +168,19 @@ void VM_GenCollectForAllocation::doit() {
   if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
-  notify_gc_end();
 }
 void VM_GenCollectFull::doit() {
-  JvmtiGCFullMarker jgcm;
-  notify_gc_begin(true);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   GCCauseSetter gccs(gch, _gc_cause);
   gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
-  notify_gc_end();
 }
 void VM_GenCollectForPermanentAllocation::doit() {
-  JvmtiGCForAllocationMarker jgcm;
-  notify_gc_begin(true);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
   SharedHeap* heap = (SharedHeap*)Universe::heap();
   GCCauseSetter gccs(heap, _gc_cause);
   switch (heap->kind()) {
@@ -209,5 +205,4 @@ void VM_GenCollectForPermanentAllocation::doit() {
   if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
-  notify_gc_end();
 }
@@ -30,6 +30,7 @@
 #include "runtime/jniHandles.hpp"
 #include "runtime/synchronizer.hpp"
 #include "runtime/vm_operations.hpp"
+#include "prims/jvmtiExport.hpp"
 // The following class hierarchy represents
 // a set of operations (VM_Operation) related to GC.
@@ -209,13 +210,17 @@ class VM_GenCollectForPermanentAllocation: public VM_GC_Operation {
   HeapWord* result() const       { return _res; }
 };
-class DTraceGCProbeMarker : public StackObj {
- public:
-  DTraceGCProbeMarker(bool full) {
-    VM_GC_Operation::notify_gc_begin(full);
+class SvcGCMarker : public StackObj {
+ private:
+  JvmtiGCMarker _jgcm;
+ public:
+  typedef enum { MINOR, FULL, OTHER } reason_type;
+  SvcGCMarker(reason_type reason ) {
+    VM_GC_Operation::notify_gc_begin(reason == FULL);
   }
-  ~DTraceGCProbeMarker() {
+  ~SvcGCMarker() {
     VM_GC_Operation::notify_gc_end();
   }
 };
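For orientation, a sketch of the intended call pattern using only names introduced in this diff (assumed shape, not a verbatim copy of any file in the commit): each stop-the-world GC VM operation declares one stack-allocated SvcGCMarker at the top of its doit(). That single object fires the DTrace gc__begin/gc__end probes and, through the embedded JvmtiGCMarker, the JVMTI GarbageCollectionStart/Finish events, replacing the old JvmtiGCForAllocationMarker/JvmtiGCFullMarker pair:

    void VM_GenCollectForAllocation::doit() {
      SvcGCMarker sgcm(SvcGCMarker::MINOR);  // DTrace probes + JVMTI GC events
      // ... perform the failed-allocation collection ...
    }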
......
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1649,7 +1649,8 @@ typedef struct {
  * the new bit is also added in the main/baseline.
  */
     unsigned int thread_park_blocker : 1;
-    unsigned int : 31;
+    unsigned int post_vm_init_hook_enabled : 1;
+    unsigned int : 30;
     unsigned int : 32;
     unsigned int : 32;
 } jdk_version_info;
......
@@ -13048,8 +13048,8 @@ myInit() {
   <event label="Garbage Collection Start"
         id="GarbageCollectionStart" const="JVMTI_EVENT_GARBAGE_COLLECTION_START" num="81">
     <description>
-      A Garbage Collection Start event is sent when a full cycle
-      garbage collection begins.
+      A Garbage Collection Start event is sent when a
+      garbage collection pause begins.
       Only stop-the-world collections are reported--that is, collections during
       which all threads cease to modify the state of the Java virtual machine.
       This means that some collectors will never generate these events.
@@ -13075,8 +13075,8 @@ myInit() {
   <event label="Garbage Collection Finish"
         id="GarbageCollectionFinish" const="JVMTI_EVENT_GARBAGE_COLLECTION_FINISH" num="82">
     <description>
-      A Garbage Collection Finish event is sent when a full
-      garbage collection cycle ends.
+      A Garbage Collection Finish event is sent when a
+      garbage collection pause ends.
       This event is sent while the VM is still stopped, thus
       the event handler must not use JNI functions and
       must not use <jvmti/> functions except those which
......
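As background for the reworded specification above, the sketch below shows the standard jvmti.h calls an agent uses to receive those pause notifications; it is ordinary JVMTI usage, not code from this changeset, and the callback names are illustrative:

    #include <jvmti.h>

    static void JNICALL on_gc_start(jvmtiEnv* jvmti)  { /* a stop-the-world pause began */ }
    static void JNICALL on_gc_finish(jvmtiEnv* jvmti) { /* the pause ended; avoid JNI calls here */ }

    JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm, char* options, void* reserved) {
      jvmtiEnv* jvmti = NULL;
      if (vm->GetEnv((void**)&jvmti, JVMTI_VERSION_1_1) != JNI_OK) return JNI_ERR;

      jvmtiCapabilities caps = {0};
      caps.can_generate_garbage_collection_events = 1;
      jvmti->AddCapabilities(&caps);

      jvmtiEventCallbacks callbacks = {0};
      callbacks.GarbageCollectionStart  = &on_gc_start;
      callbacks.GarbageCollectionFinish = &on_gc_finish;
      jvmti->SetEventCallbacks(&callbacks, sizeof(callbacks));
      jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_GARBAGE_COLLECTION_START,  NULL);
      jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_GARBAGE_COLLECTION_FINISH, NULL);
      return JNI_OK;
    }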
@@ -667,14 +667,13 @@ void
 JvmtiEventControllerPrivate::thread_ended(JavaThread *thread) {
   // Removes the JvmtiThreadState associated with the specified thread.
   // May be called after all environments have been disposed.
+  assert(JvmtiThreadState_lock->is_locked(), "sanity check");
   EC_TRACE(("JVMTI [%s] # thread ended", JvmtiTrace::safe_get_thread_name(thread)));
   JvmtiThreadState *state = thread->jvmti_thread_state();
-  if (state != NULL) {
-    MutexLocker mu(JvmtiThreadState_lock);
-    delete state;
-  }
+  assert(state != NULL, "else why are we here?");
+  delete state;
 }
 void JvmtiEventControllerPrivate::set_event_callbacks(JvmtiEnvBase *env,
......
@@ -2253,12 +2253,14 @@ void JvmtiExport::post_vm_object_alloc(JavaThread *thread, oop object) {
 void JvmtiExport::cleanup_thread(JavaThread* thread) {
   assert(JavaThread::current() == thread, "thread is not current");
+  MutexLocker mu(JvmtiThreadState_lock);
+  if (thread->jvmti_thread_state() != NULL) {
     // This has to happen after the thread state is removed, which is
     // why it is not in post_thread_end_event like its complement
     // Maybe both these functions should be rolled into the posts?
     JvmtiEventController::thread_ended(thread);
+  }
 }
 void JvmtiExport::oops_do(OopClosure* f) {
@@ -2358,15 +2360,6 @@ jint JvmtiExport::load_agent_library(AttachOperation* op, outputStream* st) {
 }
 #endif // SERVICES_KERNEL
-// CMS has completed referencing processing so may need to update
-// tag maps.
-void JvmtiExport::cms_ref_processing_epilogue() {
-  if (JvmtiEnv::environments_might_exist()) {
-    JvmtiTagMap::cms_ref_processing_epilogue();
-  }
-}
 ////////////////////////////////////////////////////////////////////////////////////////////////
 // Setup current current thread for event collection.
@@ -2536,36 +2529,20 @@ NoJvmtiVMObjectAllocMark::~NoJvmtiVMObjectAllocMark() {
   }
 };
-JvmtiGCMarker::JvmtiGCMarker(bool full) : _full(full), _invocation_count(0) {
-  assert(Thread::current()->is_VM_thread(), "wrong thread");
+JvmtiGCMarker::JvmtiGCMarker() {
   // if there aren't any JVMTI environments then nothing to do
   if (!JvmtiEnv::environments_might_exist()) {
     return;
   }
-  if (ForceFullGCJVMTIEpilogues) {
-    // force 'Full GC' was done semantics for JVMTI GC epilogues
-    _full = true;
-  }
-  // GarbageCollectionStart event posted from VM thread - okay because
-  // JVMTI is clear that the "world is stopped" and callback shouldn't
-  // try to call into the VM.
   if (JvmtiExport::should_post_garbage_collection_start()) {
     JvmtiExport::post_garbage_collection_start();
   }
-  // if "full" is false it probably means this is a scavenge of the young
-  // generation. However it could turn out that a "full" GC is required
-  // so we record the number of collections so that it can be checked in
-  // the destructor.
-  if (!_full) {
-    _invocation_count = Universe::heap()->total_full_collections();
+  if (SafepointSynchronize::is_at_safepoint()) {
+    // Do clean up tasks that need to be done at a safepoint
+    JvmtiEnvBase::check_for_periodic_clean_up();
   }
-  // Do clean up tasks that need to be done at a safepoint
-  JvmtiEnvBase::check_for_periodic_clean_up();
 }
 JvmtiGCMarker::~JvmtiGCMarker() {
@@ -2578,21 +2555,5 @@ JvmtiGCMarker::~JvmtiGCMarker() {
   if (JvmtiExport::should_post_garbage_collection_finish()) {
     JvmtiExport::post_garbage_collection_finish();
   }
-  // we might have initially started out doing a scavenge of the young
-  // generation but could have ended up doing a "full" GC - check the
-  // GC count to see.
-  if (!_full) {
-    _full = (_invocation_count != Universe::heap()->total_full_collections());
-  }
-  // Full collection probably means the perm generation has been GC'ed
-  // so we clear the breakpoint cache.
-  if (_full) {
-    JvmtiCurrentBreakpoints::gc_epilogue();
-  }
-  // Notify heap/object tagging support
-  JvmtiTagMap::gc_epilogue(_full);
 }
 #endif // JVMTI_KERNEL
@@ -356,9 +356,6 @@ class JvmtiExport : public AllStatic {
   // SetNativeMethodPrefix support
   static char** get_all_native_method_prefixes(int* count_ptr);
-  // call after CMS has completed referencing processing
-  static void cms_ref_processing_epilogue() KERNEL_RETURN;
 };
 // Support class used by JvmtiDynamicCodeEventCollector and others. It
@@ -492,55 +489,11 @@ class NoJvmtiVMObjectAllocMark : public StackObj {
 // Base class for reporting GC events to JVMTI.
 class JvmtiGCMarker : public StackObj {
- private:
-  bool _full;                        // marks a "full" GC
-  unsigned int _invocation_count;    // GC invocation count
- protected:
-  JvmtiGCMarker(bool full) KERNEL_RETURN;    // protected
-  ~JvmtiGCMarker() KERNEL_RETURN;            // protected
-};
-// Support class used to report GC events to JVMTI. The class is stack
-// allocated and should be placed in the doit() implementation of all
-// vm operations that do a stop-the-world GC for failed allocation.
-//
-// Usage :-
-//
-// void VM_GenCollectForAllocation::doit() {
-//   JvmtiGCForAllocationMarker jgcm;
-//   :
-// }
-//
-// If jvmti is not enabled the constructor and destructor is essentially
-// a no-op (no overhead).
-//
-class JvmtiGCForAllocationMarker : public JvmtiGCMarker {
  public:
-  JvmtiGCForAllocationMarker() : JvmtiGCMarker(false) {
-  }
-};
-// Support class used to report GC events to JVMTI. The class is stack
-// allocated and should be placed in the doit() implementation of all
-// vm operations that do a "full" stop-the-world GC. This class differs
-// from JvmtiGCForAllocationMarker in that this class assumes that a
-// "full" GC will happen.
-//
-// Usage :-
-//
-// void VM_GenCollectFull::doit() {
-//   JvmtiGCFullMarker jgcm;
-//   :
-// }
-//
-class JvmtiGCFullMarker : public JvmtiGCMarker {
- public:
-  JvmtiGCFullMarker() : JvmtiGCMarker(true) {
-  }
+  JvmtiGCMarker() KERNEL_RETURN;
+  ~JvmtiGCMarker() KERNEL_RETURN;
 };
 // JvmtiHideSingleStepping is a helper class for hiding
 // internal single step events.
 class JvmtiHideSingleStepping : public StackObj {
......
@@ -212,14 +212,7 @@ void GrowableCache::oops_do(OopClosure* f) {
   for (int i=0; i<len; i++) {
     GrowableElement *e = _elements->at(i);
     e->oops_do(f);
-  }
-}
-void GrowableCache::gc_epilogue() {
-  int len = _elements->length();
-  // recompute the new cache value after GC
-  for (int i=0; i<len; i++) {
-    _cache[i] = _elements->at(i)->getCacheValue();
+    _cache[i] = e->getCacheValue();
   }
 }
@@ -401,10 +394,6 @@ void JvmtiBreakpoints::oops_do(OopClosure* f) {
   _bps.oops_do(f);
 }
-void JvmtiBreakpoints::gc_epilogue() {
-  _bps.gc_epilogue();
-}
 void JvmtiBreakpoints::print() {
 #ifndef PRODUCT
   ResourceMark rm;
@@ -534,13 +523,6 @@ void JvmtiCurrentBreakpoints::oops_do(OopClosure* f) {
   }
 }
-void JvmtiCurrentBreakpoints::gc_epilogue() {
-  if (_jvmti_breakpoints != NULL) {
-    _jvmti_breakpoints->gc_epilogue();
-  }
-}
 ///////////////////////////////////////////////////////////////
 //
 // class VM_GetOrSetLocal
......
@@ -117,7 +117,6 @@ public:
   void clear();
   // apply f to every element and update the cache
   void oops_do(OopClosure* f);
-  void gc_epilogue();
 };
@@ -149,7 +148,6 @@ public:
   void remove (int index)     { _cache.remove(index); }
   void clear()                { _cache.clear(); }
   void oops_do(OopClosure* f) { _cache.oops_do(f); }
-  void gc_epilogue()          { _cache.gc_epilogue(); }
 };
@@ -278,7 +276,6 @@ public:
   int length();
   void oops_do(OopClosure* f);
-  void gc_epilogue();
   void print();
   int set(JvmtiBreakpoint& bp);
@@ -328,7 +325,6 @@ public:
   static inline bool is_breakpoint(address bcp);
   static void oops_do(OopClosure* f);
-  static void gc_epilogue();
 };
 // quickly test whether the bcp matches a cached breakpoint in the list
......
This diff has been collapsed.
@@ -45,17 +45,12 @@ class JvmtiTagMap : public CHeapObj {
  private:
   enum{
-    n_hashmaps = 2,                                 // encapsulates 2 hashmaps
-    max_free_entries = 4096                         // maximum number of free entries per env
+    max_free_entries = 4096                         // maximum number of free entries per env
   };
-  // memory region for young generation
-  static MemRegion _young_gen;
-  static void get_young_generation();
   JvmtiEnv*             _env;                      // the jvmti environment
   Mutex                 _lock;                     // lock for this tag map
-  JvmtiTagHashmap*      _hashmap[n_hashmaps];      // the hashmaps
+  JvmtiTagHashmap*      _hashmap;                  // the hashmap
   JvmtiTagHashmapEntry* _free_entries;             // free list for this environment
   int _free_entries_count;                         // number of entries on the free list
@@ -67,11 +62,7 @@ class JvmtiTagMap : public CHeapObj {
   inline Mutex* lock()                      { return &_lock; }
   inline JvmtiEnv* env() const              { return _env; }
-  // rehash tags maps for generation start to end
-  void rehash(int start, int end);
-  // indicates if the object is in the young generation
-  static bool is_in_young(oop o);
+  void do_weak_oops(BoolObjectClosure* is_alive, OopClosure* f);
   // iterate over all entries in this tag map
   void entry_iterate(JvmtiTagHashmapEntryClosure* closure);
@@ -81,11 +72,10 @@ class JvmtiTagMap : public CHeapObj {
   // indicates if this tag map is locked
   bool is_locked()                          { return lock()->is_locked(); }
-  // return the appropriate hashmap for a given object
-  JvmtiTagHashmap* hashmap_for(oop o);
+  JvmtiTagHashmap* hashmap() { return _hashmap; }
   // create/destroy entries
-  JvmtiTagHashmapEntry* create_entry(jweak ref, jlong tag);
+  JvmtiTagHashmapEntry* create_entry(oop ref, jlong tag);
   void destroy_entry(JvmtiTagHashmapEntry* entry);
   // returns true if the hashmaps are empty
@@ -134,11 +124,8 @@ class JvmtiTagMap : public CHeapObj {
                           jint* count_ptr, jobject** object_result_ptr,
                           jlong** tag_result_ptr);
-  // call post-GC to rehash the tag maps.
-  static void gc_epilogue(bool full);
-  // call after referencing processing has completed (CMS)
-  static void cms_ref_processing_epilogue();
+  static void weak_oops_do(
+      BoolObjectClosure* is_alive, OopClosure* f) KERNEL_RETURN;
 };
 #endif // SHARE_VM_PRIMS_JVMTITAGMAP_HPP
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -918,9 +918,7 @@ bool Arguments::add_property(const char* prop) {
   } else if (strcmp(key, "sun.java.command") == 0) {
     _java_command = value;
-    // don't add this property to the properties exposed to the java application
-    FreeHeap(key);
-    return true;
+    // Record value in Arguments, but let it get passed to Java.
   } else if (strcmp(key, "sun.java.launcher.pid") == 0) {
     // launcher.pid property is private and is processed
     // in process_sun_java_launcher_properties();
......
@@ -1198,9 +1198,6 @@ class CommandLineFlags {
   product(ccstr, TraceJVMTI, NULL, \
           "Trace flags for JVMTI functions and events") \
   \
-  product(bool, ForceFullGCJVMTIEpilogues, false, \
-          "Force 'Full GC' was done semantics for JVMTI GC epilogues") \
-  \
   /* This option can change an EMCP method into an obsolete method. */ \
   /* This can affect tests that except specific methods to be EMCP. */ \
   /* This option should be used with caution. */ \
......
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -663,7 +663,8 @@ void JDK_Version::initialize() {
   }
   _current = JDK_Version(major, minor, micro, info.update_version,
                          info.special_update_version, build,
-                         info.thread_park_blocker == 1);
+                         info.thread_park_blocker == 1,
+                         info.post_vm_init_hook_enabled == 1);
   }
 }
......
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -92,6 +92,7 @@ class JDK_Version VALUE_OBJ_CLASS_SPEC {
   bool _partially_initialized;
   bool _thread_park_blocker;
+  bool _post_vm_init_hook_enabled;
   bool is_valid() const {
     return (_major != 0 || _partially_initialized);
@@ -113,14 +114,15 @@ class JDK_Version VALUE_OBJ_CLASS_SPEC {
   JDK_Version() : _major(0), _minor(0), _micro(0), _update(0),
                   _special(0), _build(0), _partially_initialized(false),
-                  _thread_park_blocker(false) {}
+                  _thread_park_blocker(false), _post_vm_init_hook_enabled(false) {}
   JDK_Version(uint8_t major, uint8_t minor = 0, uint8_t micro = 0,
               uint8_t update = 0, uint8_t special = 0, uint8_t build = 0,
-              bool thread_park_blocker = false) :
+              bool thread_park_blocker = false, bool post_vm_init_hook_enabled = false) :
       _major(major), _minor(minor), _micro(micro), _update(update),
       _special(special), _build(build), _partially_initialized(false),
-      _thread_park_blocker(thread_park_blocker) {}
+      _thread_park_blocker(thread_park_blocker),
+      _post_vm_init_hook_enabled(post_vm_init_hook_enabled) {}
   // Returns the current running JDK version
   static JDK_Version current()             { return _current; }
@@ -144,6 +146,9 @@ class JDK_Version VALUE_OBJ_CLASS_SPEC {
   bool supports_thread_park_blocker() const {
     return _thread_park_blocker;
   }
+  bool post_vm_init_hook_enabled() const {
+    return _post_vm_init_hook_enabled;
+  }
   // Performs a full ordering comparison using all fields (update, build, etc.)
   int compare(const JDK_Version& other) const;
......
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "oops/oop.inline.hpp"
+#include "prims/jvmtiTagMap.hpp"
 #include "runtime/jniHandles.hpp"
 #include "runtime/mutexLocker.hpp"
 #ifdef TARGET_OS_FAMILY_linux
@@ -428,6 +429,12 @@ void JNIHandleBlock::weak_oops_do(BoolObjectClosure* is_alive,
       break;
     }
   }
+
+  /*
+   * JvmtiTagMap may also contain weak oops. The iteration of it is placed
+   * here so that we don't need to add it to each of the collectors.
+   */
+  JvmtiTagMap::weak_oops_do(is_alive, f);
 }
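The comment above explains the design choice; a hedged sketch of the resulting call chain (SomeCollector is hypothetical, JNIHandles::weak_oops_do is the entry point collectors already use):

    // Hypothetical collector-side code: the one call that collectors already make
    // now also walks the JVMTI tag map, so per-collector hooks such as the removed
    // JvmtiExport::cms_ref_processing_epilogue() are no longer needed.
    void SomeCollector::process_weak_jni_handles(BoolObjectClosure* is_alive,
                                                 OopClosure* keep_alive) {
      JNIHandles::weak_oops_do(is_alive, keep_alive);  // JNI weak globals + JvmtiTagMap entries
    }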
......
@@ -31,6 +31,7 @@
 #include "compiler/compileBroker.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/linkResolver.hpp"
+#include "jvmtifiles/jvmtiEnv.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/universe.inline.hpp"
 #include "oops/instanceKlass.hpp"
@@ -977,6 +978,19 @@ static void set_jkernel_boot_classloader_hook(TRAPS) {
 }
 #endif // KERNEL
+// General purpose hook into Java code, run once when the VM is initialized.
+// The Java library method itself may be changed independently from the VM.
+static void call_postVMInitHook(TRAPS) {
+  klassOop k = SystemDictionary::sun_misc_PostVMInitHook_klass();
+  instanceKlassHandle klass (THREAD, k);
+  if (klass.not_null()) {
+    JavaValue result(T_VOID);
+    JavaCalls::call_static(&result, klass, vmSymbolHandles::run_method_name(),
+                           vmSymbolHandles::void_method_signature(),
+                           CHECK);
+  }
+}
 static void reset_vm_info_property(TRAPS) {
   // the vm info string
   ResourceMark rm(THREAD);
@@ -1699,7 +1713,7 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
     tlab().make_parsable(true);  // retire TLAB
   }
-  if (jvmti_thread_state() != NULL) {
+  if (JvmtiEnv::environments_might_exist()) {
     JvmtiExport::cleanup_thread(this);
   }
@@ -3345,6 +3359,14 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
   BiasedLocking::init();
+  if (JDK_Version::current().post_vm_init_hook_enabled()) {
+    call_postVMInitHook(THREAD);
+    // The Java side of PostVMInitHook.run must deal with all
+    // exceptions and provide means of diagnosis.
+    if (HAS_PENDING_EXCEPTION) {
+      CLEAR_PENDING_EXCEPTION;
+    }
+  }
   // Start up the WatcherThread if there are any periodic tasks
   // NOTE: All PeriodicTasks should be registered by now. If they
......
@@ -809,7 +809,7 @@ class JavaThread: public Thread {
   //
   // _vm_exited is a special value to cover the case of a JavaThread
   // executing native code after the VM itself is terminated.
-  TerminatedTypes _terminated;
+  volatile TerminatedTypes _terminated;
   // suspend/resume support
   volatile bool _suspend_equivalent;     // Suspend equivalent condition
   jint _in_deopt_handler;                // count of deoptimization
......
@@ -874,11 +874,13 @@ void VMError::report_and_die() {
     }
     if (fd == -1) {
-      // try temp directory
       const char * tmpdir = os::get_temp_directory();
-      jio_snprintf(buffer, sizeof(buffer), "%s%shs_err_pid%u.log",
-                   tmpdir, os::file_separator(), os::current_process_id());
-      fd = open(buffer, O_WRONLY | O_CREAT | O_TRUNC, 0666);
+      // try temp directory if it exists.
+      if (tmpdir != NULL && tmpdir[0] != '\0') {
+        jio_snprintf(buffer, sizeof(buffer), "%s%shs_err_pid%u.log",
+                     tmpdir, os::file_separator(), os::current_process_id());
+        fd = open(buffer, O_WRONLY | O_CREAT | O_TRUNC, 0666);
+      }
     }
     if (fd != -1) {
......