diff --git a/make/Makefile b/make/Makefile index a0b7ba7687c577d9694cb32fec960f68d9e7d46b..fe5a6b684d67e3d4831288825e42c480a208a886 100644 --- a/make/Makefile +++ b/make/Makefile @@ -453,14 +453,30 @@ ifneq ($(OSNAME),windows) ifeq ($(JVM_VARIANT_ZEROSHARK), true) $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_DIR)/%.$(LIBRARY_SUFFIX) $(install-file) + $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(SHARK_DIR)/%.debuginfo + $(install-file) + $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(SHARK_DIR)/%.diz + $(install-file) $(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_DIR)/%.$(LIBRARY_SUFFIX) $(install-file) + $(EXPORT_SERVER_DIR)/%.debuginfo: $(SHARK_DIR)/%.debuginfo + $(install-file) + $(EXPORT_SERVER_DIR)/%.diz: $(SHARK_DIR)/%.diz + $(install-file) endif ifeq ($(JVM_VARIANT_ZERO), true) $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_DIR)/%.$(LIBRARY_SUFFIX) $(install-file) + $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(ZERO_DIR)/%.debuginfo + $(install-file) + $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(ZERO_DIR)/%.diz + $(install-file) $(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_DIR)/%.$(LIBRARY_SUFFIX) $(install-file) + $(EXPORT_SERVER_DIR)/%.debuginfo: $(ZERO_DIR)/%.debuginfo + $(install-file) + $(EXPORT_SERVER_DIR)/%.diz: $(ZERO_DIR)/%.diz + $(install-file) endif ifeq ($(JVM_VARIANT_MINIMAL1), true) $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX) diff --git a/make/hotspot_version b/make/hotspot_version index 1f72e227aaefe2fea0472e049e90300e6e32dc6d..59320dbec54fec6fa018f497dca4145d02981a6c 100644 --- a/make/hotspot_version +++ b/make/hotspot_version @@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2012 HS_MAJOR_VER=25 HS_MINOR_VER=0 -HS_BUILD_NUMBER=07 +HS_BUILD_NUMBER=08 JDK_MAJOR_VER=1 JDK_MINOR_VER=8 diff --git a/src/cpu/zero/vm/cppInterpreterGenerator_zero.hpp b/src/cpu/zero/vm/cppInterpreterGenerator_zero.hpp index ff61306c760f2773ab40420efcbbaab03c3d270c..6b2cacf5ae2aecabf6e215775b46a3afb1c83fee 100644 --- a/src/cpu/zero/vm/cppInterpreterGenerator_zero.hpp +++ b/src/cpu/zero/vm/cppInterpreterGenerator_zero.hpp @@ -31,12 +31,17 @@ return _masm; } - protected: - address generate_entry(address entry_point) { - ZeroEntry *entry = (ZeroEntry *) assembler()->pc(); - assembler()->advance(sizeof(ZeroEntry)); + public: + static address generate_entry_impl(MacroAssembler* masm, address entry_point) { + ZeroEntry *entry = (ZeroEntry *) masm->pc(); + masm->advance(sizeof(ZeroEntry)); entry->set_entry_point(entry_point); return (address) entry; } + protected: + address generate_entry(address entry_point) { + return generate_entry_impl(assembler(), entry_point); + } + #endif // CPU_ZERO_VM_CPPINTERPRETERGENERATOR_ZERO_HPP diff --git a/src/cpu/zero/vm/cppInterpreter_zero.cpp b/src/cpu/zero/vm/cppInterpreter_zero.cpp index 1b2da5a5d1591a7377f687da13505b011afcc728..ee855a9efbc27432418e7dbe4e824fad483fd8b2 100644 --- a/src/cpu/zero/vm/cppInterpreter_zero.cpp +++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp @@ -180,25 +180,6 @@ void CppInterpreter::main_loop(int recurse, TRAPS) { method, istate->osr_entry(), istate->osr_buf(), THREAD); return; } - else if (istate->msg() == BytecodeInterpreter::call_method_handle) { - oop method_handle = istate->callee(); - - // Trim back the stack to put the parameters at the top - stack->set_sp(istate->stack() + 1); - - // Make the call - process_method_handle(method_handle, THREAD); - fixup_after_potential_safepoint(); - - // Convert the result - istate->set_stack(stack->sp() - 1); - - // Restore the stack - 
stack->set_sp(istate->stack_limit() + 1); - - // Resume the interpreter - istate->set_msg(BytecodeInterpreter::method_resume); - } else { ShouldNotReachHere(); } @@ -535,35 +516,35 @@ int CppInterpreter::accessor_entry(Method* method, intptr_t UNUSED, TRAPS) { if (entry->is_volatile()) { switch (entry->flag_state()) { case ctos: - SET_LOCALS_INT(object->char_field_acquire(entry->f2()), 0); + SET_LOCALS_INT(object->char_field_acquire(entry->f2_as_index()), 0); break; case btos: - SET_LOCALS_INT(object->byte_field_acquire(entry->f2()), 0); + SET_LOCALS_INT(object->byte_field_acquire(entry->f2_as_index()), 0); break; case stos: - SET_LOCALS_INT(object->short_field_acquire(entry->f2()), 0); + SET_LOCALS_INT(object->short_field_acquire(entry->f2_as_index()), 0); break; case itos: - SET_LOCALS_INT(object->int_field_acquire(entry->f2()), 0); + SET_LOCALS_INT(object->int_field_acquire(entry->f2_as_index()), 0); break; case ltos: - SET_LOCALS_LONG(object->long_field_acquire(entry->f2()), 0); + SET_LOCALS_LONG(object->long_field_acquire(entry->f2_as_index()), 0); break; case ftos: - SET_LOCALS_FLOAT(object->float_field_acquire(entry->f2()), 0); + SET_LOCALS_FLOAT(object->float_field_acquire(entry->f2_as_index()), 0); break; case dtos: - SET_LOCALS_DOUBLE(object->double_field_acquire(entry->f2()), 0); + SET_LOCALS_DOUBLE(object->double_field_acquire(entry->f2_as_index()), 0); break; case atos: - SET_LOCALS_OBJECT(object->obj_field_acquire(entry->f2()), 0); + SET_LOCALS_OBJECT(object->obj_field_acquire(entry->f2_as_index()), 0); break; default: @@ -573,35 +554,35 @@ int CppInterpreter::accessor_entry(Method* method, intptr_t UNUSED, TRAPS) { else { switch (entry->flag_state()) { case ctos: - SET_LOCALS_INT(object->char_field(entry->f2()), 0); + SET_LOCALS_INT(object->char_field(entry->f2_as_index()), 0); break; case btos: - SET_LOCALS_INT(object->byte_field(entry->f2()), 0); + SET_LOCALS_INT(object->byte_field(entry->f2_as_index()), 0); break; case stos: - SET_LOCALS_INT(object->short_field(entry->f2()), 0); + SET_LOCALS_INT(object->short_field(entry->f2_as_index()), 0); break; case itos: - SET_LOCALS_INT(object->int_field(entry->f2()), 0); + SET_LOCALS_INT(object->int_field(entry->f2_as_index()), 0); break; case ltos: - SET_LOCALS_LONG(object->long_field(entry->f2()), 0); + SET_LOCALS_LONG(object->long_field(entry->f2_as_index()), 0); break; case ftos: - SET_LOCALS_FLOAT(object->float_field(entry->f2()), 0); + SET_LOCALS_FLOAT(object->float_field(entry->f2_as_index()), 0); break; case dtos: - SET_LOCALS_DOUBLE(object->double_field(entry->f2()), 0); + SET_LOCALS_DOUBLE(object->double_field(entry->f2_as_index()), 0); break; case atos: - SET_LOCALS_OBJECT(object->obj_field(entry->f2()), 0); + SET_LOCALS_OBJECT(object->obj_field(entry->f2_as_index()), 0); break; default: @@ -629,516 +610,6 @@ int CppInterpreter::empty_entry(Method* method, intptr_t UNUSED, TRAPS) { return 0; } -int CppInterpreter::method_handle_entry(Method* method, - intptr_t UNUSED, TRAPS) { - JavaThread *thread = (JavaThread *) THREAD; - ZeroStack *stack = thread->zero_stack(); - int argument_slots = method->size_of_parameters(); - int result_slots = type2size[result_type_of(method)]; - intptr_t *vmslots = stack->sp(); - intptr_t *unwind_sp = vmslots + argument_slots; - - // Find the MethodType - address p = (address) method; - for (jint* pc = method->method_type_offsets_chain(); (*pc) != -1; pc++) { - p = *(address*)(p + (*pc)); - } - oop method_type = (oop) p; - - // The MethodHandle is in the slot after the arguments - int 
num_vmslots = argument_slots - 1; - oop method_handle = VMSLOTS_OBJECT(num_vmslots); - - // InvokeGeneric requires some extra shuffling - oop mhtype = java_lang_invoke_MethodHandle::type(method_handle); - bool is_exact = mhtype == method_type; - if (!is_exact) { - if (true || // FIXME - method->intrinsic_id() == vmIntrinsics::_invokeExact) { - CALL_VM_NOCHECK_NOFIX( - SharedRuntime::throw_WrongMethodTypeException( - thread, method_type, mhtype)); - // NB all oops trashed! - assert(HAS_PENDING_EXCEPTION, "should do"); - stack->set_sp(unwind_sp); - return 0; - } - assert(method->intrinsic_id() == vmIntrinsics::_invokeGeneric, "should be"); - - // Load up an adapter from the calling type - // NB the x86 code for this (in methodHandles_x86.cpp, search for - // "genericInvoker") is really really odd. I'm hoping it's trying - // to accomodate odd VM/class library combinations I can ignore. - oop adapter = NULL; //FIXME: load the adapter from the CP cache - if (adapter == NULL) { - CALL_VM_NOCHECK_NOFIX( - SharedRuntime::throw_WrongMethodTypeException( - thread, method_type, mhtype)); - // NB all oops trashed! - assert(HAS_PENDING_EXCEPTION, "should do"); - stack->set_sp(unwind_sp); - return 0; - } - - // Adapters are shared among form-families of method-type. The - // type being called is passed as a trusted first argument so that - // the adapter knows the actual types of its arguments and return - // values. - insert_vmslots(num_vmslots + 1, 1, THREAD); - if (HAS_PENDING_EXCEPTION) { - // NB all oops trashed! - stack->set_sp(unwind_sp); - return 0; - } - - vmslots = stack->sp(); - num_vmslots++; - SET_VMSLOTS_OBJECT(method_type, num_vmslots); - - method_handle = adapter; - } - - // Start processing - process_method_handle(method_handle, THREAD); - if (HAS_PENDING_EXCEPTION) - result_slots = 0; - - // If this is an invokeExact then the eventual callee will not - // have unwound the method handle argument so we have to do it. - // If a result is being returned the it will be above the method - // handle argument we're unwinding. - if (is_exact) { - intptr_t result[2]; - for (int i = 0; i < result_slots; i++) - result[i] = stack->pop(); - stack->pop(); - for (int i = result_slots - 1; i >= 0; i--) - stack->push(result[i]); - } - - // Check - assert(stack->sp() == unwind_sp - result_slots, "should be"); - - // No deoptimized frames on the stack - return 0; -} - -void CppInterpreter::process_method_handle(oop method_handle, TRAPS) { - JavaThread *thread = (JavaThread *) THREAD; - ZeroStack *stack = thread->zero_stack(); - intptr_t *vmslots = stack->sp(); - - bool direct_to_method = false; - BasicType src_rtype = T_ILLEGAL; - BasicType dst_rtype = T_ILLEGAL; - - MethodHandleEntry *entry = - java_lang_invoke_MethodHandle::vmentry(method_handle); - MethodHandles::EntryKind entry_kind = - (MethodHandles::EntryKind) (((intptr_t) entry) & 0xffffffff); - - Method* method = NULL; - switch (entry_kind) { - case MethodHandles::_invokestatic_mh: - direct_to_method = true; - break; - - case MethodHandles::_invokespecial_mh: - case MethodHandles::_invokevirtual_mh: - case MethodHandles::_invokeinterface_mh: - { - oop receiver = - VMSLOTS_OBJECT( - java_lang_invoke_MethodHandle::vmslots(method_handle) - 1); - if (receiver == NULL) { - stack->set_sp(calculate_unwind_sp(stack, method_handle)); - CALL_VM_NOCHECK_NOFIX( - throw_exception( - thread, vmSymbols::java_lang_NullPointerException())); - // NB all oops trashed! 
- assert(HAS_PENDING_EXCEPTION, "should do"); - return; - } - if (entry_kind != MethodHandles::_invokespecial_mh) { - intptr_t index = java_lang_invoke_DirectMethodHandle::vmindex(method_handle); - InstanceKlass* rcvrKlass = - (InstanceKlass *) receiver->klass(); - if (entry_kind == MethodHandles::_invokevirtual_mh) { - method = (Method*) rcvrKlass->start_of_vtable()[index]; - } - else { - oop iclass = java_lang_invoke_MethodHandle::next_target(method_handle); - itableOffsetEntry* ki = - (itableOffsetEntry *) rcvrKlass->start_of_itable(); - int i, length = rcvrKlass->itable_length(); - for (i = 0; i < length; i++, ki++ ) { - if (ki->interface_klass() == iclass) - break; - } - if (i == length) { - stack->set_sp(calculate_unwind_sp(stack, method_handle)); - CALL_VM_NOCHECK_NOFIX( - throw_exception( - thread, vmSymbols::java_lang_IncompatibleClassChangeError())); - // NB all oops trashed! - assert(HAS_PENDING_EXCEPTION, "should do"); - return; - } - itableMethodEntry* im = ki->first_method_entry(receiver->klass()); - method = im[index].method(); - if (method == NULL) { - stack->set_sp(calculate_unwind_sp(stack, method_handle)); - CALL_VM_NOCHECK_NOFIX( - throw_exception( - thread, vmSymbols::java_lang_AbstractMethodError())); - // NB all oops trashed! - assert(HAS_PENDING_EXCEPTION, "should do"); - return; - } - } - } - } - direct_to_method = true; - break; - - case MethodHandles::_bound_ref_direct_mh: - case MethodHandles::_bound_int_direct_mh: - case MethodHandles::_bound_long_direct_mh: - direct_to_method = true; - // fall through - case MethodHandles::_bound_ref_mh: - case MethodHandles::_bound_int_mh: - case MethodHandles::_bound_long_mh: - { - BasicType arg_type = T_ILLEGAL; - int arg_mask = -1; - int arg_slots = -1; - MethodHandles::get_ek_bound_mh_info( - entry_kind, arg_type, arg_mask, arg_slots); - int arg_slot = - java_lang_invoke_BoundMethodHandle::vmargslot(method_handle); - - // Create the new slot(s) - intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle); - insert_vmslots(arg_slot, arg_slots, THREAD); - if (HAS_PENDING_EXCEPTION) { - // all oops trashed - stack->set_sp(unwind_sp); - return; - } - vmslots = stack->sp(); - - // Store bound argument into new stack slot - oop arg = java_lang_invoke_BoundMethodHandle::argument(method_handle); - if (arg_type == T_OBJECT) { - assert(arg_slots == 1, "should be"); - SET_VMSLOTS_OBJECT(arg, arg_slot); - } - else { - jvalue arg_value; - arg_type = java_lang_boxing_object::get_value(arg, &arg_value); - switch (arg_type) { - case T_BOOLEAN: - SET_VMSLOTS_INT(arg_value.z, arg_slot); - break; - case T_CHAR: - SET_VMSLOTS_INT(arg_value.c, arg_slot); - break; - case T_BYTE: - SET_VMSLOTS_INT(arg_value.b, arg_slot); - break; - case T_SHORT: - SET_VMSLOTS_INT(arg_value.s, arg_slot); - break; - case T_INT: - SET_VMSLOTS_INT(arg_value.i, arg_slot); - break; - case T_FLOAT: - SET_VMSLOTS_FLOAT(arg_value.f, arg_slot); - break; - case T_LONG: - SET_VMSLOTS_LONG(arg_value.j, arg_slot + 1); - break; - case T_DOUBLE: - SET_VMSLOTS_DOUBLE(arg_value.d, arg_slot + 1); - break; - default: - tty->print_cr("unhandled type %s", type2name(arg_type)); - ShouldNotReachHere(); - } - } - } - break; - - case MethodHandles::_adapter_retype_only: - case MethodHandles::_adapter_retype_raw: - src_rtype = result_type_of_handle( - java_lang_invoke_MethodHandle::next_target(method_handle)); - dst_rtype = result_type_of_handle(method_handle); - break; - - case MethodHandles::_adapter_check_cast: - { - int arg_slot = - 
java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle); - oop arg = VMSLOTS_OBJECT(arg_slot); - if (arg != NULL) { - Klass* objKlassOop = arg->klass(); - Klass* klassOf = java_lang_Class::as_Klass( - java_lang_invoke_AdapterMethodHandle::argument(method_handle)); - - if (objKlassOop != klassOf && - !objKlassOop->is_subtype_of(klassOf)) { - ResourceMark rm(THREAD); - const char* objName = Klass::cast(objKlassOop)->external_name(); - const char* klassName = Klass::cast(klassOf)->external_name(); - char* message = SharedRuntime::generate_class_cast_message( - objName, klassName); - - stack->set_sp(calculate_unwind_sp(stack, method_handle)); - CALL_VM_NOCHECK_NOFIX( - throw_exception( - thread, vmSymbols::java_lang_ClassCastException(), message)); - // NB all oops trashed! - assert(HAS_PENDING_EXCEPTION, "should do"); - return; - } - } - } - break; - - case MethodHandles::_adapter_dup_args: - { - int arg_slot = - java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle); - int conv = - java_lang_invoke_AdapterMethodHandle::conversion(method_handle); - int num_slots = -MethodHandles::adapter_conversion_stack_move(conv); - assert(num_slots > 0, "should be"); - - // Create the new slot(s) - intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle); - stack->overflow_check(num_slots, THREAD); - if (HAS_PENDING_EXCEPTION) { - // all oops trashed - stack->set_sp(unwind_sp); - return; - } - - // Duplicate the arguments - for (int i = num_slots - 1; i >= 0; i--) - stack->push(*VMSLOTS_SLOT(arg_slot + i)); - - vmslots = stack->sp(); // unused, but let the compiler figure that out - } - break; - - case MethodHandles::_adapter_drop_args: - { - int arg_slot = - java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle); - int conv = - java_lang_invoke_AdapterMethodHandle::conversion(method_handle); - int num_slots = MethodHandles::adapter_conversion_stack_move(conv); - assert(num_slots > 0, "should be"); - - remove_vmslots(arg_slot, num_slots, THREAD); // doesn't trap - vmslots = stack->sp(); // unused, but let the compiler figure that out - } - break; - - case MethodHandles::_adapter_opt_swap_1: - case MethodHandles::_adapter_opt_swap_2: - case MethodHandles::_adapter_opt_rot_1_up: - case MethodHandles::_adapter_opt_rot_1_down: - case MethodHandles::_adapter_opt_rot_2_up: - case MethodHandles::_adapter_opt_rot_2_down: - { - int arg1 = - java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle); - int conv = - java_lang_invoke_AdapterMethodHandle::conversion(method_handle); - int arg2 = MethodHandles::adapter_conversion_vminfo(conv); - - int swap_bytes = 0, rotate = 0; - MethodHandles::get_ek_adapter_opt_swap_rot_info( - entry_kind, swap_bytes, rotate); - int swap_slots = swap_bytes >> LogBytesPerWord; - - intptr_t tmp; - switch (rotate) { - case 0: // swap - for (int i = 0; i < swap_slots; i++) { - tmp = *VMSLOTS_SLOT(arg1 + i); - SET_VMSLOTS_SLOT(VMSLOTS_SLOT(arg2 + i), arg1 + i); - SET_VMSLOTS_SLOT(&tmp, arg2 + i); - } - break; - - case 1: // up - assert(arg1 - swap_slots > arg2, "should be"); - - tmp = *VMSLOTS_SLOT(arg1); - for (int i = arg1 - swap_slots; i >= arg2; i--) - SET_VMSLOTS_SLOT(VMSLOTS_SLOT(i), i + swap_slots); - SET_VMSLOTS_SLOT(&tmp, arg2); - - break; - - case -1: // down - assert(arg2 - swap_slots > arg1, "should be"); - - tmp = *VMSLOTS_SLOT(arg1); - for (int i = arg1 + swap_slots; i <= arg2; i++) - SET_VMSLOTS_SLOT(VMSLOTS_SLOT(i), i - swap_slots); - SET_VMSLOTS_SLOT(&tmp, arg2); - break; - - default: - ShouldNotReachHere(); - } - } - break; - - case 
MethodHandles::_adapter_opt_i2l: - { - int arg_slot = - java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle); - int arg = VMSLOTS_INT(arg_slot); - intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle); - insert_vmslots(arg_slot, 1, THREAD); - if (HAS_PENDING_EXCEPTION) { - // all oops trashed - stack->set_sp(unwind_sp); - return; - } - vmslots = stack->sp(); - arg_slot++; - SET_VMSLOTS_LONG(arg, arg_slot); - } - break; - - case MethodHandles::_adapter_opt_unboxi: - case MethodHandles::_adapter_opt_unboxl: - { - int arg_slot = - java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle); - oop arg = VMSLOTS_OBJECT(arg_slot); - jvalue arg_value; - if (arg == NULL) { - // queue a nullpointer exception for the caller - stack->set_sp(calculate_unwind_sp(stack, method_handle)); - CALL_VM_NOCHECK_NOFIX( - throw_exception( - thread, vmSymbols::java_lang_NullPointerException())); - // NB all oops trashed! - assert(HAS_PENDING_EXCEPTION, "should do"); - return; - } - BasicType arg_type = java_lang_boxing_object::get_value(arg, &arg_value); - if (arg_type == T_LONG || arg_type == T_DOUBLE) { - intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle); - insert_vmslots(arg_slot, 1, THREAD); - if (HAS_PENDING_EXCEPTION) { - // all oops trashed - stack->set_sp(unwind_sp); - return; - } - vmslots = stack->sp(); - arg_slot++; - } - switch (arg_type) { - case T_BOOLEAN: - SET_VMSLOTS_INT(arg_value.z, arg_slot); - break; - case T_CHAR: - SET_VMSLOTS_INT(arg_value.c, arg_slot); - break; - case T_BYTE: - SET_VMSLOTS_INT(arg_value.b, arg_slot); - break; - case T_SHORT: - SET_VMSLOTS_INT(arg_value.s, arg_slot); - break; - case T_INT: - SET_VMSLOTS_INT(arg_value.i, arg_slot); - break; - case T_FLOAT: - SET_VMSLOTS_FLOAT(arg_value.f, arg_slot); - break; - case T_LONG: - SET_VMSLOTS_LONG(arg_value.j, arg_slot); - break; - case T_DOUBLE: - SET_VMSLOTS_DOUBLE(arg_value.d, arg_slot); - break; - default: - tty->print_cr("unhandled type %s", type2name(arg_type)); - ShouldNotReachHere(); - } - } - break; - - default: - tty->print_cr("unhandled entry_kind %s", - MethodHandles::entry_name(entry_kind)); - ShouldNotReachHere(); - } - - // Continue along the chain - if (direct_to_method) { - if (method == NULL) { - method = - (Method*) java_lang_invoke_MethodHandle::vmtarget(method_handle); - } - address entry_point = method->from_interpreted_entry(); - Interpreter::invoke_method(method, entry_point, THREAD); - } - else { - process_method_handle( - java_lang_invoke_MethodHandle::next_target(method_handle), THREAD); - } - // NB all oops now trashed - - // Adapt the result type, if necessary - if (src_rtype != dst_rtype && !HAS_PENDING_EXCEPTION) { - switch (dst_rtype) { - case T_VOID: - for (int i = 0; i < type2size[src_rtype]; i++) - stack->pop(); - return; - - case T_INT: - switch (src_rtype) { - case T_VOID: - stack->overflow_check(1, CHECK); - stack->push(0); - return; - - case T_BOOLEAN: - case T_CHAR: - case T_BYTE: - case T_SHORT: - return; - } - // INT results sometimes need narrowing - case T_BOOLEAN: - case T_CHAR: - case T_BYTE: - case T_SHORT: - switch (src_rtype) { - case T_INT: - return; - } - } - - tty->print_cr("unhandled conversion:"); - tty->print_cr("src_rtype = %s", type2name(src_rtype)); - tty->print_cr("dst_rtype = %s", type2name(dst_rtype)); - ShouldNotReachHere(); - } -} - // The new slots will be inserted before slot insert_before. // Slots < insert_before will have the same slot number after the insert. // Slots >= insert_before will become old_slot + num_slots. 
@@ -1380,10 +851,6 @@ address AbstractInterpreterGenerator::generate_method_entry( entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry(); break; - case Interpreter::method_handle: - entry_point = ((InterpreterGenerator*) this)->generate_method_handle_entry(); - break; - case Interpreter::java_lang_math_sin: case Interpreter::java_lang_math_cos: case Interpreter::java_lang_math_tan: @@ -1391,6 +858,8 @@ address AbstractInterpreterGenerator::generate_method_entry( case Interpreter::java_lang_math_log: case Interpreter::java_lang_math_log10: case Interpreter::java_lang_math_sqrt: + case Interpreter::java_lang_math_pow: + case Interpreter::java_lang_math_exp: entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind); break; diff --git a/src/cpu/zero/vm/cppInterpreter_zero.hpp b/src/cpu/zero/vm/cppInterpreter_zero.hpp index 2faae7169ab970a32ba640debbbdd014d1345a09..3a6bd52f2cbb0a100c0aa5362150155db4a89ec4 100644 --- a/src/cpu/zero/vm/cppInterpreter_zero.hpp +++ b/src/cpu/zero/vm/cppInterpreter_zero.hpp @@ -36,7 +36,6 @@ static int native_entry(Method* method, intptr_t UNUSED, TRAPS); static int accessor_entry(Method* method, intptr_t UNUSED, TRAPS); static int empty_entry(Method* method, intptr_t UNUSED, TRAPS); - static int method_handle_entry(Method* method, intptr_t UNUSED, TRAPS); public: // Main loop of normal_entry @@ -44,7 +43,6 @@ private: // Helpers for method_handle_entry - static void process_method_handle(oop method_handle, TRAPS); static void insert_vmslots(int insert_before, int num_slots, TRAPS); static void remove_vmslots(int first_slot, int num_slots, TRAPS); static BasicType result_type_of_handle(oop method_handle); diff --git a/src/cpu/zero/vm/frame_zero.cpp b/src/cpu/zero/vm/frame_zero.cpp index 6ce4b23bc6b3b03f29d7e547456b400f6ae3cc6f..8643af5953f6e2b1860fa850dca374e293f7b7ef 100644 --- a/src/cpu/zero/vm/frame_zero.cpp +++ b/src/cpu/zero/vm/frame_zero.cpp @@ -351,7 +351,7 @@ void SharkFrame::identify_word(int frame_index, switch (offset) { case pc_off: strncpy(fieldbuf, "pc", buflen); - if (method()->is_oop()) { + if (method()->is_method()) { nmethod *code = method()->code(); if (code && code->pc_desc_at(pc())) { SimpleScopeDesc ssd(code, pc()); @@ -367,7 +367,7 @@ void SharkFrame::identify_word(int frame_index, case method_off: strncpy(fieldbuf, "method", buflen); - if (method()->is_oop()) { + if (method()->is_method()) { method()->name_and_sig_as_C_string(valuebuf, buflen); } return; @@ -378,7 +378,7 @@ void SharkFrame::identify_word(int frame_index, } // Variable part - if (method()->is_oop()) { + if (method()->is_method()) { identify_vp_word(frame_index, addr_of_word(offset), addr_of_word(header_words + 1), unextended_sp() + method()->max_stack(), @@ -430,4 +430,3 @@ intptr_t *frame::initial_deoptimization_info() { // unused... 
but returns fp() to minimize changes introduced by 7087445 return fp(); } - diff --git a/src/cpu/zero/vm/frame_zero.inline.hpp b/src/cpu/zero/vm/frame_zero.inline.hpp index e41ec13798a2e8eefc25863b31e3d6138f398b45..2bc703ae03283ebfb4d1d4b2ca2a4fc36fb5d144 100644 --- a/src/cpu/zero/vm/frame_zero.inline.hpp +++ b/src/cpu/zero/vm/frame_zero.inline.hpp @@ -36,6 +36,8 @@ inline frame::frame() { _deopt_state = unknown; } +inline address frame::sender_pc() const { ShouldNotCallThis(); } + inline frame::frame(ZeroFrame* zf, intptr_t* sp) { _zeroframe = zf; _sp = sp; diff --git a/src/cpu/zero/vm/icBuffer_zero.cpp b/src/cpu/zero/vm/icBuffer_zero.cpp index af2f8bec5c16f0e5b64f55259aed0fc591b093f6..95d0e115a6658d817673622e6c427cf56133bce9 100644 --- a/src/cpu/zero/vm/icBuffer_zero.cpp +++ b/src/cpu/zero/vm/icBuffer_zero.cpp @@ -40,7 +40,7 @@ int InlineCacheBuffer::ic_stub_code_size() { } void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, - Metadata* cached_oop, + void* cached_oop, address entry_point) { // NB ic_stub_code_size() must return the size of the code we generate ShouldNotCallThis(); @@ -51,7 +51,6 @@ address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) { ShouldNotCallThis(); } -Metadata* InlineCacheBuffer::ic_buffer_cached_oop(address code_begin) { - // NB ic_stub_code_size() must return the size of the code we generate +void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) { ShouldNotCallThis(); } diff --git a/src/cpu/zero/vm/methodHandles_zero.cpp b/src/cpu/zero/vm/methodHandles_zero.cpp index 35f4b11f5d3e377e127dc7688ca45f6df6acd4aa..ea1bc0eedc108e464a6b4fcb2ab315fed570b091 100644 --- a/src/cpu/zero/vm/methodHandles_zero.cpp +++ b/src/cpu/zero/vm/methodHandles_zero.cpp @@ -24,26 +24,159 @@ */ #include "precompiled.hpp" +#include "interpreter/interpreterGenerator.hpp" #include "interpreter/interpreter.hpp" #include "memory/allocation.inline.hpp" #include "prims/methodHandles.hpp" -int MethodHandles::adapter_conversion_ops_supported_mask() { - return ((1<zero_stack(); + InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame(); + interpreterState istate = frame->interpreter_state(); + + // Trim back the stack to put the parameters at the top + stack->set_sp(istate->stack() + 1); + + Interpreter::invoke_method(method, method->from_interpreted_entry(), THREAD); + + // Convert the result + istate->set_stack(stack->sp() - 1); + +} + +oop MethodHandles::popFromStack(TRAPS) { + + JavaThread *thread = (JavaThread *) THREAD; + InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame(); + interpreterState istate = frame->interpreter_state(); + intptr_t* topOfStack = istate->stack(); + + oop top = STACK_OBJECT(-1); + MORE_STACK(-1); + istate->set_stack(topOfStack); + + return top; + +} + +int MethodHandles::method_handle_entry_invokeBasic(Method* method, intptr_t UNUSED, TRAPS) { + + JavaThread *thread = (JavaThread *) THREAD; + InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame(); + interpreterState istate = frame->interpreter_state(); + intptr_t* topOfStack = istate->stack(); + + // 'this' is a MethodHandle. We resolve the target method by accessing this.form.vmentry.vmtarget. 
+ int numArgs = method->size_of_parameters(); + oop lform1 = java_lang_invoke_MethodHandle::form(STACK_OBJECT(-numArgs)); // this.form + oop vmEntry1 = java_lang_invoke_LambdaForm::vmentry(lform1); + Method* vmtarget = (Method*) java_lang_invoke_MemberName::vmtarget(vmEntry1); + + invoke_target(vmtarget, THREAD); + + // No deoptimized frames on the stack + return 0; +} + +int MethodHandles::method_handle_entry_linkToStaticOrSpecial(Method* method, intptr_t UNUSED, TRAPS) { + + // Pop appendix argument from stack. This is a MemberName which we resolve to the + // target method. + oop vmentry = popFromStack(THREAD); + + Method* vmtarget = (Method*) java_lang_invoke_MemberName::vmtarget(vmentry); + + invoke_target(vmtarget, THREAD); + + return 0; +} + +int MethodHandles::method_handle_entry_linkToInterface(Method* method, intptr_t UNUSED, TRAPS) { + JavaThread *thread = (JavaThread *) THREAD; + InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame(); + interpreterState istate = frame->interpreter_state(); + + // Pop appendix argument from stack. This is a MemberName which we resolve to the + // target method. + oop vmentry = popFromStack(THREAD); + intptr_t* topOfStack = istate->stack(); + + // Resolve target method by looking up in the receiver object's itable. + Klass* clazz = java_lang_Class::as_Klass(java_lang_invoke_MemberName::clazz(vmentry)); + intptr_t vmindex = java_lang_invoke_MemberName::vmindex(vmentry); + Method* target = (Method*) java_lang_invoke_MemberName::vmtarget(vmentry); + + int numArgs = target->size_of_parameters(); + oop recv = STACK_OBJECT(-numArgs); + + InstanceKlass* klass_part = InstanceKlass::cast(recv->klass()); + itableOffsetEntry* ki = (itableOffsetEntry*) klass_part->start_of_itable(); + int i; + for ( i = 0 ; i < klass_part->itable_length() ; i++, ki++ ) { + if (ki->interface_klass() == clazz) break; + } + + itableMethodEntry* im = ki->first_method_entry(recv->klass()); + Method* vmtarget = im[vmindex].method(); + + invoke_target(vmtarget, THREAD); + + return 0; +} + +int MethodHandles::method_handle_entry_linkToVirtual(Method* method, intptr_t UNUSED, TRAPS) { + JavaThread *thread = (JavaThread *) THREAD; + + InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame(); + interpreterState istate = frame->interpreter_state(); + + // Pop appendix argument from stack. This is a MemberName which we resolve to the + // target method. + oop vmentry = popFromStack(THREAD); + intptr_t* topOfStack = istate->stack(); + + // Resolve target method by looking up in the receiver object's vtable. + intptr_t vmindex = java_lang_invoke_MemberName::vmindex(vmentry); + Method* target = (Method*) java_lang_invoke_MemberName::vmtarget(vmentry); + int numArgs = target->size_of_parameters(); + oop recv = STACK_OBJECT(-numArgs); + Klass* clazz = recv->klass(); + Klass* klass_part = InstanceKlass::cast(clazz); + klassVtable* vtable = klass_part->vtable(); + Method* vmtarget = vtable->method_at(vmindex); + + invoke_target(vmtarget, THREAD); + + return 0; +} + +int MethodHandles::method_handle_entry_invalid(Method* method, intptr_t UNUSED, TRAPS) { + ShouldNotReachHere(); + return 0; +} + +address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* masm, + vmIntrinsics::ID iid) { + switch (iid) { + case vmIntrinsics::_invokeGeneric: + case vmIntrinsics::_compiledLambdaForm: + // Perhaps surprisingly, the symbolic references visible to Java are not directly used. 
+ // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod. + // They all allow an appendix argument. + return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_invalid); + case vmIntrinsics::_invokeBasic: + return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_invokeBasic); + case vmIntrinsics::_linkToStatic: + case vmIntrinsics::_linkToSpecial: + return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_linkToStaticOrSpecial); + case vmIntrinsics::_linkToInterface: + return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_linkToInterface); + case vmIntrinsics::_linkToVirtual: + return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_linkToVirtual); + default: + ShouldNotReachHere(); + return NULL; + } } diff --git a/src/cpu/zero/vm/methodHandles_zero.hpp b/src/cpu/zero/vm/methodHandles_zero.hpp index a71fce34f12048b4b7c141abea214a8a3cc00107..f4eec005c4d712ebb43f80c7274148b933cf93a7 100644 --- a/src/cpu/zero/vm/methodHandles_zero.hpp +++ b/src/cpu/zero/vm/methodHandles_zero.hpp @@ -26,6 +26,14 @@ // Adapters enum /* platform_dependent_constants */ { - adapter_code_size = 0 + adapter_code_size = sizeof(ZeroEntry) * (Interpreter::method_handle_invoke_LAST - Interpreter::method_handle_invoke_FIRST + 1) }; +private: + static oop popFromStack(TRAPS); + static void invoke_target(Method* method, TRAPS); + static int method_handle_entry_invokeBasic(Method* method, intptr_t UNUSED, TRAPS); + static int method_handle_entry_linkToStaticOrSpecial(Method* method, intptr_t UNUSED, TRAPS); + static int method_handle_entry_linkToVirtual(Method* method, intptr_t UNUSED, TRAPS); + static int method_handle_entry_linkToInterface(Method* method, intptr_t UNUSED, TRAPS); + static int method_handle_entry_invalid(Method* method, intptr_t UNUSED, TRAPS); diff --git a/src/cpu/zero/vm/register_zero.hpp b/src/cpu/zero/vm/register_zero.hpp index 0bcc763824878b865857a8255a59fe8a5821a0d4..1ce7141adafeee337a60e5dc79383b7ea8a7ba4b 100644 --- a/src/cpu/zero/vm/register_zero.hpp +++ b/src/cpu/zero/vm/register_zero.hpp @@ -114,5 +114,8 @@ class ConcreteRegisterImpl : public AbstractRegisterImpl { }; CONSTANT_REGISTER_DECLARATION(Register, noreg, (-1)); +#ifndef DONT_USE_REGISTER_DEFINES +#define noreg ((Register)(noreg_RegisterEnumValue)) +#endif #endif // CPU_ZERO_VM_REGISTER_ZERO_HPP diff --git a/src/cpu/zero/vm/relocInfo_zero.cpp b/src/cpu/zero/vm/relocInfo_zero.cpp index 13f095e474678496dadd378425d9d8390eb065a3..ed7ee7bca6a344a4a0d6a86fec733d63c91e5fbc 100644 --- a/src/cpu/zero/vm/relocInfo_zero.cpp +++ b/src/cpu/zero/vm/relocInfo_zero.cpp @@ -77,3 +77,7 @@ void poll_return_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dst) { ShouldNotCallThis(); } + +void metadata_Relocation::pd_fix_value(address x) { + ShouldNotCallThis(); +} diff --git a/src/cpu/zero/vm/sharedRuntime_zero.cpp b/src/cpu/zero/vm/sharedRuntime_zero.cpp index 3cb8cd7e46af1f25bedc92be549458cd40cc47ad..123d71ec044680ecf1211d6aa8ba428d1a54f000 100644 --- a/src/cpu/zero/vm/sharedRuntime_zero.cpp +++ b/src/cpu/zero/vm/sharedRuntime_zero.cpp @@ -35,6 +35,7 @@ #include "runtime/sharedRuntime.hpp" #include "runtime/vframeArray.hpp" #include "vmreg_zero.inline.hpp" + #ifdef COMPILER1 #include "c1/c1_Runtime1.hpp" #endif @@ -47,6 +48,12 @@ #endif + +static address zero_null_code_stub() { + address 
start = ShouldNotCallThisStub(); + return start; +} + int SharedRuntime::java_calling_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed, @@ -63,16 +70,14 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters( AdapterFingerPrint *fingerprint) { return AdapterHandlerLibrary::new_entry( fingerprint, - ShouldNotCallThisStub(), - ShouldNotCallThisStub(), - ShouldNotCallThisStub()); + CAST_FROM_FN_PTR(address,zero_null_code_stub), + CAST_FROM_FN_PTR(address,zero_null_code_stub), + CAST_FROM_FN_PTR(address,zero_null_code_stub)); } nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, methodHandle method, int compile_id, - int total_args_passed, - int max_arg, BasicType *sig_bt, VMRegPair *regs, BasicType ret_type) { @@ -96,19 +101,20 @@ uint SharedRuntime::out_preserve_stack_slots() { ShouldNotCallThis(); } +JRT_LEAF(void, zero_stub()) + ShouldNotCallThis(); +JRT_END + static RuntimeStub* generate_empty_runtime_stub(const char* name) { - CodeBuffer buffer(name, 0, 0); - return RuntimeStub::new_runtime_stub(name, &buffer, 0, 0, NULL, false); + return CAST_FROM_FN_PTR(RuntimeStub*,zero_stub); } static SafepointBlob* generate_empty_safepoint_blob() { - CodeBuffer buffer("handler_blob", 0, 0); - return SafepointBlob::create(&buffer, NULL, 0); + return CAST_FROM_FN_PTR(SafepointBlob*,zero_stub); } static DeoptimizationBlob* generate_empty_deopt_blob() { - CodeBuffer buffer("handler_blob", 0, 0); - return DeoptimizationBlob::create(&buffer, NULL, 0, 0, 0, 0); + return CAST_FROM_FN_PTR(DeoptimizationBlob*,zero_stub); } @@ -116,7 +122,7 @@ void SharedRuntime::generate_deopt_blob() { _deopt_blob = generate_empty_deopt_blob(); } -SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, bool cause_return) { +SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) { return generate_empty_safepoint_blob(); } @@ -124,6 +130,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha return generate_empty_runtime_stub("resolve_blob"); } + int SharedRuntime::c_calling_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed) { diff --git a/src/os/bsd/vm/attachListener_bsd.cpp b/src/os/bsd/vm/attachListener_bsd.cpp index 53fa2e26e65a915e1e9c0be8dc0f82b5be636a42..81fba25efd8fa61a24c5a7b2598650eafa355702 100644 --- a/src/os/bsd/vm/attachListener_bsd.cpp +++ b/src/os/bsd/vm/attachListener_bsd.cpp @@ -342,7 +342,6 @@ BsdAttachOperation* BsdAttachListener::dequeue() { // get the credentials of the peer and check the effective uid/guid // - check with jeff on this. 
-#ifdef _ALLBSD_SOURCE uid_t puid; gid_t pgid; if (::getpeereid(s, &puid, &pgid) != 0) { @@ -350,17 +349,6 @@ BsdAttachOperation* BsdAttachListener::dequeue() { RESTARTABLE(::close(s), res); continue; } -#else - struct ucred cred_info; - socklen_t optlen = sizeof(cred_info); - if (::getsockopt(s, SOL_SOCKET, SO_PEERCRED, (void*)&cred_info, &optlen) == -1) { - int res; - RESTARTABLE(::close(s), res); - continue; - } - uid_t puid = cred_info.uid; - gid_t pgid = cred_info.gid; -#endif uid_t euid = geteuid(); gid_t egid = getegid(); diff --git a/src/os/bsd/vm/osThread_bsd.hpp b/src/os/bsd/vm/osThread_bsd.hpp index c72b36025f68e2c569ba69efef39e4b78cc367cc..a5d5039f42d5c1dbe9cd26532cf75c3f411aba8e 100644 --- a/src/os/bsd/vm/osThread_bsd.hpp +++ b/src/os/bsd/vm/osThread_bsd.hpp @@ -39,18 +39,12 @@ private: -#ifdef _ALLBSD_SOURCE - #ifdef __APPLE__ typedef thread_t thread_id_t; #else typedef pthread_t thread_id_t; #endif -#else - typedef pid_t thread_id_t; -#endif - // _pthread_id is the pthread id, which is used by library calls // (e.g. pthread_kill). pthread_t _pthread_id; diff --git a/src/os/bsd/vm/os_bsd.cpp b/src/os/bsd/vm/os_bsd.cpp index 984e25cb94287aa94e9fec87d7ba0f85e6d980bd..9a845fc9ec8bf7a653956c5f6d65012e3e9cd403 100644 --- a/src/os/bsd/vm/os_bsd.cpp +++ b/src/os/bsd/vm/os_bsd.cpp @@ -108,14 +108,8 @@ # include # include # include -#ifdef _ALLBSD_SOURCE # include # include -#else -# include -# include -# include -#endif # include # include #ifndef __APPLE__ @@ -150,25 +144,10 @@ // global variables julong os::Bsd::_physical_memory = 0; -#ifndef _ALLBSD_SOURCE -address os::Bsd::_initial_thread_stack_bottom = NULL; -uintptr_t os::Bsd::_initial_thread_stack_size = 0; -#endif int (*os::Bsd::_clock_gettime)(clockid_t, struct timespec *) = NULL; -#ifndef _ALLBSD_SOURCE -int (*os::Bsd::_pthread_getcpuclockid)(pthread_t, clockid_t *) = NULL; -Mutex* os::Bsd::_createThread_lock = NULL; -#endif pthread_t os::Bsd::_main_thread; int os::Bsd::_page_size = -1; -#ifndef _ALLBSD_SOURCE -bool os::Bsd::_is_floating_stack = false; -bool os::Bsd::_is_NPTL = false; -bool os::Bsd::_supports_fast_thread_cpu_time = false; -const char * os::Bsd::_glibc_version = NULL; -const char * os::Bsd::_libpthread_version = NULL; -#endif static jlong initial_time_count=0; @@ -176,7 +155,7 @@ static int clock_tics_per_sec = 100; // For diagnostics to print a message once. 
see run_periodic_checks static sigset_t check_signal_done; -static bool check_signals = true;; +static bool check_signals = true; static pid_t _initial_pid = 0; @@ -198,16 +177,8 @@ julong os::available_memory() { } julong os::Bsd::available_memory() { -#ifdef _ALLBSD_SOURCE // XXXBSD: this is just a stopgap implementation return physical_memory() >> 2; -#else - // values in struct sysinfo are "unsigned long" - struct sysinfo si; - sysinfo(&si); - - return (julong)si.freeram * si.mem_unit; -#endif } julong os::physical_memory() { @@ -255,22 +226,6 @@ bool os::have_special_privileges() { } -#ifndef _ALLBSD_SOURCE -#ifndef SYS_gettid -// i386: 224, ia64: 1105, amd64: 186, sparc 143 -#ifdef __ia64__ -#define SYS_gettid 1105 -#elif __i386__ -#define SYS_gettid 224 -#elif __amd64__ -#define SYS_gettid 186 -#elif __sparc__ -#define SYS_gettid 143 -#else -#error define gettid for the arch -#endif -#endif -#endif // Cpu architecture string #if defined(ZERO) @@ -302,36 +257,7 @@ static char cpu_arch[] = "sparc"; #define COMPILER_VARIANT "client" #endif -#ifndef _ALLBSD_SOURCE -// pid_t gettid() -// -// Returns the kernel thread id of the currently running thread. Kernel -// thread id is used to access /proc. -// -// (Note that getpid() on BsdThreads returns kernel thread id too; but -// on NPTL, it returns the same pid for all threads, as required by POSIX.) -// -pid_t os::Bsd::gettid() { - int rslt = syscall(SYS_gettid); - if (rslt == -1) { - // old kernel, no NPTL support - return getpid(); - } else { - return (pid_t)rslt; - } -} - -// Most versions of bsd have a bug where the number of processors are -// determined by looking at the /proc file system. In a chroot environment, -// the system call returns 1. This causes the VM to act as if it is -// a single processor and elide locking (see is_MP() call). -static bool unsafe_chroot_detected = false; -static const char *unstable_chroot_error = "/proc file system not found.\n" - "Java may be unstable running multithreaded in a chroot " - "environment on Bsd when /proc filesystem is not mounted."; -#endif -#ifdef _ALLBSD_SOURCE void os::Bsd::initialize_system_info() { int mib[2]; size_t len; @@ -370,24 +296,6 @@ void os::Bsd::initialize_system_info() { } #endif } -#else -void os::Bsd::initialize_system_info() { - set_processor_count(sysconf(_SC_NPROCESSORS_CONF)); - if (processor_count() == 1) { - pid_t pid = os::Bsd::gettid(); - char fname[32]; - jio_snprintf(fname, sizeof(fname), "/proc/%d", pid); - FILE *fp = fopen(fname, "r"); - if (fp == NULL) { - unsafe_chroot_detected = true; - } else { - fclose(fp); - } - } - _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE); - assert(processor_count() > 0, "bsd error"); -} -#endif #ifdef __APPLE__ static const char *get_home() { @@ -744,171 +652,6 @@ void os::Bsd::hotspot_sigmask(Thread* thread) { } } -#ifndef _ALLBSD_SOURCE -////////////////////////////////////////////////////////////////////////////// -// detecting pthread library - -void os::Bsd::libpthread_init() { - // Save glibc and pthread version strings. Note that _CS_GNU_LIBC_VERSION - // and _CS_GNU_LIBPTHREAD_VERSION are supported in glibc >= 2.3.2. Use a - // generic name for earlier versions. - // Define macros here so we can build HotSpot on old systems. 
-# ifndef _CS_GNU_LIBC_VERSION -# define _CS_GNU_LIBC_VERSION 2 -# endif -# ifndef _CS_GNU_LIBPTHREAD_VERSION -# define _CS_GNU_LIBPTHREAD_VERSION 3 -# endif - - size_t n = confstr(_CS_GNU_LIBC_VERSION, NULL, 0); - if (n > 0) { - char *str = (char *)malloc(n); - confstr(_CS_GNU_LIBC_VERSION, str, n); - os::Bsd::set_glibc_version(str); - } else { - // _CS_GNU_LIBC_VERSION is not supported, try gnu_get_libc_version() - static char _gnu_libc_version[32]; - jio_snprintf(_gnu_libc_version, sizeof(_gnu_libc_version), - "glibc %s %s", gnu_get_libc_version(), gnu_get_libc_release()); - os::Bsd::set_glibc_version(_gnu_libc_version); - } - - n = confstr(_CS_GNU_LIBPTHREAD_VERSION, NULL, 0); - if (n > 0) { - char *str = (char *)malloc(n); - confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n); - // Vanilla RH-9 (glibc 2.3.2) has a bug that confstr() always tells - // us "NPTL-0.29" even we are running with BsdThreads. Check if this - // is the case. BsdThreads has a hard limit on max number of threads. - // So sysconf(_SC_THREAD_THREADS_MAX) will return a positive value. - // On the other hand, NPTL does not have such a limit, sysconf() - // will return -1 and errno is not changed. Check if it is really NPTL. - if (strcmp(os::Bsd::glibc_version(), "glibc 2.3.2") == 0 && - strstr(str, "NPTL") && - sysconf(_SC_THREAD_THREADS_MAX) > 0) { - free(str); - os::Bsd::set_libpthread_version("bsdthreads"); - } else { - os::Bsd::set_libpthread_version(str); - } - } else { - // glibc before 2.3.2 only has BsdThreads. - os::Bsd::set_libpthread_version("bsdthreads"); - } - - if (strstr(libpthread_version(), "NPTL")) { - os::Bsd::set_is_NPTL(); - } else { - os::Bsd::set_is_BsdThreads(); - } - - // BsdThreads have two flavors: floating-stack mode, which allows variable - // stack size; and fixed-stack mode. NPTL is always floating-stack. - if (os::Bsd::is_NPTL() || os::Bsd::supports_variable_stack_size()) { - os::Bsd::set_is_floating_stack(); - } -} - -///////////////////////////////////////////////////////////////////////////// -// thread stack - -// Force Bsd kernel to expand current thread stack. If "bottom" is close -// to the stack guard, caller should block all signals. -// -// MAP_GROWSDOWN: -// A special mmap() flag that is used to implement thread stacks. It tells -// kernel that the memory region should extend downwards when needed. This -// allows early versions of BsdThreads to only mmap the first few pages -// when creating a new thread. Bsd kernel will automatically expand thread -// stack as needed (on page faults). -// -// However, because the memory region of a MAP_GROWSDOWN stack can grow on -// demand, if a page fault happens outside an already mapped MAP_GROWSDOWN -// region, it's hard to tell if the fault is due to a legitimate stack -// access or because of reading/writing non-exist memory (e.g. buffer -// overrun). As a rule, if the fault happens below current stack pointer, -// Bsd kernel does not expand stack, instead a SIGSEGV is sent to the -// application (see Bsd kernel fault.c). -// -// This Bsd feature can cause SIGSEGV when VM bangs thread stack for -// stack overflow detection. -// -// Newer version of BsdThreads (since glibc-2.2, or, RH-7.x) and NPTL do -// not use this flag. However, the stack of initial thread is not created -// by pthread, it is still MAP_GROWSDOWN. Also it's possible (though -// unlikely) that user code can create a thread with MAP_GROWSDOWN stack -// and then attach the thread to JVM. 
-// -// To get around the problem and allow stack banging on Bsd, we need to -// manually expand thread stack after receiving the SIGSEGV. -// -// There are two ways to expand thread stack to address "bottom", we used -// both of them in JVM before 1.5: -// 1. adjust stack pointer first so that it is below "bottom", and then -// touch "bottom" -// 2. mmap() the page in question -// -// Now alternate signal stack is gone, it's harder to use 2. For instance, -// if current sp is already near the lower end of page 101, and we need to -// call mmap() to map page 100, it is possible that part of the mmap() frame -// will be placed in page 100. When page 100 is mapped, it is zero-filled. -// That will destroy the mmap() frame and cause VM to crash. -// -// The following code works by adjusting sp first, then accessing the "bottom" -// page to force a page fault. Bsd kernel will then automatically expand the -// stack mapping. -// -// _expand_stack_to() assumes its frame size is less than page size, which -// should always be true if the function is not inlined. - -#if __GNUC__ < 3 // gcc 2.x does not support noinline attribute -#define NOINLINE -#else -#define NOINLINE __attribute__ ((noinline)) -#endif - -static void _expand_stack_to(address bottom) NOINLINE; - -static void _expand_stack_to(address bottom) { - address sp; - size_t size; - volatile char *p; - - // Adjust bottom to point to the largest address within the same page, it - // gives us a one-page buffer if alloca() allocates slightly more memory. - bottom = (address)align_size_down((uintptr_t)bottom, os::Bsd::page_size()); - bottom += os::Bsd::page_size() - 1; - - // sp might be slightly above current stack pointer; if that's the case, we - // will alloca() a little more space than necessary, which is OK. Don't use - // os::current_stack_pointer(), as its result can be slightly below current - // stack pointer, causing us to not alloca enough to reach "bottom". - sp = (address)&sp; - - if (sp > bottom) { - size = sp - bottom; - p = (volatile char *)alloca(size); - assert(p != NULL && p <= (volatile char *)bottom, "alloca problem?"); - p[0] = '\0'; - } -} - -bool os::Bsd::manually_expand_stack(JavaThread * t, address addr) { - assert(t!=NULL, "just checking"); - assert(t->osthread()->expanding_stack(), "expand should be set"); - assert(t->stack_base() != NULL, "stack_base was not initialized"); - - if (addr < t->stack_base() && addr >= t->stack_yellow_zone_base()) { - sigset_t mask_all, old_sigset; - sigfillset(&mask_all); - pthread_sigmask(SIG_SETMASK, &mask_all, &old_sigset); - _expand_stack_to(addr); - pthread_sigmask(SIG_SETMASK, &old_sigset, NULL); - return true; - } - return false; -} -#endif ////////////////////////////////////////////////////////////////////////////// // create new thread @@ -917,43 +660,7 @@ static address highest_vm_reserved_address(); // check if it's safe to start a new thread static bool _thread_safety_check(Thread* thread) { -#ifdef _ALLBSD_SOURCE - return true; -#else - if (os::Bsd::is_BsdThreads() && !os::Bsd::is_floating_stack()) { - // Fixed stack BsdThreads (SuSE Bsd/x86, and some versions of Redhat) - // Heap is mmap'ed at lower end of memory space. Thread stacks are - // allocated (MAP_FIXED) from high address space. Every thread stack - // occupies a fixed size slot (usually 2Mbytes, but user can change - // it to other values if they rebuild BsdThreads). - // - // Problem with MAP_FIXED is that mmap() can still succeed even part of - // the memory region has already been mmap'ed. 
That means if we have too - // many threads and/or very large heap, eventually thread stack will - // collide with heap. - // - // Here we try to prevent heap/stack collision by comparing current - // stack bottom with the highest address that has been mmap'ed by JVM - // plus a safety margin for memory maps created by native code. - // - // This feature can be disabled by setting ThreadSafetyMargin to 0 - // - if (ThreadSafetyMargin > 0) { - address stack_bottom = os::current_stack_base() - os::current_stack_size(); - - // not safe if our stack extends below the safety margin - return stack_bottom - ThreadSafetyMargin >= highest_vm_reserved_address(); - } else { - return true; - } - } else { - // Floating stack BsdThreads or NPTL: - // Unlike fixed stack BsdThreads, thread stacks are not MAP_FIXED. When - // there's not enough space left, pthread_create() will fail. If we come - // here, that means enough space has been reserved for stack. - return true; - } -#endif + return true; } #ifdef __APPLE__ @@ -991,24 +698,12 @@ static void *java_start(Thread *thread) { return NULL; } -#ifdef _ALLBSD_SOURCE #ifdef __APPLE__ // thread_id is mach thread on macos osthread->set_thread_id(::mach_thread_self()); #else // thread_id is pthread_id on BSD osthread->set_thread_id(::pthread_self()); -#endif -#else - // thread_id is kernel thread id (similar to Solaris LWP id) - osthread->set_thread_id(os::Bsd::gettid()); - - if (UseNUMA) { - int lgrp_id = os::numa_get_group_id(); - if (lgrp_id != -1) { - thread->set_lgrp_id(lgrp_id); - } - } #endif // initialize signal mask for this thread os::Bsd::hotspot_sigmask(thread); @@ -1099,23 +794,9 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) { // let pthread_create() pick the default value. 
} -#ifndef _ALLBSD_SOURCE - // glibc guard page - pthread_attr_setguardsize(&attr, os::Bsd::default_guard_size(thr_type)); -#endif - ThreadState state; { - -#ifndef _ALLBSD_SOURCE - // Serialize thread creation if we are running with fixed stack BsdThreads - bool lock = os::Bsd::is_BsdThreads() && !os::Bsd::is_floating_stack(); - if (lock) { - os::Bsd::createThread_lock()->lock_without_safepoint_check(); - } -#endif - pthread_t tid; int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread); @@ -1128,9 +809,6 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) { // Need to clean up stuff we've allocated so far thread->set_osthread(NULL); delete osthread; -#ifndef _ALLBSD_SOURCE - if (lock) os::Bsd::createThread_lock()->unlock(); -#endif return false; } @@ -1146,11 +824,6 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) { } } -#ifndef _ALLBSD_SOURCE - if (lock) { - os::Bsd::createThread_lock()->unlock(); - } -#endif } // Aborted due to thread limit being reached @@ -1188,14 +861,10 @@ bool os::create_attached_thread(JavaThread* thread) { } // Store pthread info into the OSThread -#ifdef _ALLBSD_SOURCE #ifdef __APPLE__ osthread->set_thread_id(::mach_thread_self()); #else osthread->set_thread_id(::pthread_self()); -#endif -#else - osthread->set_thread_id(os::Bsd::gettid()); #endif osthread->set_pthread_id(::pthread_self()); @@ -1207,35 +876,6 @@ bool os::create_attached_thread(JavaThread* thread) { thread->set_osthread(osthread); -#ifndef _ALLBSD_SOURCE - if (UseNUMA) { - int lgrp_id = os::numa_get_group_id(); - if (lgrp_id != -1) { - thread->set_lgrp_id(lgrp_id); - } - } - - if (os::Bsd::is_initial_thread()) { - // If current thread is initial thread, its stack is mapped on demand, - // see notes about MAP_GROWSDOWN. Here we try to force kernel to map - // the entire stack region to avoid SEGV in stack banging. - // It is also useful to get around the heap-stack-gap problem on SuSE - // kernel (see 4821821 for details). We first expand stack to the top - // of yellow zone, then enable stack yellow zone (order is significant, - // enabling yellow zone first will crash JVM on SuSE Bsd), so there - // is no gap between the last two virtual memory regions. - - JavaThread *jt = (JavaThread *)thread; - address addr = jt->stack_yellow_zone_base(); - assert(addr != NULL, "initialization problem?"); - assert(jt->stack_available(addr) > 0, "stack guard should not be enabled"); - - osthread->set_expanding_stack(); - os::Bsd::manually_expand_stack(jt, addr); - osthread->clear_expanding_stack(); - } -#endif - // initialize signal mask for this thread // and save the caller's signal mask os::Bsd::hotspot_sigmask(thread); @@ -1290,247 +930,6 @@ extern "C" Thread* get_thread() { return ThreadLocalStorage::thread(); } -////////////////////////////////////////////////////////////////////////////// -// initial thread - -#ifndef _ALLBSD_SOURCE -// Check if current thread is the initial thread, similar to Solaris thr_main. -bool os::Bsd::is_initial_thread(void) { - char dummy; - // If called before init complete, thread stack bottom will be null. - // Can be called if fatal error occurs before initialization. 
- if (initial_thread_stack_bottom() == NULL) return false; - assert(initial_thread_stack_bottom() != NULL && - initial_thread_stack_size() != 0, - "os::init did not locate initial thread's stack region"); - if ((address)&dummy >= initial_thread_stack_bottom() && - (address)&dummy < initial_thread_stack_bottom() + initial_thread_stack_size()) - return true; - else return false; -} - -// Find the virtual memory area that contains addr -static bool find_vma(address addr, address* vma_low, address* vma_high) { - FILE *fp = fopen("/proc/self/maps", "r"); - if (fp) { - address low, high; - while (!feof(fp)) { - if (fscanf(fp, "%p-%p", &low, &high) == 2) { - if (low <= addr && addr < high) { - if (vma_low) *vma_low = low; - if (vma_high) *vma_high = high; - fclose (fp); - return true; - } - } - for (;;) { - int ch = fgetc(fp); - if (ch == EOF || ch == (int)'\n') break; - } - } - fclose(fp); - } - return false; -} - -// Locate initial thread stack. This special handling of initial thread stack -// is needed because pthread_getattr_np() on most (all?) Bsd distros returns -// bogus value for initial thread. -void os::Bsd::capture_initial_stack(size_t max_size) { - // stack size is the easy part, get it from RLIMIT_STACK - size_t stack_size; - struct rlimit rlim; - getrlimit(RLIMIT_STACK, &rlim); - stack_size = rlim.rlim_cur; - - // 6308388: a bug in ld.so will relocate its own .data section to the - // lower end of primordial stack; reduce ulimit -s value a little bit - // so we won't install guard page on ld.so's data section. - stack_size -= 2 * page_size(); - - // 4441425: avoid crash with "unlimited" stack size on SuSE 7.1 or Redhat - // 7.1, in both cases we will get 2G in return value. - // 4466587: glibc 2.2.x compiled w/o "--enable-kernel=2.4.0" (RH 7.0, - // SuSE 7.2, Debian) can not handle alternate signal stack correctly - // for initial thread if its stack size exceeds 6M. Cap it at 2M, - // in case other parts in glibc still assumes 2M max stack size. - // FIXME: alt signal stack is gone, maybe we can relax this constraint? -#ifndef IA64 - if (stack_size > 2 * K * K) stack_size = 2 * K * K; -#else - // Problem still exists RH7.2 (IA64 anyway) but 2MB is a little small - if (stack_size > 4 * K * K) stack_size = 4 * K * K; -#endif - - // Try to figure out where the stack base (top) is. This is harder. - // - // When an application is started, glibc saves the initial stack pointer in - // a global variable "__libc_stack_end", which is then used by system - // libraries. __libc_stack_end should be pretty close to stack top. The - // variable is available since the very early days. However, because it is - // a private interface, it could disappear in the future. - // - // Bsd kernel saves start_stack information in /proc//stat. Similar - // to __libc_stack_end, it is very close to stack top, but isn't the real - // stack top. Note that /proc may not exist if VM is running as a chroot - // program, so reading /proc//stat could fail. Also the contents of - // /proc//stat could change in the future (though unlikely). - // - // We try __libc_stack_end first. If that doesn't work, look for - // /proc//stat. If neither of them works, we use current stack pointer - // as a hint, which should work well in most cases. 
- - uintptr_t stack_start; - - // try __libc_stack_end first - uintptr_t *p = (uintptr_t *)dlsym(RTLD_DEFAULT, "__libc_stack_end"); - if (p && *p) { - stack_start = *p; - } else { - // see if we can get the start_stack field from /proc/self/stat - FILE *fp; - int pid; - char state; - int ppid; - int pgrp; - int session; - int nr; - int tpgrp; - unsigned long flags; - unsigned long minflt; - unsigned long cminflt; - unsigned long majflt; - unsigned long cmajflt; - unsigned long utime; - unsigned long stime; - long cutime; - long cstime; - long prio; - long nice; - long junk; - long it_real; - uintptr_t start; - uintptr_t vsize; - intptr_t rss; - uintptr_t rsslim; - uintptr_t scodes; - uintptr_t ecode; - int i; - - // Figure what the primordial thread stack base is. Code is inspired - // by email from Hans Boehm. /proc/self/stat begins with current pid, - // followed by command name surrounded by parentheses, state, etc. - char stat[2048]; - int statlen; - - fp = fopen("/proc/self/stat", "r"); - if (fp) { - statlen = fread(stat, 1, 2047, fp); - stat[statlen] = '\0'; - fclose(fp); - - // Skip pid and the command string. Note that we could be dealing with - // weird command names, e.g. user could decide to rename java launcher - // to "java 1.4.2 :)", then the stat file would look like - // 1234 (java 1.4.2 :)) R ... ... - // We don't really need to know the command string, just find the last - // occurrence of ")" and then start parsing from there. See bug 4726580. - char * s = strrchr(stat, ')'); - - i = 0; - if (s) { - // Skip blank chars - do s++; while (isspace(*s)); - -#define _UFM UINTX_FORMAT -#define _DFM INTX_FORMAT - - /* 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 */ - /* 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 */ - i = sscanf(s, "%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld " _UFM _UFM _DFM _UFM _UFM _UFM _UFM, - &state, /* 3 %c */ - &ppid, /* 4 %d */ - &pgrp, /* 5 %d */ - &session, /* 6 %d */ - &nr, /* 7 %d */ - &tpgrp, /* 8 %d */ - &flags, /* 9 %lu */ - &minflt, /* 10 %lu */ - &cminflt, /* 11 %lu */ - &majflt, /* 12 %lu */ - &cmajflt, /* 13 %lu */ - &utime, /* 14 %lu */ - &stime, /* 15 %lu */ - &cutime, /* 16 %ld */ - &cstime, /* 17 %ld */ - &prio, /* 18 %ld */ - &nice, /* 19 %ld */ - &junk, /* 20 %ld */ - &it_real, /* 21 %ld */ - &start, /* 22 UINTX_FORMAT */ - &vsize, /* 23 UINTX_FORMAT */ - &rss, /* 24 INTX_FORMAT */ - &rsslim, /* 25 UINTX_FORMAT */ - &scodes, /* 26 UINTX_FORMAT */ - &ecode, /* 27 UINTX_FORMAT */ - &stack_start); /* 28 UINTX_FORMAT */ - } - -#undef _UFM -#undef _DFM - - if (i != 28 - 2) { - assert(false, "Bad conversion from /proc/self/stat"); - // product mode - assume we are the initial thread, good luck in the - // embedded case. - warning("Can't detect initial thread stack location - bad conversion"); - stack_start = (uintptr_t) &rlim; - } - } else { - // For some reason we can't open /proc/self/stat (for example, running on - // FreeBSD with a Bsd emulator, or inside chroot), this should work for - // most cases, so don't abort: - warning("Can't detect initial thread stack location - no /proc/self/stat"); - stack_start = (uintptr_t) &rlim; - } - } - - // Now we have a pointer (stack_start) very close to the stack top, the - // next thing to do is to figure out the exact location of stack top. We - // can find out the virtual memory area that contains stack_start by - // reading /proc/self/maps, it should be the last vma in /proc/self/maps, - // and its upper limit is the real stack top. 
(again, this would fail if - // running inside chroot, because /proc may not exist.) - - uintptr_t stack_top; - address low, high; - if (find_vma((address)stack_start, &low, &high)) { - // success, "high" is the true stack top. (ignore "low", because initial - // thread stack grows on demand, its real bottom is high - RLIMIT_STACK.) - stack_top = (uintptr_t)high; - } else { - // failed, likely because /proc/self/maps does not exist - warning("Can't detect initial thread stack location - find_vma failed"); - // best effort: stack_start is normally within a few pages below the real - // stack top, use it as stack top, and reduce stack size so we won't put - // guard page outside stack. - stack_top = stack_start; - stack_size -= 16 * page_size(); - } - - // stack_top could be partially down the page so align it - stack_top = align_size_up(stack_top, page_size()); - - if (max_size && stack_size > max_size) { - _initial_thread_stack_size = max_size; - } else { - _initial_thread_stack_size = stack_size; - } - - _initial_thread_stack_size = align_size_down(_initial_thread_stack_size, page_size()); - _initial_thread_stack_bottom = (address)stack_top - _initial_thread_stack_size; -} -#endif //////////////////////////////////////////////////////////////////////////////// // time support @@ -1576,7 +975,7 @@ jlong os::javaTimeMillis() { void os::Bsd::clock_init() { // XXXDARWIN: Investigate replacement monotonic clock } -#elif defined(_ALLBSD_SOURCE) +#else void os::Bsd::clock_init() { struct timespec res; struct timespec tp; @@ -1586,86 +985,8 @@ void os::Bsd::clock_init() { _clock_gettime = ::clock_gettime; } } -#else -void os::Bsd::clock_init() { - // we do dlopen's in this particular order due to bug in bsd - // dynamical loader (see 6348968) leading to crash on exit - void* handle = dlopen("librt.so.1", RTLD_LAZY); - if (handle == NULL) { - handle = dlopen("librt.so", RTLD_LAZY); - } - - if (handle) { - int (*clock_getres_func)(clockid_t, struct timespec*) = - (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_getres"); - int (*clock_gettime_func)(clockid_t, struct timespec*) = - (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_gettime"); - if (clock_getres_func && clock_gettime_func) { - // See if monotonic clock is supported by the kernel. Note that some - // early implementations simply return kernel jiffies (updated every - // 1/100 or 1/1000 second). It would be bad to use such a low res clock - // for nano time (though the monotonic property is still nice to have). - // It's fixed in newer kernels, however clock_getres() still returns - // 1/HZ. We check if clock_getres() works, but will ignore its reported - // resolution for now. Hopefully as people move to new kernels, this - // won't be a problem. 
- struct timespec res; - struct timespec tp; - if (clock_getres_func (CLOCK_MONOTONIC, &res) == 0 && - clock_gettime_func(CLOCK_MONOTONIC, &tp) == 0) { - // yes, monotonic clock is supported - _clock_gettime = clock_gettime_func; - } else { - // close librt if there is no monotonic clock - dlclose(handle); - } - } - } -} -#endif - -#ifndef _ALLBSD_SOURCE -#ifndef SYS_clock_getres - -#if defined(IA32) || defined(AMD64) -#define SYS_clock_getres IA32_ONLY(266) AMD64_ONLY(229) -#define sys_clock_getres(x,y) ::syscall(SYS_clock_getres, x, y) -#else -#warning "SYS_clock_getres not defined for this platform, disabling fast_thread_cpu_time" -#define sys_clock_getres(x,y) -1 #endif -#else -#define sys_clock_getres(x,y) ::syscall(SYS_clock_getres, x, y) -#endif - -void os::Bsd::fast_thread_clock_init() { - if (!UseBsdPosixThreadCPUClocks) { - return; - } - clockid_t clockid; - struct timespec tp; - int (*pthread_getcpuclockid_func)(pthread_t, clockid_t *) = - (int(*)(pthread_t, clockid_t *)) dlsym(RTLD_DEFAULT, "pthread_getcpuclockid"); - - // Switch to using fast clocks for thread cpu time if - // the sys_clock_getres() returns 0 error code. - // Note, that some kernels may support the current thread - // clock (CLOCK_THREAD_CPUTIME_ID) but not the clocks - // returned by the pthread_getcpuclockid(). - // If the fast Posix clocks are supported then the sys_clock_getres() - // must return at least tp.tv_sec == 0 which means a resolution - // better than 1 sec. This is extra check for reliability. - - if(pthread_getcpuclockid_func && - pthread_getcpuclockid_func(_main_thread, &clockid) == 0 && - sys_clock_getres(clockid, &tp) == 0 && tp.tv_sec == 0) { - - _supports_fast_thread_cpu_time = true; - _pthread_getcpuclockid = pthread_getcpuclockid_func; - } -} -#endif jlong os::javaTimeNanos() { if (Bsd::supports_monotonic_clock()) { @@ -1978,7 +1299,6 @@ bool os::dll_address_to_function_name(address addr, char *buf, return false; } -#ifdef _ALLBSD_SOURCE // ported from solaris version bool os::dll_address_to_library_name(address addr, char* buf, int buflen, int* offset) { @@ -1994,86 +1314,10 @@ bool os::dll_address_to_library_name(address addr, char* buf, return false; } } -#else -struct _address_to_library_name { - address addr; // input : memory address - size_t buflen; // size of fname - char* fname; // output: library name - address base; // library base addr -}; -static int address_to_library_name_callback(struct dl_phdr_info *info, - size_t size, void *data) { - int i; - bool found = false; - address libbase = NULL; - struct _address_to_library_name * d = (struct _address_to_library_name *)data; - - // iterate through all loadable segments - for (i = 0; i < info->dlpi_phnum; i++) { - address segbase = (address)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr); - if (info->dlpi_phdr[i].p_type == PT_LOAD) { - // base address of a library is the lowest address of its loaded - // segments. - if (libbase == NULL || libbase > segbase) { - libbase = segbase; - } - // see if 'addr' is within current segment - if (segbase <= d->addr && - d->addr < segbase + info->dlpi_phdr[i].p_memsz) { - found = true; - } - } - } - - // dlpi_name is NULL or empty if the ELF file is executable, return 0 - // so dll_address_to_library_name() can fall through to use dladdr() which - // can figure out executable name from argv[0]. 
- if (found && info->dlpi_name && info->dlpi_name[0]) { - d->base = libbase; - if (d->fname) { - jio_snprintf(d->fname, d->buflen, "%s", info->dlpi_name); - } - return 1; - } - return 0; -} - -bool os::dll_address_to_library_name(address addr, char* buf, - int buflen, int* offset) { - Dl_info dlinfo; - struct _address_to_library_name data; - - // There is a bug in old glibc dladdr() implementation that it could resolve - // to wrong library name if the .so file has a base address != NULL. Here - // we iterate through the program headers of all loaded libraries to find - // out which library 'addr' really belongs to. This workaround can be - // removed once the minimum requirement for glibc is moved to 2.3.x. - data.addr = addr; - data.fname = buf; - data.buflen = buflen; - data.base = NULL; - int rslt = dl_iterate_phdr(address_to_library_name_callback, (void *)&data); - - if (rslt) { - // buf already contains library name - if (offset) *offset = addr - data.base; - return true; - } else if (dladdr((void*)addr, &dlinfo)){ - if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname); - if (offset) *offset = addr - (address)dlinfo.dli_fbase; - return true; - } else { - if (buf) buf[0] = '\0'; - if (offset) *offset = -1; - return false; - } -} -#endif - - // Loads .dll/.so and - // in case of error it checks if .dll/.so was built for the - // same architecture as Hotspot is running on +// Loads .dll/.so and +// in case of error it checks if .dll/.so was built for the +// same architecture as Hotspot is running on #ifdef __APPLE__ void * os::dll_load(const char *filename, char *ebuf, int ebuflen) { @@ -2292,7 +1536,6 @@ static bool _print_ascii_file(const char* filename, outputStream* st) { void os::print_dll_info(outputStream *st) { st->print_cr("Dynamic libraries:"); -#ifdef _ALLBSD_SOURCE #ifdef RTLD_DI_LINKMAP Dl_info dli; void *handle; @@ -2336,16 +1579,6 @@ void os::print_dll_info(outputStream *st) { #else st->print_cr("Error: Cannot print dynamic libraries."); #endif -#else - char fname[32]; - pid_t pid = os::Bsd::gettid(); - - jio_snprintf(fname, sizeof(fname), "/proc/%d/maps", pid); - - if (!_print_ascii_file(fname, st)) { - st->print("Can not get library information for pid = %d\n", pid); - } -#endif } void os::print_os_info_brief(outputStream* st) { @@ -2374,22 +1607,10 @@ void os::print_memory_info(outputStream* st) { st->print("Memory:"); st->print(" %dk page", os::vm_page_size()>>10); -#ifndef _ALLBSD_SOURCE - // values in struct sysinfo are "unsigned long" - struct sysinfo si; - sysinfo(&si); -#endif - st->print(", physical " UINT64_FORMAT "k", os::physical_memory() >> 10); st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10); -#ifndef _ALLBSD_SOURCE - st->print(", swap " UINT64_FORMAT "k", - ((jlong)si.totalswap * si.mem_unit) >> 10); - st->print("(" UINT64_FORMAT "k free)", - ((jlong)si.freeswap * si.mem_unit) >> 10); -#endif st->cr(); // meminfo @@ -2786,42 +2007,13 @@ bool os::pd_commit_memory(char* addr, size_t size, bool exec) { #endif } -#ifndef _ALLBSD_SOURCE -// Define MAP_HUGETLB here so we can build HotSpot on old systems. -#ifndef MAP_HUGETLB -#define MAP_HUGETLB 0x40000 -#endif - -// Define MADV_HUGEPAGE here so we can build HotSpot on old systems. -#ifndef MADV_HUGEPAGE -#define MADV_HUGEPAGE 14 -#endif -#endif bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) { -#ifndef _ALLBSD_SOURCE - if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) { - int prot = exec ? 
PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE; - uintptr_t res = - (uintptr_t) ::mmap(addr, size, prot, - MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS|MAP_HUGETLB, - -1, 0); - return res != (uintptr_t) MAP_FAILED; - } -#endif - return commit_memory(addr, size, exec); } void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { -#ifndef _ALLBSD_SOURCE - if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) { - // We don't check the return value: madvise(MADV_HUGEPAGE) may not - // be supported or the memory may already be backed by huge pages. - ::madvise(addr, bytes, MADV_HUGEPAGE); - } -#endif } void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { @@ -2860,111 +2052,6 @@ char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info return end; } -#ifndef _ALLBSD_SOURCE -// Something to do with the numa-aware allocator needs these symbols -extern "C" JNIEXPORT void numa_warn(int number, char *where, ...) { } -extern "C" JNIEXPORT void numa_error(char *where) { } -extern "C" JNIEXPORT int fork1() { return fork(); } - - -// If we are running with libnuma version > 2, then we should -// be trying to use symbols with versions 1.1 -// If we are running with earlier version, which did not have symbol versions, -// we should use the base version. -void* os::Bsd::libnuma_dlsym(void* handle, const char *name) { - void *f = dlvsym(handle, name, "libnuma_1.1"); - if (f == NULL) { - f = dlsym(handle, name); - } - return f; -} - -bool os::Bsd::libnuma_init() { - // sched_getcpu() should be in libc. - set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t, - dlsym(RTLD_DEFAULT, "sched_getcpu"))); - - if (sched_getcpu() != -1) { // Does it work? - void *handle = dlopen("libnuma.so.1", RTLD_LAZY); - if (handle != NULL) { - set_numa_node_to_cpus(CAST_TO_FN_PTR(numa_node_to_cpus_func_t, - libnuma_dlsym(handle, "numa_node_to_cpus"))); - set_numa_max_node(CAST_TO_FN_PTR(numa_max_node_func_t, - libnuma_dlsym(handle, "numa_max_node"))); - set_numa_available(CAST_TO_FN_PTR(numa_available_func_t, - libnuma_dlsym(handle, "numa_available"))); - set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t, - libnuma_dlsym(handle, "numa_tonode_memory"))); - set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t, - libnuma_dlsym(handle, "numa_interleave_memory"))); - - - if (numa_available() != -1) { - set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes")); - // Create a cpu -> node mapping - _cpu_to_node = new (ResourceObj::C_HEAP) GrowableArray(0, true); - rebuild_cpu_to_node_map(); - return true; - } - } - } - return false; -} - -// rebuild_cpu_to_node_map() constructs a table mapping cpud id to node id. -// The table is later used in get_node_by_cpu(). -void os::Bsd::rebuild_cpu_to_node_map() { - const size_t NCPUS = 32768; // Since the buffer size computation is very obscure - // in libnuma (possible values are starting from 16, - // and continuing up with every other power of 2, but less - // than the maximum number of CPUs supported by kernel), and - // is a subject to change (in libnuma version 2 the requirements - // are more reasonable) we'll just hardcode the number they use - // in the library. 
- const size_t BitsPerCLong = sizeof(long) * CHAR_BIT; - - size_t cpu_num = os::active_processor_count(); - size_t cpu_map_size = NCPUS / BitsPerCLong; - size_t cpu_map_valid_size = - MIN2((cpu_num + BitsPerCLong - 1) / BitsPerCLong, cpu_map_size); - - cpu_to_node()->clear(); - cpu_to_node()->at_grow(cpu_num - 1); - size_t node_num = numa_get_groups_num(); - - unsigned long *cpu_map = NEW_C_HEAP_ARRAY(unsigned long, cpu_map_size); - for (size_t i = 0; i < node_num; i++) { - if (numa_node_to_cpus(i, cpu_map, cpu_map_size * sizeof(unsigned long)) != -1) { - for (size_t j = 0; j < cpu_map_valid_size; j++) { - if (cpu_map[j] != 0) { - for (size_t k = 0; k < BitsPerCLong; k++) { - if (cpu_map[j] & (1UL << k)) { - cpu_to_node()->at_put(j * BitsPerCLong + k, i); - } - } - } - } - } - } - FREE_C_HEAP_ARRAY(unsigned long, cpu_map); -} - -int os::Bsd::get_node_by_cpu(int cpu_id) { - if (cpu_to_node() != NULL && cpu_id >= 0 && cpu_id < cpu_to_node()->length()) { - return cpu_to_node()->at(cpu_id); - } - return -1; -} - -GrowableArray* os::Bsd::_cpu_to_node; -os::Bsd::sched_getcpu_func_t os::Bsd::_sched_getcpu; -os::Bsd::numa_node_to_cpus_func_t os::Bsd::_numa_node_to_cpus; -os::Bsd::numa_max_node_func_t os::Bsd::_numa_max_node; -os::Bsd::numa_available_func_t os::Bsd::_numa_available; -os::Bsd::numa_tonode_memory_func_t os::Bsd::_numa_tonode_memory; -os::Bsd::numa_interleave_memory_func_t os::Bsd::_numa_interleave_memory; -unsigned long* os::Bsd::_numa_all_nodes; -#endif bool os::pd_uncommit_memory(char* addr, size_t size) { #ifdef __OpenBSD__ @@ -3084,42 +2171,7 @@ bool os::unguard_memory(char* addr, size_t size) { } bool os::Bsd::hugetlbfs_sanity_check(bool warn, size_t page_size) { - bool result = false; -#ifndef _ALLBSD_SOURCE - void *p = mmap (NULL, page_size, PROT_READ|PROT_WRITE, - MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB, - -1, 0); - - if (p != (void *) -1) { - // We don't know if this really is a huge page or not. - FILE *fp = fopen("/proc/self/maps", "r"); - if (fp) { - while (!feof(fp)) { - char chars[257]; - long x = 0; - if (fgets(chars, sizeof(chars), fp)) { - if (sscanf(chars, "%lx-%*x", &x) == 1 - && x == (long)p) { - if (strstr (chars, "hugepage")) { - result = true; - break; - } - } - } - } - fclose(fp); - } - munmap (p, page_size); - if (result) - return true; - } - - if (warn) { - warning("HugeTLBFS is not supported by the operating system."); - } -#endif - - return result; + return false; } /* @@ -3164,92 +2216,8 @@ static void set_coredump_filter(void) { static size_t _large_page_size = 0; void os::large_page_init() { -#ifndef _ALLBSD_SOURCE - if (!UseLargePages) { - UseHugeTLBFS = false; - UseSHM = false; - return; - } - - if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM)) { - // If UseLargePages is specified on the command line try both methods, - // if it's default, then try only HugeTLBFS. - if (FLAG_IS_DEFAULT(UseLargePages)) { - UseHugeTLBFS = true; - } else { - UseHugeTLBFS = UseSHM = true; - } - } - - if (LargePageSizeInBytes) { - _large_page_size = LargePageSizeInBytes; - } else { - // large_page_size on Bsd is used to round up heap size. x86 uses either - // 2M or 4M page, depending on whether PAE (Physical Address Extensions) - // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use - // page as large as 256M. - // - // Here we try to figure out page size by parsing /proc/meminfo and looking - // for a line with the following format: - // Hugepagesize: 2048 kB - // - // If we can't determine the value (e.g. 
/proc is not mounted, or the text - // format has been changed), we'll use the largest page size supported by - // the processor. - -#ifndef ZERO - _large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M) - ARM_ONLY(2 * M) PPC_ONLY(4 * M); -#endif // ZERO - - FILE *fp = fopen("/proc/meminfo", "r"); - if (fp) { - while (!feof(fp)) { - int x = 0; - char buf[16]; - if (fscanf(fp, "Hugepagesize: %d", &x) == 1) { - if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) { - _large_page_size = x * K; - break; - } - } else { - // skip to next line - for (;;) { - int ch = fgetc(fp); - if (ch == EOF || ch == (int)'\n') break; - } - } - } - fclose(fp); - } - } - - // print a warning if any large page related flag is specified on command line - bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS); - - const size_t default_page_size = (size_t)Bsd::page_size(); - if (_large_page_size > default_page_size) { - _page_sizes[0] = _large_page_size; - _page_sizes[1] = default_page_size; - _page_sizes[2] = 0; - } - UseHugeTLBFS = UseHugeTLBFS && - Bsd::hugetlbfs_sanity_check(warn_on_failure, _large_page_size); - - if (UseHugeTLBFS) - UseSHM = false; - - UseLargePages = UseHugeTLBFS || UseSHM; - - set_coredump_filter(); -#endif } -#ifndef _ALLBSD_SOURCE -#ifndef SHM_HUGETLB -#define SHM_HUGETLB 04000 -#endif -#endif char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) { // "exec" is passed in but not used. Creating the shared image for @@ -3267,11 +2235,7 @@ char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) { // Create a large shared memory region to attach to based on size. // Currently, size is the total size of the heap -#ifndef _ALLBSD_SOURCE - int shmid = shmget(key, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W); -#else int shmid = shmget(key, bytes, IPC_CREAT|SHM_R|SHM_W); -#endif if (shmid == -1) { // Possible reasons for shmget failure: // 1. shmmax is too small for Java heap. @@ -3558,7 +2522,7 @@ void os::loop_breaker(int attempts) { // this reason, the code should not be used as default (ThreadPriorityPolicy=0). // It is only used when ThreadPriorityPolicy=1 and requires root privilege. -#if defined(_ALLBSD_SOURCE) && !defined(__APPLE__) +#if !defined(__APPLE__) int os::java_to_os_priority[CriticalPriority + 1] = { 19, // 0 Entry should never be used @@ -3578,7 +2542,7 @@ int os::java_to_os_priority[CriticalPriority + 1] = { 31 // 11 CriticalPriority }; -#elif defined(__APPLE__) +#else /* Using Mach high-level priority assignments */ int os::java_to_os_priority[CriticalPriority + 1] = { 0, // 0 Entry should never be used (MINPRI_USER) @@ -3599,26 +2563,6 @@ int os::java_to_os_priority[CriticalPriority + 1] = { 36 // 11 CriticalPriority }; -#else -int os::java_to_os_priority[CriticalPriority + 1] = { - 19, // 0 Entry should never be used - - 4, // 1 MinPriority - 3, // 2 - 2, // 3 - - 1, // 4 - 0, // 5 NormPriority - -1, // 6 - - -2, // 7 - -3, // 8 - -4, // 9 NearMaxPriority - - -5, // 10 MaxPriority - - -5 // 11 CriticalPriority -}; #endif static int prio_init() { @@ -4179,22 +3123,6 @@ void os::Bsd::install_signal_handlers() { } } -#ifndef _ALLBSD_SOURCE -// This is the fastest way to get thread cpu time on Bsd. -// Returns cpu time (user+sys) for any thread, not only for current. -// POSIX compliant clocks are implemented in the kernels 2.6.16+. -// It might work on 2.6.10+ with a special kernel/glibc patch. 
-// For reference, please, see IEEE Std 1003.1-2004: -// http://www.unix.org/single_unix_specification - -jlong os::Bsd::fast_thread_cpu_time(clockid_t clockid) { - struct timespec tp; - int rc = os::Bsd::clock_gettime(clockid, &tp); - assert(rc == 0, "clock_gettime is expected to return 0 code"); - - return (tp.tv_sec * NANOSECS_PER_SEC) + tp.tv_nsec; -} -#endif ///// // glibc on Bsd platform uses non-documented flag @@ -4458,10 +3386,6 @@ extern "C" { // this is called _after_ the global arguments have been parsed jint os::init_2(void) { -#ifndef _ALLBSD_SOURCE - Bsd::fast_thread_clock_init(); -#endif - // Allocate a single page and mark it as readable for safepoint polling address polling_page = (address) ::mmap(NULL, Bsd::page_size(), PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); guarantee( polling_page != MAP_FAILED, "os::init_2: failed to allocate polling page" ); @@ -4518,48 +3442,6 @@ jint os::init_2(void) JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size())); -#ifndef _ALLBSD_SOURCE - Bsd::capture_initial_stack(JavaThread::stack_size_at_create()); - - Bsd::libpthread_init(); - if (PrintMiscellaneous && (Verbose || WizardMode)) { - tty->print_cr("[HotSpot is running with %s, %s(%s)]\n", - Bsd::glibc_version(), Bsd::libpthread_version(), - Bsd::is_floating_stack() ? "floating stack" : "fixed stack"); - } - - if (UseNUMA) { - if (!Bsd::libnuma_init()) { - UseNUMA = false; - } else { - if ((Bsd::numa_max_node() < 1)) { - // There's only one node(they start from 0), disable NUMA. - UseNUMA = false; - } - } - // With SHM large pages we cannot uncommit a page, so there's not way - // we can make the adaptive lgrp chunk resizing work. If the user specified - // both UseNUMA and UseLargePages (or UseSHM) on the command line - warn and - // disable adaptive resizing. - if (UseNUMA && UseLargePages && UseSHM) { - if (!FLAG_IS_DEFAULT(UseNUMA)) { - if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseSHM)) { - UseLargePages = false; - } else { - warning("UseNUMA is not fully compatible with SHM large pages, disabling adaptive resizing"); - UseAdaptiveSizePolicy = false; - UseAdaptiveNUMAChunkSizing = false; - } - } else { - UseNUMA = false; - } - } - if (!UseNUMA && ForceNUMA) { - UseNUMA = true; - } - } -#endif - if (MaxFDLimit) { // set the number of file descriptors to max. print out error // if getrlimit/setrlimit fails but continue regardless. @@ -4586,11 +3468,6 @@ jint os::init_2(void) } } -#ifndef _ALLBSD_SOURCE - // Initialize lock used to serialize thread creation (see os::create_thread) - Bsd::set_createThread_lock(new Mutex(Mutex::leaf, "createThread_lock", false)); -#endif - // at-exit methods are called in the reverse order of their registration. // atexit functions are called on return from main or as a result of a // call to exit(3C). There can be only 32 of these functions registered @@ -4641,15 +3518,7 @@ void os::make_polling_page_readable(void) { }; int os::active_processor_count() { -#ifdef _ALLBSD_SOURCE return _processor_count; -#else - // Bsd doesn't yet have a (official) notion of processor sets, - // so just return the number of online processors. 
- int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN); - assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check"); - return online_cpus; -#endif } void os::set_native_thread_name(const char *name) { @@ -4703,25 +3572,7 @@ ExtendedPC os::get_thread_pc(Thread* thread) { int os::Bsd::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime) { -#ifdef _ALLBSD_SOURCE return pthread_cond_timedwait(_cond, _mutex, _abstime); -#else - if (is_NPTL()) { - return pthread_cond_timedwait(_cond, _mutex, _abstime); - } else { -#ifndef IA64 - // 6292965: BsdThreads pthread_cond_timedwait() resets FPU control - // word back to default 64bit precision if condvar is signaled. Java - // wants 53bit precision. Save and restore current value. - int fpu = get_fpu_control_word(); -#endif // IA64 - int status = pthread_cond_timedwait(_cond, _mutex, _abstime); -#ifndef IA64 - set_fpu_control_word(fpu); -#endif // IA64 - return status; - } -#endif } //////////////////////////////////////////////////////////////////////////////// @@ -5041,20 +3892,6 @@ bool os::pd_unmap_memory(char* addr, size_t bytes) { return munmap(addr, bytes) == 0; } -#ifndef _ALLBSD_SOURCE -static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time); - -static clockid_t thread_cpu_clockid(Thread* thread) { - pthread_t tid = thread->osthread()->pthread_id(); - clockid_t clockid; - - // Get thread clockid - int rc = os::Bsd::pthread_getcpuclockid(tid, &clockid); - assert(rc == 0, "pthread_getcpuclockid is expected to return 0 code"); - return clockid; -} -#endif - // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool) // are used by JVM M&M and JVMTI to get user+sys or user CPU time // of a thread. @@ -5065,36 +3902,15 @@ static clockid_t thread_cpu_clockid(Thread* thread) { jlong os::current_thread_cpu_time() { #ifdef __APPLE__ return os::thread_cpu_time(Thread::current(), true /* user + sys */); -#elif !defined(_ALLBSD_SOURCE) - if (os::Bsd::supports_fast_thread_cpu_time()) { - return os::Bsd::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID); - } else { - // return user + sys since the cost is the same - return slow_thread_cpu_time(Thread::current(), true /* user + sys */); - } #endif } jlong os::thread_cpu_time(Thread* thread) { -#ifndef _ALLBSD_SOURCE - // consistent with what current_thread_cpu_time() returns - if (os::Bsd::supports_fast_thread_cpu_time()) { - return os::Bsd::fast_thread_cpu_time(thread_cpu_clockid(thread)); - } else { - return slow_thread_cpu_time(thread, true /* user + sys */); - } -#endif } jlong os::current_thread_cpu_time(bool user_sys_cpu_time) { #ifdef __APPLE__ return os::thread_cpu_time(Thread::current(), user_sys_cpu_time); -#elif !defined(_ALLBSD_SOURCE) - if (user_sys_cpu_time && os::Bsd::supports_fast_thread_cpu_time()) { - return os::Bsd::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID); - } else { - return slow_thread_cpu_time(Thread::current(), user_sys_cpu_time); - } #endif } @@ -5118,106 +3934,9 @@ jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) { } else { return ((jlong)tinfo.user_time.seconds * 1000000000) + ((jlong)tinfo.user_time.microseconds * (jlong)1000); } -#elif !defined(_ALLBSD_SOURCE) - if (user_sys_cpu_time && os::Bsd::supports_fast_thread_cpu_time()) { - return os::Bsd::fast_thread_cpu_time(thread_cpu_clockid(thread)); - } else { - return slow_thread_cpu_time(thread, user_sys_cpu_time); - } #endif } -#ifndef _ALLBSD_SOURCE -// -// -1 on error. 
-// - -static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) { - static bool proc_pid_cpu_avail = true; - static bool proc_task_unchecked = true; - static const char *proc_stat_path = "/proc/%d/stat"; - pid_t tid = thread->osthread()->thread_id(); - int i; - char *s; - char stat[2048]; - int statlen; - char proc_name[64]; - int count; - long sys_time, user_time; - char string[64]; - char cdummy; - int idummy; - long ldummy; - FILE *fp; - - // We first try accessing /proc//cpu since this is faster to - // process. If this file is not present (bsd kernels 2.5 and above) - // then we open /proc//stat. - if ( proc_pid_cpu_avail ) { - sprintf(proc_name, "/proc/%d/cpu", tid); - fp = fopen(proc_name, "r"); - if ( fp != NULL ) { - count = fscanf( fp, "%s %lu %lu\n", string, &user_time, &sys_time); - fclose(fp); - if ( count != 3 ) return -1; - - if (user_sys_cpu_time) { - return ((jlong)sys_time + (jlong)user_time) * (1000000000 / clock_tics_per_sec); - } else { - return (jlong)user_time * (1000000000 / clock_tics_per_sec); - } - } - else proc_pid_cpu_avail = false; - } - - // The /proc//stat aggregates per-process usage on - // new Bsd kernels 2.6+ where NPTL is supported. - // The /proc/self/task//stat still has the per-thread usage. - // See bug 6328462. - // There can be no directory /proc/self/task on kernels 2.4 with NPTL - // and possibly in some other cases, so we check its availability. - if (proc_task_unchecked && os::Bsd::is_NPTL()) { - // This is executed only once - proc_task_unchecked = false; - fp = fopen("/proc/self/task", "r"); - if (fp != NULL) { - proc_stat_path = "/proc/self/task/%d/stat"; - fclose(fp); - } - } - - sprintf(proc_name, proc_stat_path, tid); - fp = fopen(proc_name, "r"); - if ( fp == NULL ) return -1; - statlen = fread(stat, 1, 2047, fp); - stat[statlen] = '\0'; - fclose(fp); - - // Skip pid and the command string. Note that we could be dealing with - // weird command names, e.g. user could decide to rename java launcher - // to "java 1.4.2 :)", then the stat file would look like - // 1234 (java 1.4.2 :)) R ... ... - // We don't really need to know the command string, just find the last - // occurrence of ")" and then start parsing from there. See bug 4726580. 
- s = strrchr(stat, ')'); - i = 0; - if (s == NULL ) return -1; - - // Skip blank chars - do s++; while (isspace(*s)); - - count = sscanf(s,"%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu", - &cdummy, &idummy, &idummy, &idummy, &idummy, &idummy, - &ldummy, &ldummy, &ldummy, &ldummy, &ldummy, - &user_time, &sys_time); - if ( count != 13 ) return -1; - if (user_sys_cpu_time) { - return ((jlong)sys_time + (jlong)user_time) * (1000000000 / clock_tics_per_sec); - } else { - return (jlong)user_time * (1000000000 / clock_tics_per_sec); - } -} -#endif void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits @@ -5236,10 +3955,8 @@ void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { bool os::is_thread_cpu_time_supported() { #ifdef __APPLE__ return true; -#elif defined(_ALLBSD_SOURCE) - return false; #else - return true; + return false; #endif } diff --git a/src/os/bsd/vm/os_bsd.hpp b/src/os/bsd/vm/os_bsd.hpp index d9ab28afcc4431cc8ed22ddf34d397b92bc4c10f..ebdbdc22a037fe7c6a5ef0d504c1f846aa1e1fe3 100644 --- a/src/os/bsd/vm/os_bsd.hpp +++ b/src/os/bsd/vm/os_bsd.hpp @@ -56,19 +56,6 @@ class Bsd { static int sigflags[MAXSIGNUM]; static int (*_clock_gettime)(clockid_t, struct timespec *); -#ifndef _ALLBSD_SOURCE - static int (*_pthread_getcpuclockid)(pthread_t, clockid_t *); - - static address _initial_thread_stack_bottom; - static uintptr_t _initial_thread_stack_size; - - static const char *_glibc_version; - static const char *_libpthread_version; - - static bool _is_floating_stack; - static bool _is_NPTL; - static bool _supports_fast_thread_cpu_time; -#endif static GrowableArray* _cpu_to_node; @@ -76,28 +63,14 @@ class Bsd { static julong _physical_memory; static pthread_t _main_thread; -#ifndef _ALLBSD_SOURCE - static Mutex* _createThread_lock; -#endif static int _page_size; static julong available_memory(); static julong physical_memory() { return _physical_memory; } static void initialize_system_info(); -#ifndef _ALLBSD_SOURCE - static void set_glibc_version(const char *s) { _glibc_version = s; } - static void set_libpthread_version(const char *s) { _libpthread_version = s; } -#endif - static bool supports_variable_stack_size(); -#ifndef _ALLBSD_SOURCE - static void set_is_NPTL() { _is_NPTL = true; } - static void set_is_BsdThreads() { _is_NPTL = false; } - static void set_is_floating_stack() { _is_floating_stack = true; } -#endif - static void rebuild_cpu_to_node_map(); static GrowableArray* cpu_to_node() { return _cpu_to_node; } @@ -106,25 +79,10 @@ class Bsd { public: static void init_thread_fpu_state(); -#ifndef _ALLBSD_SOURCE - static int get_fpu_control_word(); - static void set_fpu_control_word(int fpu_control); -#endif static pthread_t main_thread(void) { return _main_thread; } -#ifndef _ALLBSD_SOURCE - // returns kernel thread id (similar to LWP id on Solaris), which can be - // used to access /proc - static pid_t gettid(); - static void set_createThread_lock(Mutex* lk) { _createThread_lock = lk; } - static Mutex* createThread_lock(void) { return _createThread_lock; } -#endif static void hotspot_sigmask(Thread* thread); -#ifndef _ALLBSD_SOURCE - static address initial_thread_stack_bottom(void) { return _initial_thread_stack_bottom; } - static uintptr_t initial_thread_stack_size(void) { return _initial_thread_stack_size; } -#endif static bool is_initial_thread(void); static int page_size(void) { return _page_size; } @@ -161,23 +119,6 @@ class Bsd { static struct sigaction *get_chained_signal_action(int 
sig); static bool chained_handler(int sig, siginfo_t* siginfo, void* context); -#ifndef _ALLBSD_SOURCE - // GNU libc and libpthread version strings - static const char *glibc_version() { return _glibc_version; } - static const char *libpthread_version() { return _libpthread_version; } - - // NPTL or BsdThreads? - static bool is_BsdThreads() { return !_is_NPTL; } - static bool is_NPTL() { return _is_NPTL; } - - // NPTL is always floating stack. BsdThreads could be using floating - // stack or fixed stack. - static bool is_floating_stack() { return _is_floating_stack; } - - static void libpthread_init(); - static bool libnuma_init(); - static void* libnuma_dlsym(void* handle, const char* name); -#endif // Minimum stack size a thread can be created with (allowing // the VM to completely create the thread and enter user code) static size_t min_stack_allowed; @@ -186,22 +127,9 @@ class Bsd { static size_t default_stack_size(os::ThreadType thr_type); static size_t default_guard_size(os::ThreadType thr_type); -#ifndef _ALLBSD_SOURCE - static void capture_initial_stack(size_t max_size); - - // Stack overflow handling - static bool manually_expand_stack(JavaThread * t, address addr); - static int max_register_window_saves_before_flushing(); -#endif - // Real-time clock functions static void clock_init(void); -#ifndef _ALLBSD_SOURCE - // fast POSIX clocks support - static void fast_thread_clock_init(void); -#endif - static inline bool supports_monotonic_clock() { return _clock_gettime != NULL; } @@ -210,18 +138,6 @@ class Bsd { return _clock_gettime ? _clock_gettime(clock_id, tp) : -1; } -#ifndef _ALLBSD_SOURCE - static int pthread_getcpuclockid(pthread_t tid, clockid_t *clock_id) { - return _pthread_getcpuclockid ? _pthread_getcpuclockid(tid, clock_id) : -1; - } - - static bool supports_fast_thread_cpu_time() { - return _supports_fast_thread_cpu_time; - } - - static jlong fast_thread_cpu_time(clockid_t clockid); -#endif - // Stack repair handling // none present diff --git a/src/os/windows/vm/os_windows.cpp b/src/os/windows/vm/os_windows.cpp index dc31e27a6b720401123c168f229a1e469849ff83..420368f101097e5b994c22085d8ec975ec7a68b3 100644 --- a/src/os/windows/vm/os_windows.cpp +++ b/src/os/windows/vm/os_windows.cpp @@ -22,7 +22,7 @@ * */ -// Must be at least Windows 2000 or XP to use VectoredExceptions and IsDebuggerPresent +// Must be at least Windows 2000 or XP to use IsDebuggerPresent #define _WIN32_WINNT 0x500 // no precompiled headers @@ -110,10 +110,6 @@ static FILETIME process_exit_time; static FILETIME process_user_time; static FILETIME process_kernel_time; -#ifdef _WIN64 -PVOID topLevelVectoredExceptionHandler = NULL; -#endif - #ifdef _M_IA64 #define __CPU__ ia64 #elif _M_AMD64 @@ -136,12 +132,6 @@ BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) { case DLL_PROCESS_DETACH: if(ForceTimeHighResolution) timeEndPeriod(1L); -#ifdef _WIN64 - if (topLevelVectoredExceptionHandler != NULL) { - RemoveVectoredExceptionHandler(topLevelVectoredExceptionHandler); - topLevelVectoredExceptionHandler = NULL; - } -#endif break; default: break; @@ -408,20 +398,14 @@ static unsigned __stdcall java_start(Thread* thread) { } - if (UseVectoredExceptions) { - // If we are using vectored exception we don't need to set a SEH - thread->run(); - } - else { - // Install a win32 structured exception handler around every thread created - // by VM, so VM can genrate error dump when an exception occurred in non- - // Java thread (e.g. VM thread). 
- __try { - thread->run(); - } __except(topLevelExceptionFilter( - (_EXCEPTION_POINTERS*)_exception_info())) { - // Nothing to do. - } + // Install a win32 structured exception handler around every thread created + // by VM, so VM can generate error dump when an exception occurred in non- + // Java thread (e.g. VM thread). + __try { + thread->run(); + } __except(topLevelExceptionFilter( + (_EXCEPTION_POINTERS*)_exception_info())) { + // Nothing to do. } // One less thread is executing @@ -2489,16 +2473,6 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { } #endif -#ifdef _WIN64 - // Windows will sometimes generate an access violation - // when we call malloc. Since we use VectoredExceptions - // on 64 bit platforms, we see this exception. We must - // pass this exception on so Windows can recover. - // We check to see if the pc of the fault is in NTDLL.DLL - // if so, we pass control on to Windows for handling. - if (UseVectoredExceptions && _addr_in_ntdll(pc)) return EXCEPTION_CONTINUE_SEARCH; -#endif - // Stack overflow or null pointer exception in native code. report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, exceptionInfo->ContextRecord); @@ -2527,30 +2501,8 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { } if (exception_code != EXCEPTION_BREAKPOINT) { -#ifndef _WIN64 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, exceptionInfo->ContextRecord); -#else - // Itanium Windows uses a VectoredExceptionHandler - // Which means that C++ programatic exception handlers (try/except) - // will get here. Continue the search for the right except block if - // the exception code is not a fatal code. - switch ( exception_code ) { - case EXCEPTION_ACCESS_VIOLATION: - case EXCEPTION_STACK_OVERFLOW: - case EXCEPTION_ILLEGAL_INSTRUCTION: - case EXCEPTION_ILLEGAL_INSTRUCTION_2: - case EXCEPTION_INT_OVERFLOW: - case EXCEPTION_INT_DIVIDE_BY_ZERO: - case EXCEPTION_UNCAUGHT_CXX_EXCEPTION: - { report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, - exceptionInfo->ContextRecord); - } - break; - default: - break; - } -#endif } return EXCEPTION_CONTINUE_SEARCH; } @@ -3706,18 +3658,6 @@ jint os::init_2(void) { // Setup Windows Exceptions - // On Itanium systems, Structured Exception Handling does not - // work since stack frames must be walkable by the OS. Since - // much of our code is dynamically generated, and we do not have - // proper unwind .xdata sections, the system simply exits - // rather than delivering the exception. To work around - // this we use VectorExceptions instead. 
-#ifdef _WIN64 - if (UseVectoredExceptions) { - topLevelVectoredExceptionHandler = AddVectoredExceptionHandler( 1, topLevelExceptionFilter); - } -#endif - // for debugging float code generation bugs if (ForceFloatExceptions) { #ifndef _WIN64 diff --git a/src/os_cpu/bsd_x86/vm/bytes_bsd_x86.inline.hpp b/src/os_cpu/bsd_x86/vm/bytes_bsd_x86.inline.hpp index 4fe770a0e6e62463be0c03fdf70eeb73bb82a25e..c2256d56fd88e069b69b0bcea1f3f54510a04a8a 100644 --- a/src/os_cpu/bsd_x86/vm/bytes_bsd_x86.inline.hpp +++ b/src/os_cpu/bsd_x86/vm/bytes_bsd_x86.inline.hpp @@ -25,10 +25,6 @@ #ifndef OS_CPU_BSD_X86_VM_BYTES_BSD_X86_INLINE_HPP #define OS_CPU_BSD_X86_VM_BYTES_BSD_X86_INLINE_HPP -#ifndef _ALLBSD_SOURCE -#include -#endif - #ifdef __APPLE__ #include #endif diff --git a/src/os_cpu/bsd_x86/vm/globals_bsd_x86.hpp b/src/os_cpu/bsd_x86/vm/globals_bsd_x86.hpp index 5a19fed3f58330b2a2a85bd0d7e93b5dc5da20d2..3a8d42ab68ef65db45261b5aef68461377091dd9 100644 --- a/src/os_cpu/bsd_x86/vm/globals_bsd_x86.hpp +++ b/src/os_cpu/bsd_x86/vm/globals_bsd_x86.hpp @@ -48,7 +48,5 @@ define_pd_global(uintx, JVMInvokeMethodSlack, 8192); // Used on 64 bit platforms for UseCompressedOops base address or CDS define_pd_global(uintx, HeapBaseMinAddress, 2*G); -// Only used on 64 bit Windows platforms -define_pd_global(bool, UseVectoredExceptions, false); #endif // OS_CPU_BSD_X86_VM_GLOBALS_BSD_X86_HPP diff --git a/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp b/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp index b7b3e4c726f1d4fc209b6448e85dc7d2b0a72ed0..49ea265bac892814159781066577537aad8423c7 100644 --- a/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp +++ b/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp @@ -76,7 +76,7 @@ # include #endif -#if defined(_ALLBSD_SOURCE) && !defined(__APPLE__) && !defined(__NetBSD__) +#if !defined(__APPLE__) && !defined(__NetBSD__) # include #endif @@ -489,23 +489,6 @@ JVM_handle_bsd_signal(int sig, // to handle_unexpected_exception way down below. thread->disable_stack_red_zone(); tty->print_raw_cr("An irrecoverable stack overflow has occurred."); -#ifndef _ALLBSD_SOURCE - } else { - // Accessing stack address below sp may cause SEGV if current - // thread has MAP_GROWSDOWN stack. This should only happen when - // current thread was created by user code with MAP_GROWSDOWN flag - // and then attached to VM. See notes in os_bsd.cpp. - if (thread->osthread()->expanding_stack() == 0) { - thread->osthread()->set_expanding_stack(); - if (os::Bsd::manually_expand_stack(thread, addr)) { - thread->osthread()->clear_expanding_stack(); - return 1; - } - thread->osthread()->clear_expanding_stack(); - } else { - fatal("recursive segv. expanding stack."); - } -#endif } } } @@ -744,61 +727,21 @@ JVM_handle_bsd_signal(int sig, ShouldNotReachHere(); } -#ifdef _ALLBSD_SOURCE // From solaris_i486.s ported to bsd_i486.s extern "C" void fixcw(); -#endif void os::Bsd::init_thread_fpu_state(void) { #ifndef AMD64 -# ifdef _ALLBSD_SOURCE // Set fpu to 53 bit precision. This happens too early to use a stub. fixcw(); -# else - // set fpu to 53 bit precision - set_fpu_control_word(0x27f); -# endif #endif // !AMD64 } -#ifndef _ALLBSD_SOURCE -int os::Bsd::get_fpu_control_word(void) { -#ifdef AMD64 - return 0; -#else - int fpu_control; - _FPU_GETCW(fpu_control); - return fpu_control & 0xffff; -#endif // AMD64 -} - -void os::Bsd::set_fpu_control_word(int fpu_control) { -#ifndef AMD64 - _FPU_SETCW(fpu_control); -#endif // !AMD64 -} -#endif // Check that the bsd kernel version is 2.4 or higher since earlier // versions do not support SSE without patches. 
bool os::supports_sse() { -#if defined(AMD64) || defined(_ALLBSD_SOURCE) return true; -#else - struct utsname uts; - if( uname(&uts) != 0 ) return false; // uname fails? - char *minor_string; - int major = strtol(uts.release,&minor_string,10); - int minor = strtol(minor_string+1,NULL,10); - bool result = (major > 2 || (major==2 && minor >= 4)); -#ifndef PRODUCT - if (PrintMiscellaneous && Verbose) { - tty->print("OS version is %d.%d, which %s support SSE/SSE2\n", - major,minor, result ? "DOES" : "does NOT"); - } -#endif - return result; -#endif // AMD64 } bool os::is_allocatable(size_t bytes) { @@ -836,46 +779,7 @@ size_t os::Bsd::min_stack_allowed = (48 DEBUG_ONLY(+4))*K; #define GET_GS() ({int gs; __asm__ volatile("movw %%gs, %w0":"=q"(gs)); gs&0xffff;}) #endif -#ifdef _ALLBSD_SOURCE bool os::Bsd::supports_variable_stack_size() { return true; } -#else -// Test if pthread library can support variable thread stack size. BsdThreads -// in fixed stack mode allocates 2M fixed slot for each thread. BsdThreads -// in floating stack mode and NPTL support variable stack size. -bool os::Bsd::supports_variable_stack_size() { - if (os::Bsd::is_NPTL()) { - // NPTL, yes - return true; - - } else { - // Note: We can't control default stack size when creating a thread. - // If we use non-default stack size (pthread_attr_setstacksize), both - // floating stack and non-floating stack BsdThreads will return the - // same value. This makes it impossible to implement this function by - // detecting thread stack size directly. - // - // An alternative approach is to check %gs. Fixed-stack BsdThreads - // do not use %gs, so its value is 0. Floating-stack BsdThreads use - // %gs (either as LDT selector or GDT selector, depending on kernel) - // to access thread specific data. - // - // Note that %gs is a reserved glibc register since early 2001, so - // applications are not allowed to change its value (Ulrich Drepper from - // Redhat confirmed that all known offenders have been modified to use - // either %fs or TSD). In the worst case scenario, when VM is embedded in - // a native application that plays with %gs, we might see non-zero %gs - // even BsdThreads is running in fixed stack mode. As the result, we'll - // return true and skip _thread_safety_check(), so we may not be able to - // detect stack-heap collisions. But otherwise it's harmless. - // -#ifdef __GNUC__ - return (GET_GS() != 0); -#else - return false; -#endif - } -} -#endif #endif // AMD64 // return default stack size for thr_type @@ -943,7 +847,7 @@ static void current_stack_region(address * bottom, size_t * size) { *bottom = (address)((char *)ss.ss_sp - ss.ss_size); *size = ss.ss_size; -#elif defined(_ALLBSD_SOURCE) +#else pthread_attr_t attr; int rslt = pthread_attr_init(&attr); @@ -963,33 +867,6 @@ static void current_stack_region(address * bottom, size_t * size) { } pthread_attr_destroy(&attr); -#else - if (os::Bsd::is_initial_thread()) { - // initial thread needs special handling because pthread_getattr_np() - // may return bogus value. 
- *bottom = os::Bsd::initial_thread_stack_bottom(); - *size = os::Bsd::initial_thread_stack_size(); - } else { - pthread_attr_t attr; - - int rslt = pthread_getattr_np(pthread_self(), &attr); - - // JVM needs to know exact stack location, abort if it fails - if (rslt != 0) { - if (rslt == ENOMEM) { - vm_exit_out_of_memory(0, "pthread_getattr_np"); - } else { - fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt)); - } - } - - if (pthread_attr_getstack(&attr, (void **)bottom, size) != 0) { - fatal("Can not locate current stack attributes!"); - } - - pthread_attr_destroy(&attr); - - } #endif assert(os::current_stack_pointer() >= *bottom && os::current_stack_pointer() < *bottom + *size, "just checking"); diff --git a/src/os_cpu/bsd_zero/vm/globals_bsd_zero.hpp b/src/os_cpu/bsd_zero/vm/globals_bsd_zero.hpp index 6b1f6af5efc7480c37a58b7466e7d9529a0991f4..9c988eb743b9a1505209941fdfc7fc6f4b126044 100644 --- a/src/os_cpu/bsd_zero/vm/globals_bsd_zero.hpp +++ b/src/os_cpu/bsd_zero/vm/globals_bsd_zero.hpp @@ -41,7 +41,6 @@ define_pd_global(intx, VMThreadStackSize, 512); define_pd_global(intx, CompilerThreadStackSize, 0); define_pd_global(uintx, JVMInvokeMethodSlack, 8192); -define_pd_global(bool, UseVectoredExceptions, false); // Used on 64 bit platforms for UseCompressedOops base address or CDS define_pd_global(uintx, HeapBaseMinAddress, 2*G); diff --git a/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp b/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp index 51ea6e3f24e99fd3cdefab96422afa9dd5da0084..2262ee2079b20c748c12944bc08fb97c72e0e0b7 100644 --- a/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp +++ b/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp @@ -23,7 +23,7 @@ * */ -#if defined(_ALLBSD_SOURCE) && !defined(__APPLE__) && !defined(__NetBSD__) +#if !defined(__APPLE__) && !defined(__NetBSD__) #include # include /* For pthread_attr_get_np */ #endif @@ -178,26 +178,6 @@ JVM_handle_bsd_signal(int sig, thread->disable_stack_red_zone(); ShouldNotCallThis(); } -#ifndef _ALLBSD_SOURCE - else { - // Accessing stack address below sp may cause SEGV if - // current thread has MAP_GROWSDOWN stack. This should - // only happen when current thread was created by user - // code with MAP_GROWSDOWN flag and then attached to VM. - // See notes in os_bsd.cpp. - if (thread->osthread()->expanding_stack() == 0) { - thread->osthread()->set_expanding_stack(); - if (os::Bsd::manually_expand_stack(thread, addr)) { - thread->osthread()->clear_expanding_stack(); - return true; - } - thread->osthread()->clear_expanding_stack(); - } - else { - fatal("recursive segv. 
expanding stack."); - } - } -#endif } } @@ -266,16 +246,6 @@ void os::Bsd::init_thread_fpu_state(void) { // Nothing to do } -#ifndef _ALLBSD_SOURCE -int os::Bsd::get_fpu_control_word() { - ShouldNotCallThis(); -} - -void os::Bsd::set_fpu_control_word(int fpu) { - ShouldNotCallThis(); -} -#endif - bool os::is_allocatable(size_t bytes) { #ifdef _LP64 return true; @@ -339,7 +309,7 @@ static void current_stack_region(address *bottom, size_t *size) { stack_top = (address) ss.ss_sp; stack_bytes = ss.ss_size; stack_bottom = stack_top - stack_bytes; -#elif defined(_ALLBSD_SOURCE) +#else pthread_attr_t attr; int rslt = pthread_attr_init(&attr); @@ -362,67 +332,6 @@ static void current_stack_region(address *bottom, size_t *size) { pthread_attr_destroy(&attr); stack_top = stack_bottom + stack_bytes; -#else /* Linux */ - pthread_attr_t attr; - int res = pthread_getattr_np(pthread_self(), &attr); - if (res != 0) { - if (res == ENOMEM) { - vm_exit_out_of_memory(0, "pthread_getattr_np"); - } - else { - fatal(err_msg("pthread_getattr_np failed with errno = " INT32_FORMAT, - res)); - } - } - - res = pthread_attr_getstack(&attr, (void **) &stack_bottom, &stack_bytes); - if (res != 0) { - fatal(err_msg("pthread_attr_getstack failed with errno = " INT32_FORMAT, - res)); - } - stack_top = stack_bottom + stack_bytes; - - // The block of memory returned by pthread_attr_getstack() includes - // guard pages where present. We need to trim these off. - size_t page_bytes = os::Bsd::page_size(); - assert(((intptr_t) stack_bottom & (page_bytes - 1)) == 0, "unaligned stack"); - - size_t guard_bytes; - res = pthread_attr_getguardsize(&attr, &guard_bytes); - if (res != 0) { - fatal(err_msg( - "pthread_attr_getguardsize failed with errno = " INT32_FORMAT, res)); - } - int guard_pages = align_size_up(guard_bytes, page_bytes) / page_bytes; - assert(guard_bytes == guard_pages * page_bytes, "unaligned guard"); - -#ifdef IA64 - // IA64 has two stacks sharing the same area of memory, a normal - // stack growing downwards and a register stack growing upwards. - // Guard pages, if present, are in the centre. This code splits - // the stack in two even without guard pages, though in theory - // there's nothing to stop us allocating more to the normal stack - // or more to the register stack if one or the other were found - // to grow faster. - int total_pages = align_size_down(stack_bytes, page_bytes) / page_bytes; - stack_bottom += (total_pages - guard_pages) / 2 * page_bytes; -#endif // IA64 - - stack_bottom += guard_bytes; - - pthread_attr_destroy(&attr); - - // The initial thread has a growable stack, and the size reported - // by pthread_attr_getstack is the maximum size it could possibly - // be given what currently mapped. This can be huge, so we cap it. 
-  if (os::Bsd::is_initial_thread()) {
-    stack_bytes = stack_top - stack_bottom;
-
-    if (stack_bytes > JavaThread::stack_size_at_create())
-      stack_bytes = JavaThread::stack_size_at_create();
-
-    stack_bottom = stack_top - stack_bytes;
-  }
 #endif
 
   assert(os::current_stack_pointer() >= stack_bottom, "should do");
diff --git a/src/os_cpu/linux_sparc/vm/globals_linux_sparc.hpp b/src/os_cpu/linux_sparc/vm/globals_linux_sparc.hpp
index ab9d8cdeb250646e1dfa055b5ff4b62a670f5002..4ac5ead19461f8f6c64799d54a7cec7edeb05b64 100644
--- a/src/os_cpu/linux_sparc/vm/globals_linux_sparc.hpp
+++ b/src/os_cpu/linux_sparc/vm/globals_linux_sparc.hpp
@@ -35,7 +35,5 @@ define_pd_global(intx, CompilerThreadStackSize, 0);
 
 // Used on 64 bit platforms for UseCompressedOops base address or CDS
 define_pd_global(uintx, HeapBaseMinAddress, CONST64(4)*G);
-// Only used on 64 bit Windows platforms
-define_pd_global(bool, UseVectoredExceptions, false);
 
 #endif // OS_CPU_LINUX_SPARC_VM_GLOBALS_LINUX_SPARC_HPP
diff --git a/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp b/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp
index a7c94c00f829f5a44ba641466a8db951016c609d..b11a6f3aa276aaa5eee24047d5a56d0807cea476 100644
--- a/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp
+++ b/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp
@@ -46,7 +46,5 @@ define_pd_global(uintx,JVMInvokeMethodSlack, 8192);
 
 // Used on 64 bit platforms for UseCompressedOops base address or CDS
 define_pd_global(uintx,HeapBaseMinAddress, 2*G);
-// Only used on 64 bit Windows platforms
-define_pd_global(bool, UseVectoredExceptions, false);
 
 #endif // OS_CPU_LINUX_X86_VM_GLOBALS_LINUX_X86_HPP
diff --git a/src/os_cpu/linux_zero/vm/globals_linux_zero.hpp b/src/os_cpu/linux_zero/vm/globals_linux_zero.hpp
index 14003011d3ed3614105d2ae364d6c4abec3d33b8..56495d176d1d1b1812ac912a77842edd63412be7 100644
--- a/src/os_cpu/linux_zero/vm/globals_linux_zero.hpp
+++ b/src/os_cpu/linux_zero/vm/globals_linux_zero.hpp
@@ -41,7 +41,6 @@
 define_pd_global(intx, VMThreadStackSize, 512);
 define_pd_global(intx, CompilerThreadStackSize, 0);
 define_pd_global(uintx, JVMInvokeMethodSlack, 8192);
-define_pd_global(bool, UseVectoredExceptions, false);
 
 // Used on 64 bit platforms for UseCompressedOops base address or CDS
 define_pd_global(uintx, HeapBaseMinAddress, 2*G);
diff --git a/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp b/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp
index 76695e9442c02c26c4ee6a9f685546e17260e81f..e6cb0dddb01c9bb50ec841121d8466ea119950a1 100644
--- a/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp
+++ b/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp
@@ -39,8 +39,6 @@
 define_pd_global(uintx, HeapBaseMinAddress, CONST64(4)*G);
 #else
 define_pd_global(uintx, HeapBaseMinAddress, 2*G);
 #endif
-// Only used on 64 bit Windows platforms
-define_pd_global(bool, UseVectoredExceptions, false);
 
diff --git a/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp b/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp
index 780df544cc42c9484d6559c625d969817e5e606b..5d99a09c447c83334543551dfeaefe318db54e3f 100644
--- a/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp
+++ b/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp
@@ -45,7 +45,5 @@ define_pd_global(intx, CompilerThreadStackSize, 0);
 
 // Used on 64 bit platforms for UseCompressedOops base address or CDS
 define_pd_global(uintx,HeapBaseMinAddress, 256*M);
-// Only used on 64 bit Windows platforms
-define_pd_global(bool, UseVectoredExceptions, false);
 
 #endif // OS_CPU_SOLARIS_X86_VM_GLOBALS_SOLARIS_X86_HPP
diff --git a/src/os_cpu/windows_x86/vm/globals_windows_x86.hpp b/src/os_cpu/windows_x86/vm/globals_windows_x86.hpp
index 57e0ac34c75055a5c8c10471dd18b4be5c672892..f4167f5eb5ca6f661cce1db997a9e36d283bca74 100644
--- a/src/os_cpu/windows_x86/vm/globals_windows_x86.hpp
+++ b/src/os_cpu/windows_x86/vm/globals_windows_x86.hpp
@@ -47,7 +47,5 @@ define_pd_global(uintx, JVMInvokeMethodSlack, 8192);
 
 // Used on 64 bit platforms for UseCompressedOops base address or CDS
 define_pd_global(uintx, HeapBaseMinAddress, 2*G);
-// Only used on 64 bit Windows platforms
-define_pd_global(bool, UseVectoredExceptions, false);
 
 #endif // OS_CPU_WINDOWS_X86_VM_GLOBALS_WINDOWS_X86_HPP
diff --git a/src/os_cpu/windows_x86/vm/os_windows_x86.cpp b/src/os_cpu/windows_x86/vm/os_windows_x86.cpp
index 434929f30d24e8e93ab5bb1e77568611b77af55f..d0fad7da799d895b37439c07f0deab04e59a9a36 100644
--- a/src/os_cpu/windows_x86/vm/os_windows_x86.cpp
+++ b/src/os_cpu/windows_x86/vm/os_windows_x86.cpp
@@ -175,9 +175,6 @@ bool os::register_code_area(char *low, char *high) {
   PRUNTIME_FUNCTION prt;
   PUNWIND_INFO_EH_ONLY punwind;
 
-  // If we are using Vectored Exceptions we don't need this registration
-  if (UseVectoredExceptions) return true;
-
   BufferBlob* blob = BufferBlob::create("CodeCache Exception Handler", sizeof(DynamicCodeData));
   CodeBuffer cb(blob);
   MacroAssembler* masm = new MacroAssembler(&cb);
diff --git a/src/share/vm/asm/codeBuffer.cpp b/src/share/vm/asm/codeBuffer.cpp
index bb1ae18fcb57af6596b50801d1ab688594c3eb65..a3fc112f54f66bbf4e58414e993c7a5fd989ec39 100644
--- a/src/share/vm/asm/codeBuffer.cpp
+++ b/src/share/vm/asm/codeBuffer.cpp
@@ -758,7 +758,7 @@ void CodeBuffer::relocate_code_to(CodeBuffer* dest) const {
     }
   }
 
-  if (dest->blob() == NULL) {
+  if (dest->blob() == NULL && dest_filled != NULL) {
    // Destination is a final resting place, not just another buffer.
    // Normalize uninitialized bytes in the final padding.
    Copy::fill_to_bytes(dest_filled, dest_end - dest_filled,
diff --git a/src/share/vm/classfile/vmSymbols.hpp b/src/share/vm/classfile/vmSymbols.hpp
index 2febc7b567561a96a08d7c056073a1f7adb70877..b29efcb0fa457b12cf6bc6e1881204ff60c5d489 100644
--- a/src/share/vm/classfile/vmSymbols.hpp
+++ b/src/share/vm/classfile/vmSymbols.hpp
@@ -115,6 +115,7 @@
   /* Java runtime version access */ \
   template(sun_misc_Version, "sun/misc/Version") \
   template(java_runtime_name_name, "java_runtime_name") \
+  template(java_runtime_version_name, "java_runtime_version") \
   \
   /* class file format tags */ \
   template(tag_source_file, "SourceFile") \
diff --git a/src/share/vm/interpreter/abstractInterpreter.hpp b/src/share/vm/interpreter/abstractInterpreter.hpp
index b0870c0f1e2aad9c548bb98238fb939dfab0f253..4dc2ef451c28ab2eea86db4a369ef9ccd9e06b32 100644
--- a/src/share/vm/interpreter/abstractInterpreter.hpp
+++ b/src/share/vm/interpreter/abstractInterpreter.hpp
@@ -320,6 +320,7 @@ class AbstractInterpreterGenerator: public StackObj {
   void bang_stack_shadow_pages(bool native_call);
 
   void generate_all();
+  void initialize_method_handle_entries();
 
  public:
   AbstractInterpreterGenerator(StubQueue* _code);
diff --git a/src/share/vm/interpreter/bytecodeInterpreter.cpp b/src/share/vm/interpreter/bytecodeInterpreter.cpp
index 6c18761dba279e9f7d27d265da1c10c91b5adbe5..a9d6cc9a98169e2e751b706837411781bd44802d 100644
--- a/src/share/vm/interpreter/bytecodeInterpreter.cpp
+++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp
@@ -235,10 +235,6 @@
 #endif
 #endif
 
-// JavaStack Implementation
-#define MORE_STACK(count) \
-  (topOfStack -= ((count) * Interpreter::stackElementWords))
-
 #define UPDATE_PC(opsize) {pc += opsize; }
 
 /*
@@ -575,7 +571,7 @@ BytecodeInterpreter::run(interpreterState istate) {
 /* 0xE0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
 /* 0xE4 */ &&opc_default, &&opc_fast_aldc, &&opc_fast_aldc_w, &&opc_return_register_finalizer,
-/* 0xE8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
+/* 0xE8 */ &&opc_invokehandle,&&opc_default, &&opc_default, &&opc_default,
 /* 0xEC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
 
 /* 0xF0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
@@ -1773,7 +1769,7 @@ run:
       oop obj;
 
       if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
-        Klass* k = (Klass*) cache->f1();
+        Klass* k = cache->f1_as_klass();
         obj = k->java_mirror();
         MORE_STACK(1); // Assume single slot push
       } else {
@@ -1885,7 +1881,7 @@ run:
           --count;
         }
         if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) {
-          Klass* k = (Klass*) cache->f1();
+          Klass* k = cache->f1_as_klass();
           obj = k->java_mirror();
         } else {
           --count;
@@ -2190,6 +2186,7 @@ run:
       }
 
       CASE(_invokedynamic): {
+
         if (!EnableInvokeDynamic) {
           // We should not encounter this bytecode if !EnableInvokeDynamic.
           // The verifier will stop it. However, if we get past the verifier,
@@ -2199,30 +2196,68 @@ run:
           ShouldNotReachHere();
         }
 
-        int index = Bytes::get_native_u4(pc+1);
+        u4 index = Bytes::get_native_u4(pc+1);
+        ConstantPoolCacheEntry* cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index);
 
         // We are resolved if the resolved_references field contains a non-null object (CallSite, etc.)
         // This kind of CP cache entry does not need to match the flags byte, because
         // there is a 1-1 relation between bytecode type and CP entry type.
-        ConstantPool* constants = METHOD->constants();
-        oop result = constants->resolved_references()->obj_at(index);
-        if (result == NULL) {
+        if (! cache->is_resolved((Bytecodes::Code) opcode)) {
          CALL_VM(InterpreterRuntime::resolve_invokedynamic(THREAD),
                  handle_exception);
-          result = THREAD->vm_result();
+          cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index);
         }
 
-        VERIFY_OOP(result);
-        oop method_handle = java_lang_invoke_CallSite::target(result);
-        CHECK_NULL(method_handle);
+        Method* method = cache->f1_as_method();
+        VERIFY_OOP(method);
+
+        if (cache->has_appendix()) {
+          ConstantPool* constants = METHOD->constants();
+          SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0);
+          MORE_STACK(1);
+        }
 
-        istate->set_msg(call_method_handle);
-        istate->set_callee((Method*) method_handle);
+        istate->set_msg(call_method);
+        istate->set_callee(method);
+        istate->set_callee_entry_point(method->from_interpreted_entry());
         istate->set_bcp_advance(5);
 
         UPDATE_PC_AND_RETURN(0); // I'll be back...
       }
 
+      CASE(_invokehandle): {
+
+        if (!EnableInvokeDynamic) {
+          ShouldNotReachHere();
+        }
+
+        u2 index = Bytes::get_native_u2(pc+1);
+        ConstantPoolCacheEntry* cache = cp->entry_at(index);
+
+        if (! cache->is_resolved((Bytecodes::Code) opcode)) {
+          CALL_VM(InterpreterRuntime::resolve_invokehandle(THREAD),
+                  handle_exception);
+          cache = cp->entry_at(index);
+        }
+
+        Method* method = cache->f1_as_method();
+
+        VERIFY_OOP(method);
+
+        if (cache->has_appendix()) {
+          ConstantPool* constants = METHOD->constants();
+          SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0);
+          MORE_STACK(1);
+        }
+
+        istate->set_msg(call_method);
+        istate->set_callee(method);
+        istate->set_callee_entry_point(method->from_interpreted_entry());
+        istate->set_bcp_advance(3);
+
+        UPDATE_PC_AND_RETURN(0); // I'll be back...
+      }
+
       CASE(_invokeinterface): {
         u2 index = Bytes::get_native_u2(pc+1);
diff --git a/src/share/vm/interpreter/bytecodeInterpreter.hpp b/src/share/vm/interpreter/bytecodeInterpreter.hpp
index c1614cdf8e571e070d580f5745717a661d4dc733..d528034d3f002591e4795e44fcfb37023f9742d2 100644
--- a/src/share/vm/interpreter/bytecodeInterpreter.hpp
+++ b/src/share/vm/interpreter/bytecodeInterpreter.hpp
@@ -50,6 +50,10 @@
 
 #ifdef CC_INTERP
 
+// JavaStack Implementation
+#define MORE_STACK(count) \
+  (topOfStack -= ((count) * Interpreter::stackElementWords))
+
 // CVM definitions find hotspot equivalents...
 
 union VMJavaVal64 {
@@ -107,7 +111,6 @@ public:
     rethrow_exception,   // unwinding and throwing exception
     // requests to frame manager from C++ interpreter
    call_method,          // request for new frame from interpreter, manager responds with method_entry
-   call_method_handle,   // like the above, except the callee is a method handle
    return_from_method,   // request from interpreter to unwind, manager responds with method_continue
    more_monitors,        // need a new monitor
    throwing_exception,   // unwind stack and rethrow
diff --git a/src/share/vm/interpreter/cppInterpreter.cpp b/src/share/vm/interpreter/cppInterpreter.cpp
index 9a6669519f6e78356f3347281b7ea78af369e0d1..0007aa8be25637b787b529bd99d0af50dad6823f 100644
--- a/src/share/vm/interpreter/cppInterpreter.cpp
+++ b/src/share/vm/interpreter/cppInterpreter.cpp
@@ -117,7 +117,6 @@ void CppInterpreterGenerator::generate_all() {
   method_entry(empty);
   method_entry(accessor);
   method_entry(abstract);
-  method_entry(method_handle);
   method_entry(java_lang_math_sin );
   method_entry(java_lang_math_cos );
   method_entry(java_lang_math_tan );
@@ -125,7 +124,12 @@
   method_entry(java_lang_math_sqrt );
   method_entry(java_lang_math_log );
   method_entry(java_lang_math_log10 );
+  method_entry(java_lang_math_pow );
+  method_entry(java_lang_math_exp );
   method_entry(java_lang_ref_reference_get);
+
+  initialize_method_handle_entries();
+
   Interpreter::_native_entry_begin = Interpreter::code()->code_end();
   method_entry(native);
   method_entry(native_synchronized);
diff --git a/src/share/vm/interpreter/interpreter.cpp b/src/share/vm/interpreter/interpreter.cpp
index dad8f9ec7519e343e2fe00082b68ac60ed5f6401..4513eebb79c247b44dfc02eaf294734910d184a3 100644
--- a/src/share/vm/interpreter/interpreter.cpp
+++ b/src/share/vm/interpreter/interpreter.cpp
@@ -464,3 +464,11 @@ void AbstractInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
     }
   }
 }
+
+void AbstractInterpreterGenerator::initialize_method_handle_entries() {
+  // method handle entry kinds are generated later in MethodHandlesAdapterGenerator::generate:
+  for (int i = Interpreter::method_handle_invoke_FIRST; i <= Interpreter::method_handle_invoke_LAST; i++) {
+    Interpreter::MethodKind kind = (Interpreter::MethodKind) i;
+    Interpreter::_entry_table[kind] = Interpreter::_entry_table[Interpreter::abstract];
+  }
+}
diff --git a/src/share/vm/interpreter/templateInterpreter.cpp b/src/share/vm/interpreter/templateInterpreter.cpp
index beb99fa15e4e9c41398fc8444881529ec425e860..53e50b1c7fe7be8cd1f06360abdb74465ebe1884 100644
--- a/src/share/vm/interpreter/templateInterpreter.cpp
+++ b/src/share/vm/interpreter/templateInterpreter.cpp
@@ -373,11 +373,7 @@ void TemplateInterpreterGenerator::generate_all() {
   method_entry(java_lang_math_pow )
   method_entry(java_lang_ref_reference_get)
 
-  // method handle entry kinds are generated later in MethodHandlesAdapterGenerator::generate:
-  for (int i = Interpreter::method_handle_invoke_FIRST; i <= Interpreter::method_handle_invoke_LAST; i++) {
-    Interpreter::MethodKind kind = (Interpreter::MethodKind) i;
-    Interpreter::_entry_table[kind] = Interpreter::_entry_table[Interpreter::abstract];
-  }
+  initialize_method_handle_entries();
 
   // all native method kinds (must be one contiguous block)
   Interpreter::_native_entry_begin = Interpreter::code()->code_end();
diff --git a/src/share/vm/memory/collectorPolicy.cpp b/src/share/vm/memory/collectorPolicy.cpp
index 4d42723020677539d1976fae53210a14e0e32e64..6c0b1bd82c4c3d693cb6d38d3f1c16e56990c721 100644
--- a/src/share/vm/memory/collectorPolicy.cpp
+++ b/src/share/vm/memory/collectorPolicy.cpp
@@ -742,6 +742,8 @@ MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation(
   uint gc_count = 0;
   uint full_gc_count = 0;
 
+  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");
+
   do {
     MetaWord* result = NULL;
     if (GC_locker::is_active_and_needs_gc()) {
@@ -756,7 +758,6 @@ MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation(
       }
       JavaThread* jthr = JavaThread::current();
       if (!jthr->in_critical()) {
-        MutexUnlocker mul(Heap_lock);
        // Wait for JNI critical section to be exited
        GC_locker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
diff --git a/src/share/vm/prims/jvmti.xml b/src/share/vm/prims/jvmti.xml
index 512e3cb00337c3c3d0c72fb3e353b45fd2e1b8f2..517083f20e30ebabd656428a6bd46aad30f0a326 100644
--- a/src/share/vm/prims/jvmti.xml
+++ b/src/share/vm/prims/jvmti.xml
@@ -1,7 +1,7 @@