From 75352d4562fde296b8cec4a04023ba76c54b4c59 Mon Sep 17 00:00:00 2001 From: "yunyao.zxl" Date: Wed, 22 Jul 2020 17:16:41 +0800 Subject: [PATCH] [Coroutine] Import coroutine patch Summary: - From http://hg.openjdk.java.net/mlvm/mlvm/hotspot/file/4cd7d914b0e3/coro-simple.patch. - Resolve trivial conflicts in: systemDictionary.hpp, thread.cpp, debug.hpp. - Add an option "EnableCoroutine". Test Plan: run jtreg tests Reviewed-by: yuleil, shiyuexw, sanhong Issue: https://github.com/alibaba/dragonwell8/issues/113 --- src/cpu/x86/vm/sharedRuntime_x86_32.cpp | 178 ++++++++++ src/cpu/x86/vm/sharedRuntime_x86_64.cpp | 225 ++++++++++++ src/os/windows/vm/os_windows.cpp | 3 +- src/os_cpu/bsd_x86/vm/threadLS_bsd_x86.cpp | 22 ++ .../linux_x86/vm/threadLS_linux_x86.cpp | 23 ++ .../windows_x86/vm/threadLS_windows_x86.cpp | 9 + src/share/vm/classfile/javaClasses.cpp | 23 ++ src/share/vm/classfile/javaClasses.hpp | 18 + src/share/vm/classfile/systemDictionary.hpp | 4 + src/share/vm/classfile/vmSymbols.hpp | 31 +- src/share/vm/prims/jni.cpp | 26 +- src/share/vm/prims/jvm.cpp | 16 + src/share/vm/prims/nativeLookup.cpp | 6 + src/share/vm/prims/unsafe.cpp | 147 ++++++++ src/share/vm/runtime/coroutine.cpp | 333 ++++++++++++++++++ src/share/vm/runtime/coroutine.hpp | 245 +++++++++++++ src/share/vm/runtime/frame.cpp | 5 + src/share/vm/runtime/frame.hpp | 1 + src/share/vm/runtime/globals_ext.hpp | 10 +- src/share/vm/runtime/handles.cpp | 17 + src/share/vm/runtime/handles.hpp | 8 + src/share/vm/runtime/javaCalls.cpp | 9 + src/share/vm/runtime/javaCalls.hpp | 2 + src/share/vm/runtime/thread.cpp | 58 +++ src/share/vm/runtime/thread.hpp | 31 ++ src/share/vm/runtime/threadLocalStorage.cpp | 8 + src/share/vm/runtime/threadLocalStorage.hpp | 6 + src/share/vm/utilities/debug.cpp | 10 + src/share/vm/utilities/debug.hpp | 1 + 29 files changed, 1471 insertions(+), 4 deletions(-) create mode 100644 src/share/vm/runtime/coroutine.cpp create mode 100644 src/share/vm/runtime/coroutine.hpp diff --git a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp index d701d4495..50e870f95 100644 --- a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp +++ b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp @@ -41,6 +41,8 @@ #include "opto/runtime.hpp" #endif +#include "runtime/coroutine.hpp" + #define __ masm-> const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size; @@ -1485,6 +1487,8 @@ static void gen_special_dispatch(MacroAssembler* masm, receiver_reg, member_reg, /*for_compiler_entry:*/ true); } +void create_switchTo_contents(MacroAssembler *masm, int start, OopMapSet* oop_maps, int &stack_slots, int total_in_args, BasicType *in_sig_bt, VMRegPair *in_regs, BasicType ret_type, bool terminate); + // --------------------------------------------------------------------------- // Generate a native wrapper for a given method. 
The method takes arguments // in the Java compiled code convention, marshals them to the native @@ -1809,6 +1813,16 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, __ fat_nop(); } + if (EnableCoroutine) { + // the coroutine support methods have a hand-coded fast version that will handle the most common cases + if (method->intrinsic_id() == vmIntrinsics::_switchTo) { + create_switchTo_contents(masm, start, oop_maps, stack_slots, total_in_args, in_sig_bt, in_regs, ret_type, false); + } else if (method->intrinsic_id() == vmIntrinsics::_switchToAndTerminate || + method->intrinsic_id() == vmIntrinsics::_switchToAndExit) { + create_switchTo_contents(masm, start, oop_maps, stack_slots, total_in_args, in_sig_bt, in_regs, ret_type, true); + } + } + // Generate a new frame for the wrapper. __ enter(); // -2 because return address is already present and so is saved rbp @@ -3537,3 +3551,167 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha // frame_size_words or bytes?? return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true); } + +void stop_if(MacroAssembler *masm, Assembler::Condition condition, const char* message) { + Label skip; + __ jcc(masm->negate_condition(condition), skip); + + __ warn(message); + __ int3(); + + __ bind(skip); +} + +void stop_if_null(MacroAssembler *masm, Register reg, const char* message) { + __ testptr(reg, reg); + stop_if(masm, Assembler::zero, message); +} + +void stop_if_null(MacroAssembler *masm, Address adr, const char* message) { + __ cmpptr(adr, 0); + stop_if(masm, Assembler::zero, message); +} + +void check_heap_internal(const char* msg) { + tty->print("check(%s) ", msg); + os::check_heap(true); +} + +void check_heap(MacroAssembler *masm, const char* msg) { + __ pusha(); + __ push((int)msg); + __ call(RuntimeAddress((address)check_heap_internal)); + __ addptr(rsp, HeapWordSize); + __ popa(); +} + +void create_switchTo_contents(MacroAssembler *masm, int start, OopMapSet* oop_maps, int &stack_slots, int total_in_args, + BasicType *in_sig_bt, VMRegPair *in_regs, BasicType ret_type, bool terminate) { + assert(total_in_args == 2, "wrong number of arguments"); + + if (terminate) { + __ get_thread(rax); + __ movptr(Address(rax, JavaThread::coroutine_temp_offset()), rcx); + } + + Register target_coroutine = rdx; + // check that we're dealing with sane objects... + DEBUG_ONLY(stop_if_null(masm, target_coroutine, "null new_coroutine")); + __ movptr(target_coroutine, Address(target_coroutine, java_dyn_CoroutineBase::get_data_offset())); + DEBUG_ONLY(stop_if_null(masm, target_coroutine, "new_coroutine without data")); + + { + ////////////////////////////////////////////////////////////////////////// + // store information into the old coroutine's object + // + // valid registers: rcx = old Coroutine, rdx = target Coroutine + + Register old_coroutine = rcx; + Register old_stack = rdi; + Register temp = rsi; + + // check that we're dealing with sane objects... 
+ DEBUG_ONLY(stop_if_null(masm, old_coroutine, "null old_coroutine")); + __ movptr(old_coroutine, Address(old_coroutine, java_dyn_CoroutineBase::get_data_offset())); + DEBUG_ONLY(stop_if_null(masm, target_coroutine, "old_coroutine without data")); + + __ movptr(old_stack, Address(old_coroutine, Coroutine::stack_offset())); + +#ifdef _WINDOWS + // rescue the SEH pointer + __ prefix(Assembler::FS_segment); + __ movptr(temp, Address(noreg, 0x00)); + __ movptr(Address(old_coroutine, Coroutine::last_SEH_offset()), temp); +#endif + + Register thread = rax; + __ get_thread(thread); + + __ movl(Address(old_coroutine, Coroutine::state_offset()) , Coroutine::_onstack); + + // rescue old handle and resource areas + __ movptr(temp, Address(thread, Thread::handle_area_offset())); + __ movptr(Address(old_coroutine, Coroutine::handle_area_offset()), temp); + __ movptr(temp, Address(thread, Thread::resource_area_offset())); + __ movptr(Address(old_coroutine, Coroutine::resource_area_offset()), temp); + __ movptr(temp, Address(thread, Thread::last_handle_mark_offset())); + __ movptr(Address(old_coroutine, Coroutine::last_handle_mark_offset()), temp); + + // push the current IP and frame pointer onto the stack + __ push(rbp); + + __ movptr(Address(old_stack, CoroutineStack::last_sp_offset()), rsp); + } + + { + ////////////////////////////////////////////////////////////////////////// + // perform the switch to the new stack + // + // valid registers: rdx = target Coroutine + + Register target_stack = rbx; + __ movptr(target_stack, Address(target_coroutine, Coroutine::stack_offset())); + + __ movl(Address(target_coroutine, Coroutine::state_offset()), Coroutine::_current); + + Register temp = rsi; + Register temp2 = rdi; + { + Register thread = rax; + __ get_thread(rax); + // set new handle and resource areas + __ movptr(temp, Address(target_coroutine, Coroutine::handle_area_offset())); + __ movptr(Address(target_coroutine, Coroutine::handle_area_offset()), (intptr_t)NULL_WORD); // TODO is this really needed? + __ movptr(Address(thread, Thread::handle_area_offset()), temp); + __ movptr(temp, Address(target_coroutine, Coroutine::resource_area_offset())); + __ movptr(Address(target_coroutine, Coroutine::resource_area_offset()), (intptr_t)NULL_WORD); // TODO is this really needed? + __ movptr(Address(thread, Thread::resource_area_offset()), temp); + __ movptr(temp, Address(target_coroutine, Coroutine::last_handle_mark_offset())); + __ movptr(Address(target_coroutine, Coroutine::last_handle_mark_offset()), (intptr_t)NULL_WORD); // TODO is this really needed? 
+ __ movptr(Address(thread, Thread::last_handle_mark_offset()), temp); + + // update the thread's stack base and size + __ movptr(temp, Address(target_stack, CoroutineStack::stack_base_offset())); + __ movptr(Address(thread, JavaThread::stack_base_offset()), temp); + __ movl(temp2, Address(target_stack, CoroutineStack::stack_size_offset())); + __ movl(Address(thread, JavaThread::stack_size_offset()), temp2); + } +#ifdef _WINDOWS + { + Register tib = rax; + // get the linear address of the TIB (thread info block) + __ prefix(Assembler::FS_segment); + __ movptr(tib, Address(noreg, 0x18)); + + // update the TIB stack base and top + __ movptr(Address(tib, 4), temp); + __ subptr(temp, temp2); + __ movptr(Address(tib, 8), temp); + + // exchange the TIB structured exception handler pointer + __ movptr(temp, Address(target_coroutine, Coroutine::last_SEH_offset())); + __ movptr(Address(tib, 0), temp); + } +#endif + // restore the stack pointer + __ movptr(temp, Address(target_stack, CoroutineStack::last_sp_offset())); + __ movptr(rsp, temp); + + __ pop(rbp); + + if (!terminate) { + ////////////////////////////////////////////////////////////////////////// + // normal case (resume immediately) + __ ret(0); // <-- this will jump to the stored IP of the target coroutine + + } else { + ////////////////////////////////////////////////////////////////////////// + // slow case (terminate old coroutine) + __ get_thread(rax); + __ movptr(rcx, Address(rax, JavaThread::coroutine_temp_offset())); + __ movptr(Address(rax, JavaThread::coroutine_temp_offset()), (intptr_t)NULL_WORD); + __ movptr(rdx, (intptr_t)NULL_WORD); + + } + } +} diff --git a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp index 2ba2c1531..e87a1e3b2 100644 --- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp +++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp @@ -44,6 +44,11 @@ #include "opto/runtime.hpp" #endif +#include "runtime/coroutine.hpp" + +void coroutine_start(Coroutine* coroutine, jobject coroutineObj); + + #define __ masm-> const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size; @@ -1720,6 +1725,9 @@ static void gen_special_dispatch(MacroAssembler* masm, receiver_reg, member_reg, /*for_compiler_entry:*/ true); } +void create_switchTo_contents(MacroAssembler *masm, int start, OopMapSet* oop_maps, int &stack_slots, int total_in_args, BasicType *in_sig_bt, VMRegPair *in_regs, BasicType ret_type, bool terminate); + + // --------------------------------------------------------------------------- // Generate a native wrapper for a given method. The method takes arguments // in the Java compiled code convention, marshals them to the native @@ -2007,6 +2015,16 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, __ fat_nop(); } + if (EnableCoroutine) { + // the coroutine support methods have a hand-coded fast version that will handle the most common cases + if (method->intrinsic_id() == vmIntrinsics::_switchTo) { + create_switchTo_contents(masm, start, oop_maps, stack_slots, total_in_args, in_sig_bt, in_regs, ret_type, false); + } else if (method->intrinsic_id() == vmIntrinsics::_switchToAndTerminate || + method->intrinsic_id() == vmIntrinsics::_switchToAndExit) { + create_switchTo_contents(masm, start, oop_maps, stack_slots, total_in_args, in_sig_bt, in_regs, ret_type, true); + } + } + // Generate a new frame for the wrapper. 
__ enter(); // -2 because return address is already present and so is saved rbp @@ -2523,6 +2541,22 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, } // Return + if (EnableCoroutine && + (method->intrinsic_id() == vmIntrinsics::_switchToAndTerminate || + method->intrinsic_id() == vmIntrinsics::_switchToAndExit)) { + + Label normal; + __ lea(rcx, RuntimeAddress((unsigned char*)coroutine_start)); + __ cmpq(Address(rsp, 0), rcx); + __ jcc(Assembler::notEqual, normal); + + __ movq(c_rarg0, Address(rsp, HeapWordSize * 2)); + __ movq(c_rarg1, Address(rsp, HeapWordSize * 3)); + + __ bind(normal); + + __ ret(0); // <-- this will jump to the stored IP of the target coroutine + } __ ret(0); @@ -4349,3 +4383,194 @@ void OptoRuntime::generate_exception_blob() { _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1); } #endif // COMPILER2 + + +void stop_if(MacroAssembler *masm, Assembler::Condition condition, const char* message) { + Label skip; + __ jcc(masm->negate_condition(condition), skip); + + __ stop(message); + __ int3(); + + __ bind(skip); +} + +void stop_if_null(MacroAssembler *masm, Register reg, const char* message) { + __ testptr(reg, reg); + stop_if(masm, Assembler::zero, message); +} + +void stop_if_null(MacroAssembler *masm, Address adr, const char* message) { + __ cmpptr(adr, 0); + stop_if(masm, Assembler::zero, message); +} + +MacroAssembler* debug_line(MacroAssembler* masm, int l) { + masm->movl(r13, l); + return masm; +} + +void create_switchTo_contents(MacroAssembler *masm, int start, OopMapSet* oop_maps, int &stack_slots, int total_in_args, + BasicType *in_sig_bt, VMRegPair *in_regs, BasicType ret_type, bool terminate) { + assert(total_in_args == 2, "wrong number of arguments"); + + if (j_rarg0 != rsi) { + __ movptr(rsi, j_rarg0); + } + if (j_rarg1 != rdx) { + __ movptr(rdx, j_rarg1); + } + + // push the current IP and frame pointer onto the stack + __ push(rbp); + + Register thread = r15; + Register target_coroutine = rdx; + // check that we're dealing with sane objects... + DEBUG_ONLY(stop_if_null(masm, target_coroutine, "null new_coroutine")); + __ movptr(target_coroutine, Address(target_coroutine, java_dyn_CoroutineBase::get_data_offset())); + DEBUG_ONLY(stop_if_null(masm, target_coroutine, "new_coroutine without data")); + +/* +#ifdef ASSERT +#undef __ +#define __ debug_line(masm, __LINE__)-> +#endif +*/ + { + ////////////////////////////////////////////////////////////////////////// + // store information into the old coroutine's object + // + // valid registers: rsi = old Coroutine, rdx = target Coroutine + + Register old_coroutine_obj = rsi; + Register old_coroutine = r9; + Register old_stack = r10; + Register temp = r8; + + // check that we're dealing with sane objects... 
+ DEBUG_ONLY(stop_if_null(masm, old_coroutine_obj, "null old_coroutine")); + __ movptr(old_coroutine, Address(old_coroutine_obj, java_dyn_CoroutineBase::get_data_offset())); + DEBUG_ONLY(stop_if_null(masm, old_coroutine, "old_coroutine without data")); + __ movptr(old_stack, Address(old_coroutine, Coroutine::stack_offset())); + +#if defined(_WINDOWS) + // rescue the SEH pointer + __ prefix(Assembler::GS_segment); + __ movptr(temp, Address(noreg, 0x00)); + __ movptr(Address(old_coroutine, Coroutine::last_SEH_offset()), temp); +#endif + + __ movl(Address(old_coroutine, Coroutine::state_offset()) , Coroutine::_onstack); + + // rescue old handle and resource areas + __ movptr(temp, Address(thread, Thread::handle_area_offset())); + __ movptr(Address(old_coroutine, Coroutine::handle_area_offset()), temp); + __ movptr(temp, Address(thread, Thread::resource_area_offset())); + __ movptr(Address(old_coroutine, Coroutine::resource_area_offset()), temp); + __ movptr(temp, Address(thread, Thread::last_handle_mark_offset())); + __ movptr(Address(old_coroutine, Coroutine::last_handle_mark_offset()), temp); +#ifdef ASSERT + __ movl(temp, Address(thread, JavaThread::java_call_counter_offset())); + __ movl(Address(old_coroutine, Coroutine::java_call_counter_offset()), temp); +#endif + + __ movptr(Address(old_stack, CoroutineStack::last_sp_offset()), rsp); + } + Register target_stack = r12; + __ movptr(target_stack, Address(target_coroutine, Coroutine::stack_offset())); + + { + ////////////////////////////////////////////////////////////////////////// + // perform the switch to the new stack + // + // valid registers: rdx = target Coroutine + + __ movl(Address(target_coroutine, Coroutine::state_offset()), Coroutine::_current); + + Register temp = r8; + Register temp2 = r9; + { + Register thread = r15; + // set new handle and resource areas + __ movptr(temp, Address(target_coroutine, Coroutine::handle_area_offset())); + __ movptr(Address(thread, Thread::handle_area_offset()), temp); + __ movptr(temp, Address(target_coroutine, Coroutine::resource_area_offset())); + __ movptr(Address(thread, Thread::resource_area_offset()), temp); + __ movptr(temp, Address(target_coroutine, Coroutine::last_handle_mark_offset())); + __ movptr(Address(thread, Thread::last_handle_mark_offset()), temp); + +#ifdef ASSERT + __ movl(temp, Address(target_coroutine, Coroutine::java_call_counter_offset())); + __ movl(Address(thread, JavaThread::java_call_counter_offset()), temp); + + __ movptr(Address(target_coroutine, Coroutine::handle_area_offset()), (intptr_t)NULL_WORD); + __ movptr(Address(target_coroutine, Coroutine::resource_area_offset()), (intptr_t)NULL_WORD); + __ movptr(Address(target_coroutine, Coroutine::last_handle_mark_offset()), (intptr_t)NULL_WORD); + __ movl(Address(target_coroutine, Coroutine::java_call_counter_offset()), 0); +#endif + + // update the thread's stack base and size + __ movptr(temp, Address(target_stack, CoroutineStack::stack_base_offset())); + __ movptr(Address(thread, JavaThread::stack_base_offset()), temp); + __ movl(temp2, Address(target_stack, CoroutineStack::stack_size_offset())); + __ movl(Address(thread, JavaThread::stack_size_offset()), temp2); + } +#if defined(_WINDOWS) + { + Register tib = rax; + // get the linear address of the TIB (thread info block) + __ prefix(Assembler::GS_segment); + __ movptr(tib, Address(noreg, 0x30)); + + // update the TIB stack base and top + __ movptr(Address(tib, 0x8), temp); + __ subptr(temp, temp2); + __ movptr(Address(tib, 0x10), temp); + + // exchange the TIB 
structured exception handler pointer + __ movptr(temp, Address(target_coroutine, Coroutine::last_SEH_offset())); + __ movptr(Address(tib, 0), temp); + } +#endif + // restore the stack pointer + __ movptr(temp, Address(target_stack, CoroutineStack::last_sp_offset())); + __ movptr(rsp, temp); + + __ pop(rbp); + + __ int3(); + + if (!terminate) { + ////////////////////////////////////////////////////////////////////////// + // normal case (resume immediately) + + // this will reset r12 + __ reinit_heapbase(); + + Label normal; + __ lea(rcx, RuntimeAddress((unsigned char*)coroutine_start)); + __ cmpq(Address(rsp, 0), rcx); + __ jcc(Assembler::notEqual, normal); + + __ movq(c_rarg0, Address(rsp, HeapWordSize * 2)); + __ movq(c_rarg1, Address(rsp, HeapWordSize * 3)); + + __ bind(normal); + + __ ret(0); // <-- this will jump to the stored IP of the target coroutine + + } else { + ////////////////////////////////////////////////////////////////////////// + // slow case (terminate old coroutine) + + // this will reset r12 + __ reinit_heapbase(); + + if (j_rarg0 != rsi) { + __ movptr(j_rarg0, rsi); + } + __ movptr(j_rarg1, 0); + } + } +} diff --git a/src/os/windows/vm/os_windows.cpp b/src/os/windows/vm/os_windows.cpp index 74412a3eb..c545be0d1 100644 --- a/src/os/windows/vm/os_windows.cpp +++ b/src/os/windows/vm/os_windows.cpp @@ -2589,7 +2589,8 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { bool in_java = thread->thread_state() == _thread_in_Java; // Handle potential stack overflows up front. - if (exception_code == EXCEPTION_STACK_OVERFLOW) { + if (exception_code == EXCEPTION_STACK_OVERFLOW || + (EnableCoroutine && exception_code == EXCEPTION_GUARD_PAGE)) { if (os::uses_stack_guard_pages()) { #ifdef _M_IA64 // Use guard page for register stack. 
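
Note: the hand-coded fast paths above are selected by the _switchTo, _switchToAndTerminate and _switchToAndExit intrinsic IDs of java.dyn.CoroutineSupport, whose Java half is not part of this HotSpot patch. As orientation only, the sketch below shows the shape of the JDK-side classes this VM code assumes; it is inferred from the vmSymbols entries and the natives registered in unsafe.cpp/nativeLookup.cpp later in this patch, and the real java.dyn sources ship with the matching JDK change and may differ.

// Hedged sketch only -- inferred from this patch, not taken from the JDK sources.
package java.dyn;

public class CoroutineSupport {
    private static native void registerNatives();            // bound through nativeLookup.cpp
    static { registerNatives(); }

    static native long getThreadCoroutine();                 // ()J
    static native long createCoroutine(CoroutineBase c, long stackSize); // (Ljava/dyn/CoroutineBase;J)J
    static native boolean isDisposable(long coroutine);      // (J)Z
    static native CoroutineBase cleanupCoroutine();          // ()Ljava/dyn/CoroutineBase;

    // Compiled as the hand-coded intrinsic stubs generated above.
    static native void switchTo(CoroutineBase current, CoroutineBase target);
    static native void switchToAndTerminate(CoroutineBase current, CoroutineBase target);
    static native void switchToAndExit(CoroutineBase current, CoroutineBase target);
}

abstract class CoroutineBase {
    long data;                      // VM-side Coroutine*, read via java_dyn_CoroutineBase::get_data_offset()
    abstract void startInternal();  // first Java frame of a fresh coroutine, invoked from Coroutine::run()
}

The VM additionally calls an (assumed) java.lang.Thread.initializeCoroutineSupport() instance method when a thread starts or attaches, and switchToAndExit unwinds by throwing java.dyn.CoroutineExitException.
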
diff --git a/src/os_cpu/bsd_x86/vm/threadLS_bsd_x86.cpp b/src/os_cpu/bsd_x86/vm/threadLS_bsd_x86.cpp index 0515fbc3f..3eb7105d0 100644 --- a/src/os_cpu/bsd_x86/vm/threadLS_bsd_x86.cpp +++ b/src/os_cpu/bsd_x86/vm/threadLS_bsd_x86.cpp @@ -90,3 +90,25 @@ void ThreadLocalStorage::pd_set_thread(Thread* thread) { } #endif // !AMD64 } + +void ThreadLocalStorage::pd_add_coroutine_stack(Thread* thread, address stack_base, size_t stack_size) { +#ifndef AMD64 + for (address p = stack_base - stack_size; p < stack_base; p += PAGE_SIZE) { + assert(thread == NULL || _sp_map[(uintptr_t)p >> PAGE_SHIFT] == NULL || + thread == _sp_map[(uintptr_t)p >> PAGE_SHIFT], + "coroutine exited without detaching from VM??"); + _sp_map[(uintptr_t)p >> PAGE_SHIFT] = thread; + } +#endif // !AMD64 +} + + +void ThreadLocalStorage::pd_remove_coroutine_stack(Thread* thread, address stack_base, size_t stack_size) { +#ifndef AMD64 + for (address p = stack_base - stack_size; p < stack_base; p += PAGE_SIZE) { + assert(thread == _sp_map[(uintptr_t)p >> PAGE_SHIFT], + "coroutine exited without detaching from VM??"); + _sp_map[(uintptr_t)p >> PAGE_SHIFT] = NULL; + } +#endif // !AMD64 +} diff --git a/src/os_cpu/linux_x86/vm/threadLS_linux_x86.cpp b/src/os_cpu/linux_x86/vm/threadLS_linux_x86.cpp index ffc89af98..ac85d8127 100644 --- a/src/os_cpu/linux_x86/vm/threadLS_linux_x86.cpp +++ b/src/os_cpu/linux_x86/vm/threadLS_linux_x86.cpp @@ -96,4 +96,27 @@ void ThreadLocalStorage::pd_init() { void ThreadLocalStorage::pd_set_thread(Thread* thread) { os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread); } + +void ThreadLocalStorage::pd_add_coroutine_stack(Thread* thread, address stack_base, size_t stack_size) { +#ifndef AMD64 + for (address p = stack_base - stack_size; p < stack_base; p += PAGE_SIZE) { + assert(thread == NULL || _sp_map[(uintptr_t)p >> PAGE_SHIFT] == NULL || + thread == _sp_map[(uintptr_t)p >> PAGE_SHIFT], + "coroutine exited without detaching from VM??"); + _sp_map[(uintptr_t)p >> PAGE_SHIFT] = thread; + } +#endif // !AMD64 +} + + +void ThreadLocalStorage::pd_remove_coroutine_stack(Thread* thread, address stack_base, size_t stack_size) { +#ifndef AMD64 + for (address p = stack_base - stack_size; p < stack_base; p += PAGE_SIZE) { + assert(thread == _sp_map[(uintptr_t)p >> PAGE_SHIFT], + "coroutine exited without detaching from VM??"); + _sp_map[(uintptr_t)p >> PAGE_SHIFT] = NULL; + } +#endif // !AMD64 +} + #endif // !AMD64 && !MINIMIZE_RAM_USAGE diff --git a/src/os_cpu/windows_x86/vm/threadLS_windows_x86.cpp b/src/os_cpu/windows_x86/vm/threadLS_windows_x86.cpp index f363816d9..cb26aebb0 100644 --- a/src/os_cpu/windows_x86/vm/threadLS_windows_x86.cpp +++ b/src/os_cpu/windows_x86/vm/threadLS_windows_x86.cpp @@ -47,3 +47,12 @@ void ThreadLocalStorage::pd_init() { } void ThreadLocalStorage::pd_set_thread(Thread* thread) { os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread); } + + +void ThreadLocalStorage::pd_add_coroutine_stack(Thread* thread, address stack_base, size_t stack_size) { + // nothing to do +} + +void ThreadLocalStorage::pd_remove_coroutine_stack(Thread* thread, address stack_base, size_t stack_size) { + // nothing to do +} diff --git a/src/share/vm/classfile/javaClasses.cpp b/src/share/vm/classfile/javaClasses.cpp index 0ee665832..68aee45a5 100644 --- a/src/share/vm/classfile/javaClasses.cpp +++ b/src/share/vm/classfile/javaClasses.cpp @@ -3386,6 +3386,25 @@ int com_alibaba_tenant_TenantState::state_of(oop tenant_obj) { #endif // INCLUDE_ALL_GCS +/* stack manipulation 
*/ + +int java_dyn_CoroutineBase::_data_offset = 0; + +void java_dyn_CoroutineBase::compute_offsets() { + Klass* k = SystemDictionary::java_dyn_CoroutineBase_klass(); + if (k != NULL) { + compute_offset(_data_offset, k, vmSymbols::data_name(), vmSymbols::long_signature()); + } +} + +jlong java_dyn_CoroutineBase::data(oop obj) { + return obj->long_field(_data_offset); +} + +void java_dyn_CoroutineBase::set_data(oop obj, jlong value) { + obj->long_field_put(_data_offset, value); +} + void java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(TRAPS) { if (_owner_offset != 0) return; @@ -3496,6 +3515,10 @@ void JavaClasses::compute_offsets() { // generated interpreter code wants to know about the offsets we just computed: AbstractAssembler::update_delayed_values(); + if (EnableCoroutine) { + java_dyn_CoroutineBase::compute_offsets(); + } + if(MultiTenant) { com_alibaba_tenant_TenantContainer::compute_offsets(); } diff --git a/src/share/vm/classfile/javaClasses.hpp b/src/share/vm/classfile/javaClasses.hpp index ba08769e6..bc2923f37 100644 --- a/src/share/vm/classfile/javaClasses.hpp +++ b/src/share/vm/classfile/javaClasses.hpp @@ -1480,6 +1480,24 @@ class InjectedField { CLASSLOADER_INJECTED_FIELDS(macro) \ MEMBERNAME_INJECTED_FIELDS(macro) +class java_dyn_CoroutineBase: AllStatic { +private: + // Note that to reduce dependencies on the JDK we compute these offsets at run-time. + static int _data_offset; + + static void compute_offsets(); + +public: + // Accessors + static jlong data(oop obj); + static void set_data(oop obj, jlong value); + + static int get_data_offset() { return _data_offset; } + + // Debugging + friend class JavaClasses; +}; + // Interface to hard-coded offset checking class JavaClasses : AllStatic { diff --git a/src/share/vm/classfile/systemDictionary.hpp b/src/share/vm/classfile/systemDictionary.hpp index 20030e289..650612427 100644 --- a/src/share/vm/classfile/systemDictionary.hpp +++ b/src/share/vm/classfile/systemDictionary.hpp @@ -204,6 +204,10 @@ class SymbolPropertyTable; do_klass(Short_klass, java_lang_Short, Pre ) \ do_klass(Integer_klass, java_lang_Integer, Pre ) \ do_klass(Long_klass, java_lang_Long, Pre ) \ + \ + /* Stack manipulation classes */ \ + do_klass(java_dyn_CoroutineSupport_klass, java_dyn_CoroutineSupport, Opt ) \ + do_klass(java_dyn_CoroutineBase_klass, java_dyn_CoroutineBase, Opt ) \ /*end*/ diff --git a/src/share/vm/classfile/vmSymbols.hpp b/src/share/vm/classfile/vmSymbols.hpp index 1a547dbc7..aff0fa840 100644 --- a/src/share/vm/classfile/vmSymbols.hpp +++ b/src/share/vm/classfile/vmSymbols.hpp @@ -456,6 +456,7 @@ template(void_float_signature, "()F") \ template(void_double_signature, "()D") \ template(int_void_signature, "(I)V") \ + template(long_void_signature, "(J)V") \ template(int_int_signature, "(I)I") \ template(char_char_signature, "(C)C") \ template(short_short_signature, "(S)S") \ @@ -528,7 +529,6 @@ template(object_signature, "Ljava/lang/Object;") \ template(state_name, "state") \ template(com_alibaba_tenant_TenantState_signature, "Lcom/alibaba/tenant/TenantState;") \ - template(object_array_signature, "[Ljava/lang/Object;") \ template(class_signature, "Ljava/lang/Class;") \ template(string_signature, "Ljava/lang/String;") \ template(reference_signature, "Ljava/lang/ref/Reference;") \ @@ -627,6 +627,25 @@ \ /* jfr signatures */ \ JFR_TEMPLATES(template) \ + /* coroutine support */ \ + template(java_dyn_CoroutineSupport, "java/dyn/CoroutineSupport") \ + template(java_dyn_CoroutineBase, "java/dyn/CoroutineBase") \ + 
template(java_dyn_CoroutineExitException, "java/dyn/CoroutineExitException") \ + template(data_name, "data") \ + template(stack_name, "stack") \ + template(current_name, "current") \ + template(java_dyn_CoroutineBase_signature, "Ljava/dyn/CoroutineBase;") \ + template(reflect_method_signature, "Ljava/lang/reflect/Method;") \ + template(startInternal_method_name, "startInternal") \ + template(initializeCoroutineSupport_method_name, "initializeCoroutineSupport") \ + template(method_name, "method") \ + template(bci_name, "bci") \ + template(localCount_name, "localCount") \ + template(expressionCount_name, "expressionCount") \ + template(scalarValues_name, "scalarValues") \ + template(objectValues_name, "objectValues") \ + template(long_array_signature, "[J") \ + template(object_array_signature, "[Ljava/lang/Object;") \ \ /*end*/ @@ -1095,9 +1114,19 @@ do_name( prefetchReadStatic_name, "prefetchReadStatic") \ do_intrinsic(_prefetchWriteStatic, sun_misc_Unsafe, prefetchWriteStatic_name, prefetch_signature, F_SN) \ do_name( prefetchWriteStatic_name, "prefetchWriteStatic") \ + \ /*== LAST_COMPILER_INLINE*/ \ /*the compiler does have special inlining code for these; bytecode inline is just fine */ \ \ + /* coroutine intrinsics */ \ + do_intrinsic(_switchTo, java_dyn_CoroutineSupport, switchTo_name, switchTo_signature, F_SN) \ + do_name( switchTo_name, "switchTo") \ + do_signature(switchTo_signature, "(Ljava/dyn/CoroutineBase;Ljava/dyn/CoroutineBase;)V") \ + do_intrinsic(_switchToAndTerminate, java_dyn_CoroutineSupport, switchToAndTerminate_name, switchTo_signature, F_SN) \ + do_name( switchToAndTerminate_name, "switchToAndTerminate") \ + do_intrinsic(_switchToAndExit, java_dyn_CoroutineSupport, switchToAndExit_name, switchTo_signature, F_SN) \ + do_name( switchToAndExit_name, "switchToAndExit") \ + \ do_intrinsic(_fillInStackTrace, java_lang_Throwable, fillInStackTrace_name, void_throwable_signature, F_RNY) \ \ do_intrinsic(_StringBuilder_void, java_lang_StringBuilder, object_initializer_name, void_method_signature, F_R) \ diff --git a/src/share/vm/prims/jni.cpp b/src/share/vm/prims/jni.cpp index 60c31071a..faaa344ff 100644 --- a/src/share/vm/prims/jni.cpp +++ b/src/share/vm/prims/jni.cpp @@ -5268,6 +5268,28 @@ _JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_CreateJavaVM(JavaVM **vm, void **penv, v CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(test_error_handler); CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(execute_internal_vm_tests); #endif + if (EnableCoroutine) { + JavaThread* __the_thread__ = thread; + HandleMark hm(THREAD); + Handle obj(THREAD, thread->threadObj()); + JavaValue result(T_VOID); + + if (SystemDictionary::java_dyn_CoroutineSupport_klass() != NULL) { + InstanceKlass::cast(SystemDictionary::Class_klass())->initialize(CHECK_0); + InstanceKlass::cast(SystemDictionary::java_dyn_CoroutineSupport_klass())->initialize(CHECK_0); + JavaCalls::call_virtual(&result, + obj, + KlassHandle(THREAD, SystemDictionary::Thread_klass()), + vmSymbols::initializeCoroutineSupport_method_name(), + vmSymbols::void_method_signature(), + THREAD); + if (THREAD->has_pending_exception()) { + java_lang_Throwable::print_stack_trace(THREAD->pending_exception(), tty); + THREAD->clear_pending_exception(); + vm_abort(false); + } + } + } // Since this is not a JVM_ENTRY we have to set the thread state manually before leaving. 
ThreadStateTransition::transition_and_fence(thread, _thread_in_vm, _thread_in_native); @@ -5397,7 +5419,9 @@ static jint attach_current_thread(JavaVM *vm, void **penv, void *_args, bool dae thread->set_thread_state(_thread_in_vm); // Must do this before initialize_thread_local_storage thread->record_stack_base_and_size(); - + if (EnableCoroutine) { + thread->initialize_coroutine_support(); + } thread->initialize_thread_local_storage(); if (!os::create_attached_thread(thread)) { diff --git a/src/share/vm/prims/jvm.cpp b/src/share/vm/prims/jvm.cpp index 079da8232..7b9c11ac5 100644 --- a/src/share/vm/prims/jvm.cpp +++ b/src/share/vm/prims/jvm.cpp @@ -3092,6 +3092,22 @@ static void thread_entry(JavaThread* thread, TRAPS) { Handle obj(THREAD, thread->threadObj()); JavaValue result(T_VOID); + if (EnableCoroutine && SystemDictionary::java_dyn_CoroutineSupport_klass() != NULL) { + InstanceKlass::cast(SystemDictionary::Class_klass())->initialize(CHECK); + InstanceKlass::cast(SystemDictionary::java_dyn_CoroutineSupport_klass())->initialize(CHECK); + JavaCalls::call_virtual(&result, + obj, + KlassHandle(THREAD, SystemDictionary::Thread_klass()), + vmSymbols::initializeCoroutineSupport_method_name(), + vmSymbols::void_method_signature(), + THREAD); + if (THREAD->has_pending_exception()) { + java_lang_Throwable::print_stack_trace(THREAD->pending_exception(), tty); + THREAD->clear_pending_exception(); + vm_abort(false); + } + } + if(MultiTenant) { oop tenantContainer = java_lang_Thread::inherited_tenant_container(thread->threadObj()); diff --git a/src/share/vm/prims/nativeLookup.cpp b/src/share/vm/prims/nativeLookup.cpp index bf50bdc59..2e788112c 100644 --- a/src/share/vm/prims/nativeLookup.cpp +++ b/src/share/vm/prims/nativeLookup.cpp @@ -125,6 +125,7 @@ char* NativeLookup::long_jni_name(methodHandle method) { } extern "C" { + void JNICALL JVM_RegisterCoroutineSupportMethods(JNIEnv* env, jclass corocls); void JNICALL JVM_RegisterUnsafeMethods(JNIEnv *env, jclass unsafecls); void JNICALL JVM_RegisterMethodHandleMethods(JNIEnv *env, jclass unsafecls); void JNICALL JVM_RegisterPerfMethods(JNIEnv *env, jclass perfclass); @@ -138,6 +139,7 @@ static JNINativeMethod lookup_special_native_methods[] = { { CC"Java_sun_misc_Unsafe_registerNatives", NULL, FN_PTR(JVM_RegisterUnsafeMethods) }, { CC"Java_java_lang_invoke_MethodHandleNatives_registerNatives", NULL, FN_PTR(JVM_RegisterMethodHandleMethods) }, { CC"Java_sun_misc_Perf_registerNatives", NULL, FN_PTR(JVM_RegisterPerfMethods) }, + { CC"Java_java_dyn_CoroutineSupport_registerNatives", NULL, FN_PTR(JVM_RegisterCoroutineSupportMethods)}, { CC"Java_sun_hotspot_WhiteBox_registerNatives", NULL, FN_PTR(JVM_RegisterWhiteBoxMethods) }, #if INCLUDE_JFR { CC"Java_jdk_jfr_internal_JVM_registerNatives", NULL, FN_PTR(jfr_register_natives) }, @@ -149,6 +151,10 @@ static address lookup_special_native(char* jni_name) { for (int i = 0; i < count; i++) { // NB: To ignore the jni prefix and jni postfix strstr is used matching. 
if (strstr(jni_name, lookup_special_native_methods[i].name) != NULL) { + if (!EnableCoroutine && FN_PTR(JVM_RegisterCoroutineSupportMethods) == + lookup_special_native_methods[i].fnPtr) { + continue; + } return CAST_FROM_FN_PTR(address, lookup_special_native_methods[i].fnPtr); } } diff --git a/src/share/vm/prims/unsafe.cpp b/src/share/vm/prims/unsafe.cpp index ef22e4a97..ea8f32a4e 100644 --- a/src/share/vm/prims/unsafe.cpp +++ b/src/share/vm/prims/unsafe.cpp @@ -29,9 +29,11 @@ #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" #endif // INCLUDE_ALL_GCS #include "jfr/jfrEvents.hpp" +#include "compiler/compileBroker.hpp" #include "memory/allocation.inline.hpp" #include "prims/jni.h" #include "prims/jvm.h" +#include "runtime/coroutine.hpp" #include "runtime/globals.hpp" #include "runtime/interfaceSupport.hpp" #include "runtime/prefetch.inline.hpp" @@ -1358,6 +1360,107 @@ UNSAFE_ENTRY(void, Unsafe_PrefetchWrite(JNIEnv* env, jclass ignored, jobject obj Prefetch::write(addr, (intx)offset); UNSAFE_END +jlong CoroutineSupport_getThreadCoroutine(JNIEnv* env, jclass klass) { + DEBUG_CORO_PRINT("CoroutineSupport_getThreadCoroutine\n"); + + JavaThread* THREAD = JavaThread::thread_from_jni_environment(env); + Coroutine* list = THREAD->coroutine_list(); + assert(list != NULL, "thread isn't initialized for coroutines"); + + return (jlong)list; +} + +void CoroutineSupport_switchTo(JNIEnv* env, jclass klass, jobject old_coroutine, jobject target_coroutine) { + ShouldNotReachHere(); +} + +void CoroutineSupport_switchToAndTerminate(JNIEnv* env, jclass klass, jobject old_coroutine, jobject target_coroutine) { + JavaThread* THREAD = JavaThread::thread_from_jni_environment(env); + + assert(old_coroutine != NULL, "NULL old CoroutineBase in switchToAndTerminate"); + assert(target_coroutine == NULL, "expecting NULL"); + + oop old_oop = JNIHandles::resolve(old_coroutine); + Coroutine* coro = (Coroutine*)java_dyn_CoroutineBase::data(old_oop); + assert(coro != NULL, "NULL old coroutine in switchToAndTerminate"); + + java_dyn_CoroutineBase::set_data(old_oop, 0); + + CoroutineStack* stack = coro->stack(); + stack->remove_from_list(THREAD->coroutine_stack_list()); + if (THREAD->coroutine_stack_cache_size() < MaxFreeCoroutinesCacheSize) { + stack->insert_into_list(THREAD->coroutine_stack_cache()); + THREAD->coroutine_stack_cache_size() ++; + } else { + CoroutineStack::free_stack(stack, THREAD); + } + Coroutine::free_coroutine(coro, THREAD); +} + +void CoroutineSupport_switchToAndExit(JNIEnv* env, jclass klass, jobject old_coroutine, jobject target_coroutine) { + JavaThread* THREAD = JavaThread::thread_from_jni_environment(env); + + { + ThreadInVMfromNative tivm(THREAD); + HandleMark mark(THREAD); + THROW(vmSymbols::java_dyn_CoroutineExitException()); + } +} + +jlong CoroutineSupport_createCoroutine(JNIEnv* env, jclass klass, jobject coroutine, jlong stack_size) { + DEBUG_CORO_PRINT("CoroutineSupport_createCoroutine\n"); + + assert(coroutine != NULL, "cannot create coroutine with NULL Coroutine object"); + + JavaThread* THREAD = JavaThread::thread_from_jni_environment(env); + ThreadInVMfromNative tivm(THREAD); + + if (stack_size == 0 || stack_size < -1) { + THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "invalid stack size"); + } + CoroutineStack* stack = NULL; + if (stack_size <= 0 && THREAD->coroutine_stack_cache_size() > 0) { + stack = THREAD->coroutine_stack_cache(); + stack->remove_from_list(THREAD->coroutine_stack_cache()); + THREAD->coroutine_stack_cache_size() --; + 
DEBUG_CORO_ONLY(tty->print("reused coroutine stack at %08x\n", stack->_stack_base)); + } else { + stack = CoroutineStack::create_stack(THREAD, stack_size); + if (stack == NULL) { + THROW_0(vmSymbols::java_lang_OutOfMemoryError()); + } + } + stack->insert_into_list(THREAD->coroutine_stack_list()); + + Coroutine* coro = Coroutine::create_coroutine(THREAD, stack, JNIHandles::resolve(coroutine)); + if (coro == NULL) { + ThreadInVMfromNative tivm(THREAD); + HandleMark mark(THREAD); + THROW_0(vmSymbols::java_lang_OutOfMemoryError()); + } + coro->insert_into_list(THREAD->coroutine_list()); + return (jlong)coro; +} + +jboolean CoroutineSupport_isDisposable(JNIEnv* env, jclass klass, jlong coroutineLong) { + DEBUG_CORO_PRINT("CoroutineSupport_isDisposable\n"); + + JavaThread* THREAD = JavaThread::thread_from_jni_environment(env); + Coroutine* coro = (Coroutine*)coroutineLong; + assert(coro != NULL, "cannot free NULL coroutine"); + assert(!coro->is_thread_coroutine(), "cannot free thread coroutine"); + + return coro->is_disposable(); +} + +jobject CoroutineSupport_cleanupCoroutine(JNIEnv* env, jclass klass) { + DEBUG_CORO_PRINT("CoroutineSupport_cleanupCoroutine\n"); + + JavaThread* THREAD = JavaThread::thread_from_jni_environment(env); + // TODO: implementation needed... + + return NULL; +} /// JVM_RegisterUnsafeMethods @@ -1697,6 +1800,22 @@ JNINativeMethod fence_methods[] = { {CC "fullFence", CC "()V", FN_PTR(Unsafe_FullFence)}, }; +#define COBA "Ljava/dyn/CoroutineBase;" + +JNINativeMethod coroutine_support_methods[] = { + {CC"getThreadCoroutine", CC"()J", FN_PTR(CoroutineSupport_getThreadCoroutine)}, + {CC"createCoroutine", CC"("COBA"J)J", FN_PTR(CoroutineSupport_createCoroutine)}, + {CC"isDisposable", CC"(J)Z", FN_PTR(CoroutineSupport_isDisposable)}, + {CC"switchTo", CC"("COBA COBA")V", FN_PTR(CoroutineSupport_switchTo)}, + {CC"switchToAndTerminate", CC"("COBA COBA")V", FN_PTR(CoroutineSupport_switchToAndTerminate)}, + {CC"switchToAndExit", CC"("COBA COBA")V", FN_PTR(CoroutineSupport_switchToAndExit)}, + {CC"cleanupCoroutine", CC"()"COBA, FN_PTR(CoroutineSupport_cleanupCoroutine)}, +}; + +#define COMPILE_CORO_METHODS_FROM (3) + +#undef COBA + #undef CC #undef FN_PTR @@ -1798,3 +1917,31 @@ JVM_ENTRY(void, JVM_RegisterUnsafeMethods(JNIEnv *env, jclass unsafecls)) register_natives("1.8 fence methods", env, unsafecls, fence_methods, sizeof(fence_methods)/sizeof(JNINativeMethod)); } JVM_END + +JVM_ENTRY(void, JVM_RegisterCoroutineSupportMethods(JNIEnv *env, jclass corocls)) + UnsafeWrapper("JVM_RegisterCoroutineSupportMethods"); + { + assert(EnableCoroutine, "coroutine not enabled"); + + ThreadToNativeFromVM ttnfv(thread); + { + int coro_method_count = (int)(sizeof(coroutine_support_methods)/sizeof(JNINativeMethod)); + + for (int i=0; iRegisterNatives(corocls, coroutine_support_methods + i, 1); + if (env->ExceptionOccurred()) { + tty->print_cr("Warning: Coroutine classes not found (%i)", i); + vm_exit(1); + } + } + for (int i=COMPILE_CORO_METHODS_FROM; iGetStaticMethodID(corocls, coroutine_support_methods[i].name, coroutine_support_methods[i].signature); + { + ThreadInVMfromNative tivfn(thread); + methodHandle method(Method::resolve_jmethod_id(id)); + AdapterHandlerLibrary::create_native_wrapper(method); + } + } + } + } +JVM_END diff --git a/src/share/vm/runtime/coroutine.cpp b/src/share/vm/runtime/coroutine.cpp new file mode 100644 index 000000000..121891ffa --- /dev/null +++ b/src/share/vm/runtime/coroutine.cpp @@ -0,0 +1,333 @@ +/* + * Copyright 2001-2010 Sun Microsystems, Inc. 
All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +#include "precompiled.hpp" +#include "runtime/coroutine.hpp" +#ifdef TARGET_ARCH_x86 +# include "vmreg_x86.inline.hpp" +#endif +#ifdef TARGET_ARCH_sparc +# include "vmreg_sparc.inline.hpp" +#endif +#ifdef TARGET_ARCH_zero +# include "vmreg_zero.inline.hpp" +#endif + + +#ifdef _WINDOWS + +LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo); + + +void coroutine_start(Coroutine* coroutine, jobject coroutineObj) { + coroutine->thread()->set_thread_state(_thread_in_vm); + + if (UseVectoredExceptions) { + // If we are using vectored exception we don't need to set a SEH + coroutine->run(coroutineObj); + } + else { + // Install a win32 structured exception handler around every thread created + // by VM, so VM can genrate error dump when an exception occurred in non- + // Java thread (e.g. VM thread). + __try { + coroutine->run(coroutineObj); + } __except(topLevelExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) { + } + } + + ShouldNotReachHere(); +} +#endif + +#if defined(LINUX) || defined(_ALLBSD_SOURCE) + +void coroutine_start(Coroutine* coroutine, jobject coroutineObj) { + coroutine->thread()->set_thread_state(_thread_in_vm); + + coroutine->run(coroutineObj); + ShouldNotReachHere(); +} +#endif + +void Coroutine::run(jobject coroutine) { + + // do not call JavaThread::current() here! 
+ + _thread->set_resource_area(new (mtThread) ResourceArea(32)); + _thread->set_handle_area(new (mtThread) HandleArea(NULL, 32)); + + { + HandleMark hm(_thread); + HandleMark hm2(_thread); + Handle obj(_thread, JNIHandles::resolve(coroutine)); + JNIHandles::destroy_global(coroutine); + JavaValue result(T_VOID); + JavaCalls::call_virtual(&result, + obj, + KlassHandle(_thread, SystemDictionary::java_dyn_CoroutineBase_klass()), + vmSymbols::startInternal_method_name(), + vmSymbols::void_method_signature(), + _thread); + } +} + +Coroutine* Coroutine::create_thread_coroutine(JavaThread* thread, CoroutineStack* stack) { + Coroutine* coro = new Coroutine(); + if (coro == NULL) + return NULL; + + coro->_state = _current; + coro->_is_thread_coroutine = true; + coro->_thread = thread; + coro->_stack = stack; + coro->_resource_area = NULL; + coro->_handle_area = NULL; + coro->_last_handle_mark = NULL; +#ifdef ASSERT + coro->_java_call_counter = 0; +#endif +#if defined(_WINDOWS) + coro->_last_SEH = NULL; +#endif + return coro; +} + +Coroutine* Coroutine::create_coroutine(JavaThread* thread, CoroutineStack* stack, oop coroutineObj) { + Coroutine* coro = new Coroutine(); + if (coro == NULL) { + return NULL; + } + + intptr_t** d = (intptr_t**)stack->stack_base(); + *(--d) = NULL; + jobject obj = JNIHandles::make_global(coroutineObj); + *(--d) = (intptr_t*)obj; + *(--d) = (intptr_t*)coro; + *(--d) = NULL; + *(--d) = (intptr_t*)coroutine_start; + *(--d) = NULL; + + stack->set_last_sp((address) d); + + coro->_state = _onstack; + coro->_is_thread_coroutine = false; + coro->_thread = thread; + coro->_stack = stack; + coro->_resource_area = NULL; + coro->_handle_area = NULL; + coro->_last_handle_mark = NULL; +#ifdef ASSERT + coro->_java_call_counter = 0; +#endif +#if defined(_WINDOWS) + coro->_last_SEH = NULL; +#endif + return coro; +} + +void Coroutine::free_coroutine(Coroutine* coroutine, JavaThread* thread) { + coroutine->remove_from_list(thread->coroutine_list()); + delete coroutine; +} + +void Coroutine::frames_do(FrameClosure* fc) { + switch (_state) { + case Coroutine::_current: + // the contents of this coroutine have already been visited + break; + case Coroutine::_onstack: + _stack->frames_do(fc); + break; + case Coroutine::_dead: + // coroutine is dead, ignore + break; + } +} + +class oops_do_Closure: public FrameClosure { +private: + OopClosure* _f; + CodeBlobClosure* _cf; + CLDClosure* _cld_f; +public: + oops_do_Closure(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf): _f(f), _cld_f(cld_f), _cf(cf) { } + void frames_do(frame* fr, RegisterMap* map) { fr->oops_do(_f, _cld_f, _cf, map); } +}; + +void Coroutine::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) { + oops_do_Closure fc(f, cld_f, cf); + frames_do(&fc); + if (_state == _onstack &&_handle_area != NULL) { + DEBUG_CORO_ONLY(tty->print_cr("collecting handle area %08x", _handle_area)); + _handle_area->oops_do(f); + } +} + +class nmethods_do_Closure: public FrameClosure { +private: + CodeBlobClosure* _cf; +public: + nmethods_do_Closure(CodeBlobClosure* cf): _cf(cf) { } + void frames_do(frame* fr, RegisterMap* map) { fr->nmethods_do(_cf); } +}; + +void Coroutine::nmethods_do(CodeBlobClosure* cf) { + nmethods_do_Closure fc(cf); + frames_do(&fc); +} + +class metadata_do_Closure: public FrameClosure { +private: + void (*_f)(Metadata*); +public: + metadata_do_Closure(void f(Metadata*)): _f(f) { } + void frames_do(frame* fr, RegisterMap* map) { fr->metadata_do(_f); } +}; + +void Coroutine::metadata_do(void f(Metadata*)) { + 
metadata_do_Closure fc(f); + frames_do(&fc); +} + +class frames_do_Closure: public FrameClosure { +private: + void (*_f)(frame*, const RegisterMap*); +public: + frames_do_Closure(void f(frame*, const RegisterMap*)): _f(f) { } + void frames_do(frame* fr, RegisterMap* map) { _f(fr, map); } +}; + +void Coroutine::frames_do(void f(frame*, const RegisterMap* map)) { + frames_do_Closure fc(f); + frames_do(&fc); +} + +bool Coroutine::is_disposable() { + return false; +} + + +CoroutineStack* CoroutineStack::create_thread_stack(JavaThread* thread) { + CoroutineStack* stack = new CoroutineStack(0); + if (stack == NULL) + return NULL; + + stack->_thread = thread; + stack->_is_thread_stack = true; + stack->_stack_base = thread->stack_base(); + stack->_stack_size = thread->stack_size(); + stack->_last_sp = NULL; + stack->_default_size = false; + return stack; +} + +CoroutineStack* CoroutineStack::create_stack(JavaThread* thread, intptr_t size/* = -1*/) { + bool default_size = false; + if (size <= 0) { + size = DefaultCoroutineStackSize; + default_size = true; + } + + uint reserved_pages = StackShadowPages + StackRedPages + StackYellowPages; + uintx real_stack_size = size + (reserved_pages * os::vm_page_size()); + uintx reserved_size = align_size_up(real_stack_size, os::vm_allocation_granularity()); + + CoroutineStack* stack = new CoroutineStack(reserved_size); + if (stack == NULL) + return NULL; + if (!stack->_virtual_space.initialize(stack->_reserved_space, real_stack_size)) { + stack->_reserved_space.release(); + delete stack; + return NULL; + } + + stack->_thread = thread; + stack->_is_thread_stack = false; + stack->_stack_base = (address)stack->_virtual_space.high(); + stack->_stack_size = stack->_virtual_space.committed_size(); + stack->_last_sp = NULL; + stack->_default_size = default_size; + + if (os::uses_stack_guard_pages()) { + address low_addr = stack->stack_base() - stack->stack_size(); + size_t len = (StackYellowPages + StackRedPages) * os::vm_page_size(); + + bool allocate = os::allocate_stack_guard_pages(); + + if (!os::guard_memory((char *) low_addr, len)) { + warning("Attempt to protect stack guard pages failed."); + if (os::uncommit_memory((char *) low_addr, len)) { + warning("Attempt to deallocate stack guard pages failed."); + } + } + } + + ThreadLocalStorage::add_coroutine_stack(thread, stack->stack_base(), stack->stack_size()); + DEBUG_CORO_ONLY(tty->print("created coroutine stack at %08x with stack size %i (real size: %i)\n", stack->_stack_base, size, stack->_stack_size)); + return stack; +} + +void CoroutineStack::free_stack(CoroutineStack* stack, JavaThread* thread) { + guarantee(!stack->is_thread_stack(), "cannot free thread stack"); + ThreadLocalStorage::remove_coroutine_stack(thread, stack->stack_base(), stack->stack_size()); + + if (stack->_reserved_space.size() > 0) { + stack->_virtual_space.release(); + stack->_reserved_space.release(); + } + delete stack; +} + +void CoroutineStack::frames_do(FrameClosure* fc) { + assert(_last_sp != NULL, "CoroutineStack with NULL last_sp"); + + DEBUG_CORO_ONLY(tty->print_cr("frames_do stack %08x", _stack_base)); + + intptr_t* fp = ((intptr_t**)_last_sp)[0]; + if (fp != NULL) { + address pc = ((address*)_last_sp)[1]; + intptr_t* sp = ((intptr_t*)_last_sp) + 2; + + frame fr(sp, fp, pc); + StackFrameStream fst(_thread, fr); + fst.register_map()->set_location(rbp->as_VMReg(), (address)_last_sp); + fst.register_map()->set_include_argument_oops(false); + for(; !fst.is_done(); fst.next()) { + fc->frames_do(fst.current(), fst.register_map()); + 
} + } +} + +frame CoroutineStack::last_frame(Coroutine* coro, RegisterMap& map) const { + DEBUG_CORO_ONLY(tty->print_cr("last_frame CoroutineStack")); + + intptr_t* fp = ((intptr_t**)_last_sp)[0]; + assert(fp != NULL, "coroutine with NULL fp"); + + address pc = ((address*)_last_sp)[1]; + intptr_t* sp = ((intptr_t*)_last_sp) + 2; + + return frame(sp, fp, pc); +} diff --git a/src/share/vm/runtime/coroutine.hpp b/src/share/vm/runtime/coroutine.hpp new file mode 100644 index 000000000..cfd2f3eb6 --- /dev/null +++ b/src/share/vm/runtime/coroutine.hpp @@ -0,0 +1,245 @@ +/* + * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +#ifndef SHARE_VM_RUNTIME_COROUTINE_HPP +#define SHARE_VM_RUNTIME_COROUTINE_HPP + +#include "runtime/jniHandles.hpp" +#include "runtime/handles.hpp" +#include "memory/allocation.hpp" +#include "memory/resourceArea.hpp" +#include "runtime/javaFrameAnchor.hpp" +#include "runtime/monitorChunk.hpp" + +// number of heap words that prepareSwitch will add as a safety measure to the CoroutineData size +#define COROUTINE_DATA_OVERSIZE (64) + +//#define DEBUG_COROUTINES + +#ifdef DEBUG_COROUTINES +#define DEBUG_CORO_ONLY(x) x +#define DEBUG_CORO_PRINT(x) tty->print(x) +#else +#define DEBUG_CORO_ONLY(x) +#define DEBUG_CORO_PRINT(x) +#endif + +class Coroutine; +class CoroutineStack; + + +template +class DoublyLinkedList { +private: + T* _last; + T* _next; + +public: + DoublyLinkedList() { + _last = NULL; + _next = NULL; + } + + typedef T* pointer; + + void remove_from_list(pointer& list); + void insert_into_list(pointer& list); + + T* last() const { return _last; } + T* next() const { return _next; } +}; + +class FrameClosure: public StackObj { +public: + virtual void frames_do(frame* fr, RegisterMap* map) = 0; +}; + + +class Coroutine: public CHeapObj, public DoublyLinkedList { +public: + enum CoroutineState { + _onstack = 0x00000001, + _current = 0x00000002, + _dead = 0x00000003, // TODO is this really needed? 
+ _dummy = 0xffffffff + }; + +private: + CoroutineState _state; + + bool _is_thread_coroutine; + + JavaThread* _thread; + CoroutineStack* _stack; + + ResourceArea* _resource_area; + HandleArea* _handle_area; + HandleMark* _last_handle_mark; +#ifdef ASSERT + int _java_call_counter; +#endif + +#ifdef _LP64 + intptr_t _storage[2]; +#endif + + // objects of this type can only be created via static functions + Coroutine() { } + virtual ~Coroutine() { } + + void frames_do(FrameClosure* fc); + +public: + void run(jobject coroutine); + + static Coroutine* create_thread_coroutine(JavaThread* thread, CoroutineStack* stack); + static Coroutine* create_coroutine(JavaThread* thread, CoroutineStack* stack, oop coroutineObj); + static void free_coroutine(Coroutine* coroutine, JavaThread* thread); + + CoroutineState state() const { return _state; } + void set_state(CoroutineState x) { _state = x; } + + bool is_thread_coroutine() const { return _is_thread_coroutine; } + + JavaThread* thread() const { return _thread; } + void set_thread(JavaThread* x) { _thread = x; } + + CoroutineStack* stack() const { return _stack; } + + ResourceArea* resource_area() const { return _resource_area; } + void set_resource_area(ResourceArea* x) { _resource_area = x; } + + HandleArea* handle_area() const { return _handle_area; } + void set_handle_area(HandleArea* x) { _handle_area = x; } + + HandleMark* last_handle_mark() const { return _last_handle_mark; } + void set_last_handle_mark(HandleMark* x){ _last_handle_mark = x; } + +#ifdef ASSERT + int java_call_counter() const { return _java_call_counter; } + void set_java_call_counter(int x) { _java_call_counter = x; } +#endif + + bool is_disposable(); + + // GC support + void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf); + void nmethods_do(CodeBlobClosure* cf); + void metadata_do(void f(Metadata*)); + void frames_do(void f(frame*, const RegisterMap* map)); + + static ByteSize state_offset() { return byte_offset_of(Coroutine, _state); } + static ByteSize stack_offset() { return byte_offset_of(Coroutine, _stack); } + + static ByteSize resource_area_offset() { return byte_offset_of(Coroutine, _resource_area); } + static ByteSize handle_area_offset() { return byte_offset_of(Coroutine, _handle_area); } + static ByteSize last_handle_mark_offset() { return byte_offset_of(Coroutine, _last_handle_mark); } +#ifdef ASSERT + static ByteSize java_call_counter_offset() { return byte_offset_of(Coroutine, _java_call_counter); } +#endif + +#ifdef _LP64 + static ByteSize storage_offset() { return byte_offset_of(Coroutine, _storage); } +#endif + +#if defined(_WINDOWS) +private: + address _last_SEH; +public: + static ByteSize last_SEH_offset() { return byte_offset_of(Coroutine, _last_SEH); } +#endif +}; + +class CoroutineStack: public CHeapObj, public DoublyLinkedList { +private: + JavaThread* _thread; + + bool _is_thread_stack; + ReservedSpace _reserved_space; + VirtualSpace _virtual_space; + + address _stack_base; + intptr_t _stack_size; + bool _default_size; + + address _last_sp; + + // objects of this type can only be created via static functions + CoroutineStack(intptr_t size) : _reserved_space(size) { } + virtual ~CoroutineStack() { } + +public: + static CoroutineStack* create_thread_stack(JavaThread* thread); + static CoroutineStack* create_stack(JavaThread* thread, intptr_t size = -1); + static void free_stack(CoroutineStack* stack, JavaThread* THREAD); + + static intptr_t get_start_method(); + + JavaThread* thread() const { return _thread; } + bool is_thread_stack() 
const { return _is_thread_stack; } + + address last_sp() const { return _last_sp; } + void set_last_sp(address x) { _last_sp = x; } + + address stack_base() const { return _stack_base; } + intptr_t stack_size() const { return _stack_size; } + bool is_default_size() const { return _default_size; } + + frame last_frame(Coroutine* coro, RegisterMap& map) const; + + // GC support + void frames_do(FrameClosure* fc); + + static ByteSize stack_base_offset() { return byte_offset_of(CoroutineStack, _stack_base); } + static ByteSize stack_size_offset() { return byte_offset_of(CoroutineStack, _stack_size); } + static ByteSize last_sp_offset() { return byte_offset_of(CoroutineStack, _last_sp); } +}; + +template void DoublyLinkedList::remove_from_list(pointer& list) { + if (list == this) { + if (list->_next == list) + list = NULL; + else + list = list->_next; + } + _last->_next = _next; + _next->_last = _last; + _last = NULL; + _next = NULL; +} + +template void DoublyLinkedList::insert_into_list(pointer& list) { + if (list == NULL) { + _next = (T*)this; + _last = (T*)this; + list = (T*)this; + } else { + _next = list->_next; + list->_next = (T*)this; + _last = list; + _next->_last = (T*)this; + } +} + +#endif // SHARE_VM_RUNTIME_COROUTINE_HPP diff --git a/src/share/vm/runtime/frame.cpp b/src/share/vm/runtime/frame.cpp index 86ee69c80..76bd2a0aa 100644 --- a/src/share/vm/runtime/frame.cpp +++ b/src/share/vm/runtime/frame.cpp @@ -1458,6 +1458,11 @@ StackFrameStream::StackFrameStream(JavaThread *thread, bool update) : _reg_map(t _is_done = false; } +StackFrameStream::StackFrameStream(JavaThread *thread, frame last_frame, bool update) : _reg_map(thread, update) { + _fr = last_frame; + _is_done = false; +} + #ifndef PRODUCT diff --git a/src/share/vm/runtime/frame.hpp b/src/share/vm/runtime/frame.hpp index 04cc8a496..b930195c0 100644 --- a/src/share/vm/runtime/frame.hpp +++ b/src/share/vm/runtime/frame.hpp @@ -559,6 +559,7 @@ class StackFrameStream : public StackObj { bool _is_done; public: StackFrameStream(JavaThread *thread, bool update = true); + StackFrameStream(JavaThread *thread, frame last_frame, bool update = true); // Iteration bool is_done() { return (_is_done) ? 
true : (_is_done = _fr.is_first_frame(), false); } diff --git a/src/share/vm/runtime/globals_ext.hpp b/src/share/vm/runtime/globals_ext.hpp index 03802c736..208b8e472 100644 --- a/src/share/vm/runtime/globals_ext.hpp +++ b/src/share/vm/runtime/globals_ext.hpp @@ -114,7 +114,15 @@ \ product(bool, TenantDataIsolation, false, \ "Enable data isolation(e.g static vairable) per tenant") \ - + \ + product(bool, EnableCoroutine, false, \ + "Enable coroutine support") \ + \ + product(uintx, DefaultCoroutineStackSize, 128*K, \ + "Default size of stack that is associated with new coroutine") \ + \ + product(uintx, MaxFreeCoroutinesCacheSize, 20, \ + "The max number of free coroutine stacks a thread can keep") \ //add new AJVM specific flags here diff --git a/src/share/vm/runtime/handles.cpp b/src/share/vm/runtime/handles.cpp index 5d0886431..db57d60b8 100644 --- a/src/share/vm/runtime/handles.cpp +++ b/src/share/vm/runtime/handles.cpp @@ -124,6 +124,23 @@ void HandleMark::initialize(Thread* thread) { thread->set_last_handle_mark(this); } +HandleMark::HandleMark(Thread* thread, HandleArea* area, HandleMark* last_handle_mark) { + _thread = thread; + // Save area + _area = area; + // Save current top + _chunk = _area->_chunk; + _hwm = _area->_hwm; + _max = _area->_max; + NOT_PRODUCT(_size_in_bytes = _area->_size_in_bytes;) + debug_only(_area->_handle_mark_nesting++); + assert(_area->_handle_mark_nesting > 0, "must stack allocate HandleMarks"); + debug_only(Atomic::inc(&_nof_handlemarks);) + + // Link this in the thread + set_previous_handle_mark(last_handle_mark); +} + HandleMark::~HandleMark() { HandleArea* area = _area; // help compilers with poor alias analysis diff --git a/src/share/vm/runtime/handles.hpp b/src/share/vm/runtime/handles.hpp index 8f3d6c1f5..97d51da79 100644 --- a/src/share/vm/runtime/handles.hpp +++ b/src/share/vm/runtime/handles.hpp @@ -233,6 +233,13 @@ class HandleArea: public Arena { _prev = prev; } + HandleArea(HandleArea* prev, size_t init_size) : Arena(mtThread, init_size) { + assert(EnableCoroutine, "EnableCoroutine is off"); + debug_only(_handle_mark_nesting = 0); + debug_only(_no_handle_mark_nesting = 0); + _prev = prev; + } + // Handle allocation private: oop* real_allocate_handle(oop obj) { @@ -301,6 +308,7 @@ class HandleMark { public: HandleMark(); // see handles_inline.hpp HandleMark(Thread* thread) { initialize(thread); } + HandleMark(Thread* thread, HandleArea* area, HandleMark* last_handle_mark); ~HandleMark(); // Functions used by HandleMarkCleaner diff --git a/src/share/vm/runtime/javaCalls.cpp b/src/share/vm/runtime/javaCalls.cpp index 12925a9aa..21d514c5c 100644 --- a/src/share/vm/runtime/javaCalls.cpp +++ b/src/share/vm/runtime/javaCalls.cpp @@ -107,6 +107,15 @@ JavaCallWrapper::JavaCallWrapper(methodHandle callee_method, Handle receiver, Ja } } +void JavaCallWrapper::initialize(JavaThread* thread, JNIHandleBlock* handles, Method* callee_method, oop receiver, JavaValue* result) { + _thread = thread; + _handles = handles; + _callee_method = callee_method; + _receiver = receiver; + _result = result; + _anchor.clear(); +} + JavaCallWrapper::~JavaCallWrapper() { assert(_thread == JavaThread::current(), "must still be the same thread"); diff --git a/src/share/vm/runtime/javaCalls.hpp b/src/share/vm/runtime/javaCalls.hpp index 4af6a8c4b..541955844 100644 --- a/src/share/vm/runtime/javaCalls.hpp +++ b/src/share/vm/runtime/javaCalls.hpp @@ -68,6 +68,8 @@ class JavaCallWrapper: StackObj { JavaCallWrapper(methodHandle callee_method, Handle receiver, JavaValue* result, 
TRAPS); ~JavaCallWrapper(); + void initialize(JavaThread* thread, JNIHandleBlock* handles, Method* callee_method, oop receiver, JavaValue* result); + // Accessors JavaThread* thread() const { return _thread; } JNIHandleBlock* handles() const { return _handles; } diff --git a/src/share/vm/runtime/thread.cpp b/src/share/vm/runtime/thread.cpp index 5a9d8b1ea..1996b0743 100644 --- a/src/share/vm/runtime/thread.cpp +++ b/src/share/vm/runtime/thread.cpp @@ -50,6 +50,7 @@ #include "prims/privilegedStack.hpp" #include "runtime/arguments.hpp" #include "runtime/biasedLocking.hpp" +#include "runtime/coroutine.hpp" #include "runtime/deoptimization.hpp" #include "runtime/fprofiler.hpp" #include "runtime/frame.inline.hpp" @@ -1494,6 +1495,12 @@ void JavaThread::initialize() { _interp_only_mode = 0; _special_runtime_exit_condition = _no_async_condition; _pending_async_exception = NULL; + + _coroutine_stack_cache = NULL; + _coroutine_stack_cache_size = 0; + _coroutine_stack_list = NULL; + _coroutine_list = NULL; + _thread_stat = NULL; _thread_stat = new ThreadStatistics(); _blocked_on_compilation = false; @@ -1629,6 +1636,13 @@ JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz) : } JavaThread::~JavaThread() { + + while (EnableCoroutine && coroutine_stack_cache() != NULL) { + CoroutineStack* stack = coroutine_stack_cache(); + stack->remove_from_list(coroutine_stack_cache()); + CoroutineStack::free_stack(stack, this); + } + if (TraceThreadEvents) { tty->print_cr("terminate thread %p", this); } @@ -1678,6 +1692,9 @@ void JavaThread::run() { // Record real stack base and size. this->record_stack_base_and_size(); + if (EnableCoroutine) { + this->initialize_coroutine_support(); + } // Initialize thread local storage; set before calling MutexLocker this->initialize_thread_local_storage(); @@ -2840,6 +2857,14 @@ void JavaThread::frames_do(void f(frame*, const RegisterMap* map)) { frame* fr = fst.current(); f(fr, fst.register_map()); } + if (EnableCoroutine) { + // traverse the coroutine stack frames + Coroutine* current = _coroutine_list; + do { + current->frames_do(f); + current = current->next(); + } while (current != _coroutine_list); + } } @@ -3003,6 +3028,15 @@ void JavaThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) } } + + if (EnableCoroutine) { + Coroutine* current = _coroutine_list; + do { + current->oops_do(f, cld_f, cf); + current = current->next(); + } while (current != _coroutine_list); + } + // callee_target is never live across a gc point so NULL it here should // it still contain a methdOop. @@ -3043,6 +3077,14 @@ void JavaThread::nmethods_do(CodeBlobClosure* cf) { fst.current()->nmethods_do(cf); } } + + if (EnableCoroutine) { + Coroutine* current = _coroutine_list; + do { + current->nmethods_do(cf); + current = current->next(); + } while (current != _coroutine_list); + } } void JavaThread::metadata_do(void f(Metadata*)) { @@ -3059,6 +3101,14 @@ void JavaThread::metadata_do(void f(Metadata*)) { ct->env()->metadata_do(f); } } + + if (EnableCoroutine) { + Coroutine* current = _coroutine_list; + do { + current->metadata_do(f); + current = current->next(); + } while (current != _coroutine_list); + } } // Printing @@ -3652,6 +3702,9 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) { // stacksize. This adjusted size is what is used to figure the placement // of the guard pages. 
main_thread->record_stack_base_and_size(); + if (EnableCoroutine) { + main_thread->initialize_coroutine_support(); + } main_thread->initialize_thread_local_storage(); main_thread->set_active_handles(JNIHandleBlock::allocate_block()); @@ -4979,3 +5032,8 @@ void Threads::verify() { VMThread* thread = VMThread::vm_thread(); if (thread != NULL) thread->verify(); } + +void JavaThread::initialize_coroutine_support() { + CoroutineStack::create_thread_stack(this)->insert_into_list(_coroutine_stack_list); + Coroutine::create_thread_coroutine(this, _coroutine_stack_list)->insert_into_list(_coroutine_list); +} \ No newline at end of file diff --git a/src/share/vm/runtime/thread.hpp b/src/share/vm/runtime/thread.hpp index ee6316fd6..6f49aa6a3 100644 --- a/src/share/vm/runtime/thread.hpp +++ b/src/share/vm/runtime/thread.hpp @@ -94,6 +94,10 @@ DEBUG_ONLY(class ResourceMark;) class WorkerThread; + +class Coroutine; +class CoroutineStack; + // Class hierarchy // - Thread // - NamedThread @@ -627,6 +631,10 @@ protected: void leaving_jvmti_env_iteration() { --_jvmti_env_iteration_count; } bool is_inside_jvmti_env_iteration() { return _jvmti_env_iteration_count > 0; } + static ByteSize resource_area_offset() { return byte_offset_of(Thread, _resource_area); } + static ByteSize handle_area_offset() { return byte_offset_of(Thread, _handle_area); } + static ByteSize last_handle_mark_offset() { return byte_offset_of(Thread, _last_handle_mark); } + // Code generation static ByteSize exception_file_offset() { return byte_offset_of(Thread, _exception_file ); } static ByteSize exception_line_offset() { return byte_offset_of(Thread, _exception_line ); } @@ -967,6 +975,26 @@ class JavaThread: public Thread { // failed reallocations. int _frames_to_pop_failed_realloc; + // coroutine support + CoroutineStack* _coroutine_stack_cache; + uintx _coroutine_stack_cache_size; + CoroutineStack* _coroutine_stack_list; + Coroutine* _coroutine_list; + + intptr_t _coroutine_temp; + + public: + CoroutineStack*& coroutine_stack_cache() { return _coroutine_stack_cache; } + uintx& coroutine_stack_cache_size() { return _coroutine_stack_cache_size; } + CoroutineStack*& coroutine_stack_list() { return _coroutine_stack_list; } + Coroutine*& coroutine_list() { return _coroutine_list; } + + static ByteSize coroutine_temp_offset() { return byte_offset_of(JavaThread, _coroutine_temp); } + + void initialize_coroutine_support(); + + private: + #ifndef PRODUCT int _jmp_ring_index; struct { @@ -1411,6 +1439,9 @@ class JavaThread: public Thread { static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); } static ByteSize stack_guard_state_offset() { return byte_offset_of(JavaThread, _stack_guard_state ); } static ByteSize suspend_flags_offset() { return byte_offset_of(JavaThread, _suspend_flags ); } +#ifdef ASSERT + static ByteSize java_call_counter_offset() { return byte_offset_of(JavaThread, _java_call_counter); } +#endif static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); } static ByteSize should_post_on_exceptions_flag_offset() { diff --git a/src/share/vm/runtime/threadLocalStorage.cpp b/src/share/vm/runtime/threadLocalStorage.cpp index 30dacbb6a..5c6b9bc34 100644 --- a/src/share/vm/runtime/threadLocalStorage.cpp +++ b/src/share/vm/runtime/threadLocalStorage.cpp @@ -59,6 +59,14 @@ void ThreadLocalStorage::set_thread(Thread* thread) { guarantee(get_thread_slow() == thread, "must be the same thread, slowly"); } +void 
ThreadLocalStorage::add_coroutine_stack(Thread* thread, address stack_base, size_t stack_size) { + pd_add_coroutine_stack(thread, stack_base, stack_size); +} + +void ThreadLocalStorage::remove_coroutine_stack(Thread* thread, address stack_base, size_t stack_size) { + pd_remove_coroutine_stack(thread, stack_base, stack_size); +} + void ThreadLocalStorage::init() { assert(!is_initialized(), "More than one attempt to initialize threadLocalStorage"); diff --git a/src/share/vm/runtime/threadLocalStorage.hpp b/src/share/vm/runtime/threadLocalStorage.hpp index 4883ee5b1..c32ae6b9a 100644 --- a/src/share/vm/runtime/threadLocalStorage.hpp +++ b/src/share/vm/runtime/threadLocalStorage.hpp @@ -47,6 +47,9 @@ class ThreadLocalStorage : AllStatic { static void init(); static bool is_initialized(); + static void add_coroutine_stack(Thread* thread, address stack_base, size_t stack_size); + static void remove_coroutine_stack(Thread* thread, address stack_base, size_t stack_size); + // Machine dependent stuff #ifdef TARGET_OS_ARCH_linux_x86 # include "threadLS_linux_x86.hpp" @@ -97,6 +100,9 @@ class ThreadLocalStorage : AllStatic { static void pd_set_thread(Thread* thread); static void pd_init(); + static void pd_add_coroutine_stack(Thread* thread, address stack_base, size_t stack_size); + static void pd_remove_coroutine_stack(Thread* thread, address stack_base, size_t stack_size); + #endif // SOLARIS // Invalidate any thread cacheing or optimization schemes. diff --git a/src/share/vm/utilities/debug.cpp b/src/share/vm/utilities/debug.cpp index 4f7cbddcd..8c65fdd7e 100644 --- a/src/share/vm/utilities/debug.cpp +++ b/src/share/vm/utilities/debug.cpp @@ -112,6 +112,16 @@ void warning(const char* format, ...) { if (BreakAtWarning) BREAKPOINT; } +void warning_fixed_args(const char* message) { + // In case error happens before init or during shutdown + if (tty == NULL) ostream_init(); + + tty->print("%s warning: ", VM_Version::vm_name()); + tty->print_cr("%s", message); + if (BreakAtWarning) BREAKPOINT; +} + + #ifndef PRODUCT #define is_token_break(ch) (isspace(ch) || (ch) == ',') diff --git a/src/share/vm/utilities/debug.hpp b/src/share/vm/utilities/debug.hpp index 86fc6b18d..32a877af4 100644 --- a/src/share/vm/utilities/debug.hpp +++ b/src/share/vm/utilities/debug.hpp @@ -225,6 +225,7 @@ void report_untested(const char* file, int line, const char* message); void report_insufficient_metaspace(size_t required_size); void warning(const char* format, ...) ATTRIBUTE_PRINTF(1, 2); +void warning_fixed_args(const char* message); #ifdef ASSERT // Compile-time asserts. -- GitLab
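
For readers following the list manipulation in this patch: DoublyLinkedList<T>::insert_into_list and remove_from_list build a circular, intrusive doubly linked list whose head pointer is passed by reference, which is why JavaThread exposes its coroutine lists through reference accessors and why the GC loops added to thread.cpp use a do/while that stops only when the walk returns to the head. The sketch below is a hypothetical, standalone illustration of that pattern, not part of the patch; DemoNode, demo_list.cpp and main() are invented names introduced only for this example, but the insert/remove/traversal logic mirrors the code above.

// demo_list.cpp -- hypothetical standalone sketch, not part of the patch.
// Mirrors the circular, intrusive list built by DoublyLinkedList<T> and the
// do/while traversal shape used by the coroutine loops in thread.cpp.
#include <cstdio>

struct DemoNode {
  DemoNode* _next;
  DemoNode* _last;
  int       _id;

  explicit DemoNode(int id) : _next(NULL), _last(NULL), _id(id) {}

  // Head pointer is passed by reference: an empty list becomes one self-linked node.
  void insert_into_list(DemoNode*& list) {
    if (list == NULL) {
      _next = this;
      _last = this;
      list  = this;
    } else {
      _next = list->_next;
      list->_next = this;
      _last = list;
      _next->_last = this;
    }
  }

  // Removing the head advances the head pointer (or clears it for a single node).
  void remove_from_list(DemoNode*& list) {
    if (list == this) {
      list = (_next == this) ? NULL : _next;
    }
    _last->_next = _next;
    _next->_last = _last;
    _last = NULL;
    _next = NULL;
  }
};

int main() {
  DemoNode a(1), b(2), c(3);
  DemoNode* head = NULL;
  a.insert_into_list(head);
  b.insert_into_list(head);
  c.insert_into_list(head);

  // Circular traversal: every node is visited exactly once, stopping when the
  // walk is back at the head -- the same shape as the oops_do/frames_do loops.
  DemoNode* cur = head;
  do {
    printf("node %d\n", cur->_id);
    cur = cur->_next;
  } while (cur != head);

  b.remove_from_list(head);
  return 0;
}

Compiled on its own (e.g. g++ demo_list.cpp), the sketch visits each node once and leaves the ring consistent after a removal. In the VM the corresponding structures are only populated when the feature is switched on via the new -XX:+EnableCoroutine flag, with DefaultCoroutineStackSize and MaxFreeCoroutinesCacheSize (declared in the globals_ext.hpp hunk above) controlling coroutine stack sizing and per-thread stack caching.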