Commit 660fff36 authored by kvn

Merge

@@ -2573,6 +2573,13 @@ void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
emit_byte(0xC0 | encode);
}
void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66);
emit_byte(0x6C);
emit_byte(0xC0 | encode);
}
void Assembler::push(int32_t imm32) {
// in 64bits we push 64bits onto the stack but only
// take a 32bit immediate
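A minimal sketch of what the punpcklqdq added above computes, using the SSE2 intrinsic `_mm_unpacklo_epi64` (which compiles to punpcklqdq); the constants are illustrative only:

```cpp
#include <emmintrin.h>   // SSE2
#include <cstdio>

int main() {
    __m128i dst = _mm_set_epi64x(0x1111, 0x2222);  // high, low quadwords
    __m128i src = _mm_set_epi64x(0x3333, 0x4444);
    // punpcklqdq: keep the low quadword of dst, copy the low quadword
    // of src into the high quadword.
    __m128i r = _mm_unpacklo_epi64(dst, src);
    long long out[2];
    _mm_storeu_si128((__m128i*)out, r);
    std::printf("%llx %llx\n", out[0], out[1]);    // prints: 2222 4444
    return 0;
}
```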
@@ -3178,6 +3185,13 @@ void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool v
emit_byte(0xC0 | encode);
}
void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
assert(VM_Version::supports_avx2() || (!vector256) && VM_Version::supports_avx(), "");
int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256);
emit_byte(0xEF);
emit_byte(0xC0 | encode);
}
void Assembler::vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
assert(VM_Version::supports_avx(), "");
bool vector256 = true;
@@ -3189,6 +3203,17 @@ void Assembler::vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src)
emit_byte(0x01);
}
void Assembler::vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
assert(VM_Version::supports_avx2(), "");
bool vector256 = true;
int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
emit_byte(0x38);
emit_byte(0xC0 | encode);
// 0x00 - insert into lower 128 bits
// 0x01 - insert into upper 128 bits
emit_byte(0x01);
}
void Assembler::vzeroupper() {
assert(VM_Version::supports_avx(), "");
(void)vex_prefix_and_encode(xmm0, xmm0, xmm0, VEX_SIMD_NONE);
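The hard-coded 0x01 immediate in vinserti128h above selects the upper half of the YMM register. A hedged equivalent in intrinsic form (AVX2; `_mm256_inserti128_si256` compiles to vinserti128):

```cpp
#include <immintrin.h>   // AVX2

// Insert a 128-bit integer vector into the upper half of a 256-bit
// vector; the immediate 1 corresponds to the emitted 0x01
// ("insert into upper 128 bits").
__m256i insert_high(__m256i dst, __m128i src) {
    return _mm256_inserti128_si256(dst, src, 1);
}
```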
@@ -7480,6 +7505,24 @@ void MacroAssembler::movbyte(ArrayAddress dst, int src) {
movb(as_Address(dst), src);
}
void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src) {
if (reachable(src)) {
movdl(dst, as_Address(src));
} else {
lea(rscratch1, src);
movdl(dst, Address(rscratch1, 0));
}
}
void MacroAssembler::movq(XMMRegister dst, AddressLiteral src) {
if (reachable(src)) {
movq(dst, as_Address(src));
} else {
lea(rscratch1, src);
movq(dst, Address(rscratch1, 0));
}
}
void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) {
if (reachable(src)) {
if (UseXmmLoadAndClearUpper) {
......
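The movdl/movq dispatch above follows a standard x86-64 pattern: a RIP-relative operand carries only a signed 32-bit displacement, so a literal farther than roughly ±2GB from the code must be reached indirectly through rscratch1. A hedged sketch of the underlying test (a hypothetical helper, not HotSpot's reachable()):

```cpp
#include <cstdint>

// Does `target` fit in the signed 32-bit displacement field of an
// instruction ending at `next_insn_pc`? If not, the generated code loads
// the address into a scratch register and dereferences it instead.
bool rip_reachable(intptr_t target, intptr_t next_insn_pc) {
    intptr_t disp = target - next_insn_pc;
    return disp == (intptr_t)(int32_t)disp;
}
```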
@@ -1466,6 +1466,9 @@ private:
void punpckldq(XMMRegister dst, XMMRegister src);
void punpckldq(XMMRegister dst, Address src);
// Interleave Low Quadwords
void punpcklqdq(XMMRegister dst, XMMRegister src);
#ifndef _LP64 // no 32bit push/pop on amd64
void pushl(Address src);
#endif
@@ -1606,13 +1609,11 @@ private:
void set_byte_if_not_zero(Register dst); // sets reg to 1 if not zero, otherwise 0
// AVX 3-operands instructions (encoded with VEX prefix)
// AVX 3-operands scalar instructions (encoded with VEX prefix)
void vaddsd(XMMRegister dst, XMMRegister nds, Address src);
void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vaddss(XMMRegister dst, XMMRegister nds, Address src);
void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vandpd(XMMRegister dst, XMMRegister nds, Address src);
void vandps(XMMRegister dst, XMMRegister nds, Address src);
void vdivsd(XMMRegister dst, XMMRegister nds, Address src);
void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vdivss(XMMRegister dst, XMMRegister nds, Address src);
@@ -1625,13 +1626,17 @@ private:
void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vsubss(XMMRegister dst, XMMRegister nds, Address src);
void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vxorpd(XMMRegister dst, XMMRegister nds, Address src);
void vxorps(XMMRegister dst, XMMRegister nds, Address src);
// AVX Vector instructions.
void vandpd(XMMRegister dst, XMMRegister nds, Address src);
void vandps(XMMRegister dst, XMMRegister nds, Address src);
void vxorpd(XMMRegister dst, XMMRegister nds, Address src);
void vxorps(XMMRegister dst, XMMRegister nds, Address src);
void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
// AVX instruction which is used to clear upper 128 bits of YMM registers and
// to avoid transaction penalty between AVX and SSE states. There is no
@@ -2563,6 +2568,20 @@ public:
void vxorps(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vxorps(dst, nds, src); }
void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
if (UseAVX > 1 || !vector256) // vpxor 256 bit is available only in AVX2
Assembler::vpxor(dst, nds, src, vector256);
else
Assembler::vxorpd(dst, nds, src, vector256);
}
// Move packed integer values from the low 128 bits to the high 128 bits of a 256-bit vector.
void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
if (UseAVX > 1) // vinserti128h is available only in AVX2
Assembler::vinserti128h(dst, nds, src);
else
Assembler::vinsertf128h(dst, nds, src);
}
// Data
@@ -2615,6 +2634,13 @@ public:
// to avoid hiding movb
void movbyte(ArrayAddress dst, int src);
// Import other mov() methods from the parent class or else
// they will be hidden by the following overriding declaration.
using Assembler::movdl;
using Assembler::movq;
void movdl(XMMRegister dst, AddressLiteral src);
void movq(XMMRegister dst, AddressLiteral src);
// Can push value or effective address
void pushptr(AddressLiteral src);
......
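The MacroAssembler::vpxor wrapper above works because the FP-domain vxorpd computes the same bit pattern as the integer-domain vpxor, and only AVX2 provides the latter at 256 bits. A hedged rendering of the two paths in intrinsic form:

```cpp
#include <immintrin.h>

// AVX2 path: integer-domain 256-bit XOR (vpxor).
__m256i xor256_avx2(__m256i a, __m256i b) {
    return _mm256_xor_si256(a, b);
}

// AVX-only fallback: the FP-domain XOR (vxorpd) is bitwise identical.
__m256i xor256_avx(__m256i a, __m256i b) {
    return _mm256_castpd_si256(
        _mm256_xor_pd(_mm256_castsi256_pd(a), _mm256_castsi256_pd(b)));
}
```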
@@ -562,7 +562,7 @@ void VM_Version::get_processor_features() {
AllocatePrefetchInstr = 3;
}
// On family 15h processors use XMM and UnalignedLoadStores for Array Copy
if( FLAG_IS_DEFAULT(UseXMMForArrayCopy) ) {
if( supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy) ) {
UseXMMForArrayCopy = true;
}
if( FLAG_IS_DEFAULT(UseUnalignedLoadStores) && UseXMMForArrayCopy ) {
......
This diff is collapsed.
@@ -3505,8 +3505,10 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, BlockBeg
}
// now perform tests that are based on flag settings
if (callee->should_inline()) {
if (callee->force_inline() || callee->should_inline()) {
// ignore heuristic controls on inlining
if (callee->force_inline())
CompileTask::print_inlining(callee, scope()->level(), bci(), "force inline by annotation");
} else {
if (inline_level() > MaxInlineLevel ) INLINE_BAILOUT("too-deep inlining");
if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("too-deep recursive inlining");
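A condensed sketch (illustrative only, not the GraphBuilder code) of the decision the hunk above implements: a force-inline callee bypasses the depth heuristics, while everything else still faces the usual limits:

```cpp
enum InlineVerdict { INLINE_NOW, BAILOUT_TOO_DEEP, KEEP_CHECKING };

InlineVerdict classify(bool force_inline, bool should_inline,
                       int inline_level, int max_inline_level) {
    if (force_inline || should_inline)
        return INLINE_NOW;            // ignore heuristic controls
    if (inline_level > max_inline_level)
        return BAILOUT_TOO_DEEP;      // "too-deep inlining"
    return KEEP_CHECKING;             // further heuristics follow
}
```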
@@ -3531,7 +3533,7 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, BlockBeg
}
#ifndef PRODUCT
// printing
if (PrintInlining) {
print_inline_result(callee, true);
}
......
@@ -160,6 +160,8 @@ class ciMethod : public ciObject {
// Code size for inlining decisions.
int code_size_for_inlining();
bool force_inline() { return get_methodOop()->force_inline(); }
int comp_level();
int highest_osr_comp_level();
......
@@ -31,8 +31,8 @@
#include "oops/typeArrayOop.hpp"
#include "runtime/handles.inline.hpp"
#include "utilities/accessFlags.hpp"
#include "classfile/symbolTable.hpp"
class TempNewSymbol;
class FieldAllocationCount;
@@ -50,11 +50,80 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
KlassHandle _host_klass;
GrowableArray<Handle>* _cp_patches; // overrides for CP entries
// precomputed flags
bool _has_finalizer;
bool _has_empty_finalizer;
bool _has_vanilla_constructor;
int _max_bootstrap_specifier_index;
int _max_bootstrap_specifier_index; // detects BSS values
// class attributes parsed before the instance klass is created:
bool _synthetic_flag;
Symbol* _sourcefile;
Symbol* _generic_signature;
char* _sde_buffer;
int _sde_length;
typeArrayHandle _inner_classes;
typeArrayHandle _annotations;
void set_class_synthetic_flag(bool x) { _synthetic_flag = x; }
void set_class_sourcefile(Symbol* x) { _sourcefile = x; }
void set_class_generic_signature(Symbol* x) { _generic_signature = x; }
void set_class_sde_buffer(char* x, int len) { _sde_buffer = x; _sde_length = len; }
void set_class_inner_classes(typeArrayHandle x) { _inner_classes = x; }
void set_class_annotations(typeArrayHandle x) { _annotations = x; }
void init_parsed_class_attributes() {
_synthetic_flag = false;
_sourcefile = NULL;
_generic_signature = NULL;
_sde_buffer = NULL;
_sde_length = 0;
// initialize the other flags too:
_has_finalizer = _has_empty_finalizer = _has_vanilla_constructor = false;
_max_bootstrap_specifier_index = -1;
}
void apply_parsed_class_attributes(instanceKlassHandle k); // update k
class AnnotationCollector {
public:
enum Location { _in_field, _in_method, _in_class };
enum ID {
_unknown = 0,
_method_ForceInline,
_annotation_LIMIT
};
const Location _location;
int _annotations_present;
AnnotationCollector(Location location)
: _location(location), _annotations_present(0)
{
assert((int)_annotation_LIMIT <= (int)sizeof(_annotations_present) * BitsPerByte, "");
}
// If this annotation name has an ID, report it (or _none).
ID annotation_index(Symbol* name);
// Set the annotation name:
void set_annotation(ID id) {
assert((int)id >= 0 && (int)id < (int)_annotation_LIMIT, "oob");
_annotations_present |= nth_bit((int)id);
}
// Report if the annotation is present.
bool has_any_annotations() { return _annotations_present != 0; }
bool has_annotation(ID id) { return (nth_bit((int)id) & _annotations_present) != 0; }
};
class FieldAnnotationCollector: public AnnotationCollector {
public:
FieldAnnotationCollector() : AnnotationCollector(_in_field) { }
void apply_to(FieldInfo* f);
};
class MethodAnnotationCollector: public AnnotationCollector {
public:
MethodAnnotationCollector() : AnnotationCollector(_in_method) { }
void apply_to(methodHandle m);
};
class ClassAnnotationCollector: public AnnotationCollector {
public:
ClassAnnotationCollector() : AnnotationCollector(_in_class) { }
void apply_to(instanceKlassHandle k);
};
enum { fixed_buffer_size = 128 };
u_char linenumbertable_buffer[fixed_buffer_size];
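AnnotationCollector above records which recognized annotations were seen as a bitmask: nth_bit(id) sets one bit of _annotations_present per ID, so insert and lookup are single mask operations. A standalone sketch of the same scheme (assuming nth_bit behaves like 1 << id):

```cpp
#include <cassert>

enum ID { _unknown = 0, _method_ForceInline, _annotation_LIMIT };

struct PresenceSet {
    int bits;
    PresenceSet() : bits(0) {}
    void set(ID id) {                       // cf. set_annotation()
        assert((int)id >= 0 && (int)id < (int)_annotation_LIMIT);
        bits |= 1 << (int)id;               // cf. nth_bit(id)
    }
    bool has(ID id) const { return (bits & (1 << (int)id)) != 0; }
    bool any() const      { return bits != 0; }
};
```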
@@ -87,7 +156,9 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
u2* constantvalue_index_addr,
bool* is_synthetic_addr,
u2* generic_signature_index_addr,
typeArrayHandle* field_annotations, TRAPS);
typeArrayHandle* field_annotations,
FieldAnnotationCollector* parsed_annotations,
TRAPS);
typeArrayHandle parse_fields(Symbol* class_name,
constantPoolHandle cp, bool is_interface,
FieldAllocationCount *fac,
@@ -128,25 +199,32 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
typeArrayOop parse_stackmap_table(u4 code_attribute_length, TRAPS);
// Classfile attribute parsing
void parse_classfile_sourcefile_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
void parse_classfile_source_debug_extension_attribute(constantPoolHandle cp,
instanceKlassHandle k, int length, TRAPS);
void parse_classfile_sourcefile_attribute(constantPoolHandle cp, TRAPS);
void parse_classfile_source_debug_extension_attribute(constantPoolHandle cp, int length, TRAPS);
u2 parse_classfile_inner_classes_attribute(u1* inner_classes_attribute_start,
bool parsed_enclosingmethod_attribute,
u2 enclosing_method_class_index,
u2 enclosing_method_method_index,
constantPoolHandle cp,
instanceKlassHandle k, TRAPS);
void parse_classfile_attributes(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
void parse_classfile_synthetic_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
void parse_classfile_signature_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
void parse_classfile_bootstrap_methods_attribute(constantPoolHandle cp, instanceKlassHandle k, u4 attribute_length, TRAPS);
TRAPS);
void parse_classfile_attributes(constantPoolHandle cp,
ClassAnnotationCollector* parsed_annotations,
TRAPS);
void parse_classfile_synthetic_attribute(constantPoolHandle cp, TRAPS);
void parse_classfile_signature_attribute(constantPoolHandle cp, TRAPS);
void parse_classfile_bootstrap_methods_attribute(constantPoolHandle cp, u4 attribute_length, TRAPS);
// Annotations handling
typeArrayHandle assemble_annotations(u1* runtime_visible_annotations,
int runtime_visible_annotations_length,
u1* runtime_invisible_annotations,
int runtime_invisible_annotations_length, TRAPS);
int skip_annotation(u1* buffer, int limit, int index);
int skip_annotation_value(u1* buffer, int limit, int index);
void parse_annotations(u1* buffer, int limit, constantPoolHandle cp,
/* Results (currently, only one result is supported): */
AnnotationCollector* result,
TRAPS);
// Final setup
unsigned int compute_oop_map_count(instanceKlassHandle super,
......
@@ -2738,17 +2738,6 @@ void java_lang_invoke_CallSite::compute_offsets() {
if (k != NULL) {
compute_offset(_target_offset, k, vmSymbols::target_name(), vmSymbols::java_lang_invoke_MethodHandle_signature());
}
// Disallow compilation of CallSite.setTargetNormal and CallSite.setTargetVolatile
// (For C2: keep this until we have throttling logic for uncommon traps.)
if (k != NULL) {
instanceKlass* ik = instanceKlass::cast(k);
methodOop m_normal = ik->lookup_method(vmSymbols::setTargetNormal_name(), vmSymbols::setTarget_signature());
methodOop m_volatile = ik->lookup_method(vmSymbols::setTargetVolatile_name(), vmSymbols::setTarget_signature());
guarantee(m_normal != NULL && m_volatile != NULL, "must exist");
m_normal->set_not_compilable_quietly();
m_volatile->set_not_compilable_quietly();
}
}
......
@@ -257,6 +257,7 @@
template(java_lang_invoke_BoundMethodHandle, "java/lang/invoke/BoundMethodHandle") \
template(java_lang_invoke_DirectMethodHandle, "java/lang/invoke/DirectMethodHandle") \
template(java_lang_invoke_CountingMethodHandle, "java/lang/invoke/CountingMethodHandle") \
template(java_lang_invoke_ForceInline_signature, "Ljava/lang/invoke/ForceInline;") \
/* internal up-calls made only by the JVM, via class sun.invoke.MethodHandleNatives: */ \
template(findMethodHandleType_name, "findMethodHandleType") \
template(findMethodHandleType_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/lang/invoke/MethodType;") \
......
@@ -131,6 +131,10 @@ public:
assert((is_reg() && value() < stack0->value() - 1) || is_stack(), "must be");
return (VMReg)(intptr_t)(value() + 1);
}
VMReg next(int i) {
assert((is_reg() && value() < stack0->value() - i) || is_stack(), "must be");
return (VMReg)(intptr_t)(value() + i);
}
VMReg prev() {
assert((is_stack() && value() > stack0->value()) || (is_reg() && value() != 0), "must be");
return (VMReg)(intptr_t)(value() - 1);
......
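The next(int) overload above relies on VMReg's representation trick: a VMReg is not a real object but a small register/slot index carried in a pointer type, so stepping i slots ahead is plain integer arithmetic on the pointer value. A hedged standalone sketch:

```cpp
#include <cstdint>

struct RegToken;               // never defined; only the address is used
typedef RegToken* Reg;

inline intptr_t reg_value(Reg r)       { return (intptr_t)r; }
inline Reg      reg_next(Reg r, int i) { return (Reg)(reg_value(r) + i); }
```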
@@ -547,23 +547,6 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecode
}
}
if (is_put && !is_static && klass->is_subclass_of(SystemDictionary::CallSite_klass()) && (info.name() == vmSymbols::target_name())) {
const jint direction = frame::interpreter_frame_expression_stack_direction();
Handle call_site (THREAD, *((oop*) thread->last_frame().interpreter_frame_tos_at(-1 * direction)));
Handle method_handle(THREAD, *((oop*) thread->last_frame().interpreter_frame_tos_at( 0 * direction)));
assert(call_site ->is_a(SystemDictionary::CallSite_klass()), "must be");
assert(method_handle->is_a(SystemDictionary::MethodHandle_klass()), "must be");
{
// Walk all nmethods depending on this call site.
MutexLocker mu(Compile_lock, thread);
Universe::flush_dependents_on(call_site, method_handle);
}
// Don't allow fast path for setting CallSite.target and sub-classes.
put_code = (Bytecodes::Code) 0;
}
cache_entry(thread)->set_field(
get_code,
put_code,
......
@@ -122,8 +122,9 @@ class methodOopDesc : public oopDesc {
u2 _max_locals; // Number of local variables used by this method
u2 _size_of_parameters; // size of the parameter block (receiver + arguments) in words
u1 _intrinsic_id; // vmSymbols::intrinsic_id (0 == _none)
u1 _jfr_towrite : 1, // Flags
: 7;
u1 _jfr_towrite : 1, // Flags
_force_inline : 1,
: 6;
u2 _interpreter_throwout_count; // Count of times method was exited via exception while interpreting
u2 _number_of_breakpoints; // fullspeed debugging support
InvocationCounter _invocation_counter; // Incremented before each activation of the method - used to trigger frequency-based optimizations
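The flag change above carves _force_inline out of the same byte as _jfr_towrite, shrinking the anonymous pad from 7 bits to 6 so methodOopDesc grows no larger. A minimal sketch of the packing:

```cpp
#include <cstdint>

struct MethodFlags {
    uint8_t jfr_towrite  : 1;
    uint8_t force_inline : 1;
    uint8_t              : 6;   // spare bits reserved for future flags
};
static_assert(sizeof(MethodFlags) == 1, "both flags still fit in one byte");
```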
@@ -655,6 +656,9 @@ class methodOopDesc : public oopDesc {
bool jfr_towrite() { return _jfr_towrite; }
void set_jfr_towrite(bool towrite) { _jfr_towrite = towrite; }
bool force_inline() { return _force_inline; }
void set_force_inline(bool fi) { _force_inline = fi; }
// On-stack replacement support
bool has_osr_nmethod(int level, bool match_level) {
return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL;
......
@@ -533,7 +533,17 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) {
if (arg->is_Proj() && arg->in(0)->is_CallStaticJava()) {
CallStaticJavaNode* csj = arg->in(0)->as_CallStaticJava();
if (csj->method() != NULL &&
csj->method()->intrinsic_id() == vmIntrinsics::_Integer_toString) {
csj->method()->intrinsic_id() == vmIntrinsics::_Integer_toString &&
arg->outcnt() == 1) {
// _control is the list of StringBuilder call nodes which
// will be replaced by new String code after this optimization.
// The Integer::toString() call is not part of the StringBuilder
// call chain. It can be eliminated only if its result is used
// solely by this chain of StringBuilder calls.
// Another limitation: it must have exactly one use, because
// until all related StringBuilder call nodes are collected we
// cannot know whether it is used only by this chain.
assert(arg->unique_out() == cnode, "sanity");
sc->add_control(csj);
sc->push_int(csj->in(TypeFunc::Parms));
continue;
......
@@ -3180,17 +3180,15 @@ JVM_ENTRY(void, JVM_RegisterMethodHandleMethods(JNIEnv *env, jclass MHN_class))
jclass MH_class = env->FindClass(MH_name);
status = env->RegisterNatives(MH_class, invoke_methods, sizeof(invoke_methods)/sizeof(JNINativeMethod));
}
if (!env->ExceptionOccurred()) {
status = env->RegisterNatives(MHN_class, call_site_methods, sizeof(call_site_methods)/sizeof(JNINativeMethod));
}
if (env->ExceptionOccurred()) {
warning("JSR 292 method handle code is mismatched to this JVM. Disabling support.");
enable_MH = false;
env->ExceptionClear();
}
status = env->RegisterNatives(MHN_class, call_site_methods, sizeof(call_site_methods)/sizeof(JNINativeMethod));
if (env->ExceptionOccurred()) {
// Exception is okay until 7087357
env->ExceptionClear();
}
}
if (enable_MH) {
......
@@ -178,17 +178,6 @@ jint Unsafe_invocation_key_to_method_slot(jint key) {
v = *(oop*)index_oop_from_field_offset_long(p, offset); \
}
#define GET_OOP_FIELD_VOLATILE(obj, offset, v) \
oop p = JNIHandles::resolve(obj); \
volatile oop v; \
if (UseCompressedOops) { \
volatile narrowOop n = *(volatile narrowOop*)index_oop_from_field_offset_long(p, offset); \
v = oopDesc::decode_heap_oop(n); \
} else { \
v = *(volatile oop*)index_oop_from_field_offset_long(p, offset); \
} \
OrderAccess::acquire();
// Get/SetObject must be special-cased, since it works with handles.
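With the GET_OOP_FIELD_VOLATILE macro deleted above, the volatile-read sequence is inlined at its single use site (next hunk). A hedged C++11 sketch of that sequence, assuming a compressed-oop shift of 3 (the real decode depends on heap geometry):

```cpp
#include <atomic>
#include <cstdint>

uintptr_t load_oop_acquire(const void* addr, bool compressed_oops,
                           uintptr_t heap_base) {
    uintptr_t v;
    if (compressed_oops) {
        uint32_t n = *(volatile const uint32_t*)addr;        // narrowOop
        v = (n == 0) ? 0 : heap_base + ((uintptr_t)n << 3);  // decode
    } else {
        v = *(volatile const uintptr_t*)addr;                // full-width oop
    }
    std::atomic_thread_fence(std::memory_order_acquire);     // OrderAccess::acquire()
    return v;
}
```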
@@ -296,28 +285,21 @@ UNSAFE_END
UNSAFE_ENTRY(jobject, Unsafe_GetObjectVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset))
UnsafeWrapper("Unsafe_GetObjectVolatile");
GET_OOP_FIELD_VOLATILE(obj, offset, v)
oop p = JNIHandles::resolve(obj);
void* addr = index_oop_from_field_offset_long(p, offset);
volatile oop v;
if (UseCompressedOops) {
volatile narrowOop n = *(volatile narrowOop*) addr;
v = oopDesc::decode_heap_oop(n);
} else {
v = *(volatile oop*) addr;
}
OrderAccess::acquire();
return JNIHandles::make_local(env, v);
UNSAFE_END
UNSAFE_ENTRY(void, Unsafe_SetObjectVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h))
UnsafeWrapper("Unsafe_SetObjectVolatile");
{
// Catch VolatileCallSite.target stores (via
// CallSite.setTargetVolatile) and check call site dependencies.
oop p = JNIHandles::resolve(obj);
if ((offset == java_lang_invoke_CallSite::target_offset_in_bytes()) && p->is_a(SystemDictionary::CallSite_klass())) {
Handle call_site (THREAD, p);
Handle method_handle(THREAD, JNIHandles::resolve(x_h));
assert(call_site ->is_a(SystemDictionary::CallSite_klass()), "must be");
assert(method_handle->is_a(SystemDictionary::MethodHandle_klass()), "must be");
{
// Walk all nmethods depending on this call site.
MutexLocker mu(Compile_lock, thread);
Universe::flush_dependents_on(call_site(), method_handle());
}
}
}
oop x = JNIHandles::resolve(x_h);
oop p = JNIHandles::resolve(obj);
void* addr = index_oop_from_field_offset_long(p, offset);
......