Commit e3e4f6b0, authored by minqi

Merge

......@@ -299,3 +299,7 @@ cfc5309f03b7bd6c1567618b63cf1fc74c0f2a8f hs25-b10
b61d9c88b759d1594b8af1655598e8fa00393672 hs25-b11
25bdce771bb3a7ae9825261a284d292cda700122 jdk8-b67
a35a72dd2e1255239d31f796f9f693e49b36bc9f hs25-b12
121aa71316af6cd877bf455e775fa3fdbcdd4b65 jdk8-b68
b6c9c0109a608eedbb6b868d260952990e3c91fe hs25-b13
cb8a4e04bc8c104de8a2f67463c7e31232bf8d68 jdk8-b69
990bbd393c239d95310ccc38094e57923bbf1d4a hs25-b14
......@@ -69,6 +69,8 @@ public class ConstMethod extends VMObject {
signatureIndex = new CIntField(type.getCIntegerField("_signature_index"), 0);
idnum = new CIntField(type.getCIntegerField("_method_idnum"), 0);
maxStack = new CIntField(type.getCIntegerField("_max_stack"), 0);
maxLocals = new CIntField(type.getCIntegerField("_max_locals"), 0);
sizeOfParameters = new CIntField(type.getCIntegerField("_size_of_parameters"), 0);
// start of byte code
bytecodeOffset = type.getSize();
......@@ -96,6 +98,8 @@ public class ConstMethod extends VMObject {
private static CIntField signatureIndex;
private static CIntField idnum;
private static CIntField maxStack;
private static CIntField maxLocals;
private static CIntField sizeOfParameters;
// start of bytecode
private static long bytecodeOffset;
......@@ -151,6 +155,14 @@ public class ConstMethod extends VMObject {
return maxStack.getValue(this);
}
public long getMaxLocals() {
return maxLocals.getValue(this);
}
public long getSizeOfParameters() {
return sizeOfParameters.getValue(this);
}
public Symbol getName() {
return getMethod().getName();
}
......@@ -247,6 +259,8 @@ public class ConstMethod extends VMObject {
visitor.doCInt(signatureIndex, true);
visitor.doCInt(codeSize, true);
visitor.doCInt(maxStack, true);
visitor.doCInt(maxLocals, true);
visitor.doCInt(sizeOfParameters, true);
}
// Accessors
......
......@@ -50,8 +50,6 @@ public class Method extends Metadata {
constMethod = type.getAddressField("_constMethod");
methodData = type.getAddressField("_method_data");
methodSize = new CIntField(type.getCIntegerField("_method_size"), 0);
maxLocals = new CIntField(type.getCIntegerField("_max_locals"), 0);
sizeOfParameters = new CIntField(type.getCIntegerField("_size_of_parameters"), 0);
accessFlags = new CIntField(type.getCIntegerField("_access_flags"), 0);
code = type.getAddressField("_code");
vtableIndex = new CIntField(type.getCIntegerField("_vtable_index"), 0);
......@@ -83,8 +81,6 @@ public class Method extends Metadata {
private static AddressField constMethod;
private static AddressField methodData;
private static CIntField methodSize;
private static CIntField maxLocals;
private static CIntField sizeOfParameters;
private static CIntField accessFlags;
private static CIntField vtableIndex;
private static CIntField invocationCounter;
......@@ -134,8 +130,8 @@ public class Method extends Metadata {
/** WARNING: this is in words, not useful in this system; use getObjectSize() instead */
public long getMethodSize() { return methodSize.getValue(this); }
public long getMaxStack() { return getConstMethod().getMaxStack(); }
public long getMaxLocals() { return maxLocals.getValue(this); }
public long getSizeOfParameters() { return sizeOfParameters.getValue(this); }
public long getMaxLocals() { return getConstMethod().getMaxLocals(); }
public long getSizeOfParameters() { return getConstMethod().getSizeOfParameters(); }
public long getNameIndex() { return getConstMethod().getNameIndex(); }
public long getSignatureIndex() { return getConstMethod().getSignatureIndex(); }
public long getGenericSignatureIndex() { return getConstMethod().getGenericSignatureIndex(); }
......@@ -282,8 +278,6 @@ public class Method extends Metadata {
public void iterateFields(MetadataVisitor visitor) {
visitor.doCInt(methodSize, true);
visitor.doCInt(maxLocals, true);
visitor.doCInt(sizeOfParameters, true);
visitor.doCInt(accessFlags, true);
}
......
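The two hunks above are one refactoring: _max_locals and _size_of_parameters move from Method into ConstMethod, and the Serviceability Agent's Method wrapper now delegates to ConstMethod. A minimal sketch of the expected VM-side counterpart in method.hpp, assuming jdk8-era accessor names (illustrative, not part of this diff):

// method.hpp (sketch): Method no longer stores these fields itself;
// it forwards to its ConstMethod, matching the SA change above.
int  max_locals() const         { return constMethod()->max_locals(); }
int  size_of_parameters() const { return constMethod()->size_of_parameters(); }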
......@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2012
HS_MAJOR_VER=25
HS_MINOR_VER=0
HS_BUILD_NUMBER=13
HS_BUILD_NUMBER=15
JDK_MAJOR_VER=1
JDK_MINOR_VER=8
......
......@@ -298,7 +298,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
for (int i = 0; i < _bytes_to_copy; i++) {
address ptr = (address)(_pc_start + i);
int a_byte = (*ptr) & 0xFF;
__ a_byte (a_byte);
__ emit_int8 (a_byte);
}
}
......@@ -340,10 +340,10 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;
// Emit the patch record. We need to emit a full word, so emit an extra empty byte
__ a_byte(0);
__ a_byte(being_initialized_entry_offset);
__ a_byte(bytes_to_skip);
__ a_byte(_bytes_to_copy);
__ emit_int8(0);
__ emit_int8(being_initialized_entry_offset);
__ emit_int8(bytes_to_skip);
__ emit_int8(_bytes_to_copy);
address patch_info_pc = __ pc();
assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");
......
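The four emit_int8 calls above lay down the patch record that the bytes_to_skip assert checks. A sketch of its layout; the struct and field names are illustrative, only the byte order comes from the code above:

// Patch record (one full word, hence the leading empty pad byte):
struct PatchRecord {
  u1 pad;                             // the extra empty byte
  u1 being_initialized_entry_offset;  // back-offset to the init entry
  u1 bytes_to_skip;                   // end_of_patch -> patch_info_pc
  u1 bytes_to_copy;                   // length of the saved code bytes
};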
......@@ -582,7 +582,9 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// the following temporary registers are used during frame creation
const Register Gtmp1 = G3_scratch ;
const Register Gtmp2 = G1_scratch;
const Address size_of_parameters(G5_method, 0, in_bytes(Method::size_of_parameters_offset()));
const Register RconstMethod = Gtmp1;
const Address constMethod(G5_method, 0, in_bytes(Method::const_offset()));
const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
bool inc_counter = UseCompiler || CountCompiledCalls;
......@@ -618,6 +620,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
}
#endif // ASSERT
__ ld_ptr(constMethod, RconstMethod);
__ lduh(size_of_parameters, Gtmp1);
__ sll(Gtmp1, LogBytesPerWord, Gtmp2); // parameter size in bytes
__ add(Gargs, Gtmp2, Gargs); // points to first local + BytesPerWord
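This is the recurring pattern the change introduces throughout the interpreter: _size_of_parameters now lives in ConstMethod, so every reader first loads the ConstMethod* and then the u2 field, where a single lduh off the Method* used to suffice. Condensed sketch of the SPARC form (register names as in the hunk above):

// Before: __ lduh(G5_method, in_bytes(Method::size_of_parameters_offset()), Gtmp1);
// After: indirect through the ConstMethod*.
__ ld_ptr(G5_method, in_bytes(Method::const_offset()), RconstMethod);
__ lduh(RconstMethod, in_bytes(ConstMethod::size_of_parameters_offset()), Gtmp1);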
......@@ -1047,8 +1050,6 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
const Register Gtmp = G3_scratch;
const Address constMethod (G5_method, 0, in_bytes(Method::const_offset()));
const Address access_flags (G5_method, 0, in_bytes(Method::access_flags_offset()));
const Address size_of_parameters(G5_method, 0, in_bytes(Method::size_of_parameters_offset()));
const Address size_of_locals (G5_method, 0, in_bytes(Method::size_of_locals_offset()));
// slop factor is two extra slots on the expression stack so that
// we always have room to store a result when returning from a call without parameters
......@@ -1066,6 +1067,9 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
// Now compute new frame size
if (native) {
const Register RconstMethod = Gtmp;
const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
__ ld_ptr(constMethod, RconstMethod);
__ lduh( size_of_parameters, Gtmp );
__ calc_mem_param_words(Gtmp, Gtmp); // space for native call parameters passed on the stack in words
} else {
......@@ -1236,9 +1240,13 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
}
if (init_value != noreg) {
Label clear_loop;
const Register RconstMethod = O1;
const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
const Address size_of_locals (RconstMethod, 0, in_bytes(ConstMethod::size_of_locals_offset()));
// NOTE: If you change the frame layout, this code will need to
// be updated!
__ ld_ptr( constMethod, RconstMethod );
__ lduh( size_of_locals, O2 );
__ lduh( size_of_parameters, O1 );
__ sll( O2, LogBytesPerWord, O2);
......@@ -1483,13 +1491,16 @@ void CppInterpreterGenerator::adjust_callers_stack(Register args) {
//
// assert_different_registers(state, prev_state);
const Register Gtmp = G3_scratch;
const Register RconstMethod = G3_scratch;
const Register tmp = O2;
const Address size_of_parameters(G5_method, 0, in_bytes(Method::size_of_parameters_offset()));
const Address size_of_locals (G5_method, 0, in_bytes(Method::size_of_locals_offset()));
const Address constMethod(G5_method, 0, in_bytes(Method::const_offset()));
const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
const Address size_of_locals (RconstMethod, 0, in_bytes(ConstMethod::size_of_locals_offset()));
__ ld_ptr(constMethod, RconstMethod);
__ lduh(size_of_parameters, tmp);
__ sll(tmp, LogBytesPerWord, Gtmp); // parameter size in bytes
__ add(args, Gtmp, Gargs); // points to first local + BytesPerWord
__ sll(tmp, LogBytesPerWord, Gargs); // parameter size in bytes
__ add(args, Gargs, Gargs); // points to first local + BytesPerWord
// NEW
__ add(Gargs, -wordSize, Gargs); // points to first local[0]
// determine extra space for non-argument locals & adjust caller's SP
......@@ -1541,8 +1552,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
const Address constMethod (G5_method, 0, in_bytes(Method::const_offset()));
const Address access_flags (G5_method, 0, in_bytes(Method::access_flags_offset()));
const Address size_of_parameters(G5_method, 0, in_bytes(Method::size_of_parameters_offset()));
const Address size_of_locals (G5_method, 0, in_bytes(Method::size_of_locals_offset()));
address entry_point = __ pc();
__ mov(G0, prevState); // no current activation
......@@ -1750,7 +1759,9 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ ld_ptr(STATE(_result._to_call._callee), L4_scratch); // called method
__ ld_ptr(STATE(_stack), L1_scratch); // get top of java expr stack
__ lduh(L4_scratch, in_bytes(Method::size_of_parameters_offset()), L2_scratch); // get parameter size
// get parameter size
__ ld_ptr(L4_scratch, in_bytes(Method::const_offset()), L2_scratch);
__ lduh(L2_scratch, in_bytes(ConstMethod::size_of_parameters_offset()), L2_scratch);
__ sll(L2_scratch, LogBytesPerWord, L2_scratch ); // parameter size in bytes
__ add(L1_scratch, L2_scratch, L1_scratch); // stack destination for result
__ ld(L4_scratch, in_bytes(Method::result_index_offset()), L3_scratch); // called method result type index
......
......@@ -100,34 +100,6 @@ const char* Argument::name() const {
bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif
void MacroAssembler::print_instruction(int inst) {
const char* s;
switch (inv_op(inst)) {
default: s = "????"; break;
case call_op: s = "call"; break;
case branch_op:
switch (inv_op2(inst)) {
case fb_op2: s = "fb"; break;
case fbp_op2: s = "fbp"; break;
case br_op2: s = "br"; break;
case bp_op2: s = "bp"; break;
case cb_op2: s = "cb"; break;
case bpr_op2: {
if (is_cbcond(inst)) {
s = is_cxb(inst) ? "cxb" : "cwb";
} else {
s = "bpr";
}
break;
}
default: s = "????"; break;
}
}
::tty->print("%s", s);
}
// Patch instruction inst at offset inst_pos to refer to dest_pos
// and return the resulting instruction.
// We should have pcs, not offsets, but since all is relative, it will work out
......
......@@ -603,7 +603,6 @@ class MacroAssembler : public Assembler {
friend class Label;
protected:
static void print_instruction(int inst);
static int patched_branch(int dest_pos, int inst, int inst_pos);
static int branch_destination(int inst, int pos);
......@@ -759,9 +758,6 @@ class MacroAssembler : public Assembler {
// Required platform-specific helpers for Label::patch_instructions.
// They _shadow_ the declarations in AbstractAssembler, which are undefined.
void pd_patch_instruction(address branch, address target);
#ifndef PRODUCT
static void pd_print_patched_instruction(address branch);
#endif
// sethi Macro handles optimizations and relocations
private:
......
......@@ -43,14 +43,6 @@ inline void MacroAssembler::pd_patch_instruction(address branch, address target)
stub_inst = patched_branch(target - branch, stub_inst, 0);
}
#ifndef PRODUCT
inline void MacroAssembler::pd_print_patched_instruction(address branch) {
jint stub_inst = *(jint*) branch;
print_instruction(stub_inst);
::tty->print("%s", " (unresolved)");
}
#endif // PRODUCT
// Use the right loads/stores for the platform
inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
......
......@@ -171,7 +171,8 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
if (VerifyMethodHandles && !for_compiler_entry) {
// make sure recv is already on stack
__ load_sized_value(Address(method_temp, Method::size_of_parameters_offset()),
__ ld_ptr(method_temp, in_bytes(Method::const_offset()), temp2);
__ load_sized_value(Address(temp2, ConstMethod::size_of_parameters_offset()),
temp2,
sizeof(u2), /*is_signed*/ false);
// assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
......@@ -233,7 +234,8 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
__ load_sized_value(Address(G5_method, Method::size_of_parameters_offset()),
__ ld_ptr(G5_method, in_bytes(Method::const_offset()), O4_param_size);
__ load_sized_value(Address(O4_param_size, ConstMethod::size_of_parameters_offset()),
O4_param_size,
sizeof(u2), /*is_signed*/ false);
// assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
......
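The method-handle paths read the same field as a u2 via load_sized_value, again behind an explicit ConstMethod* load. The two steps from the hunk above, condensed:

// G5_method holds the Method*; O4_param_size ends up with the u2 count.
__ ld_ptr(G5_method, in_bytes(Method::const_offset()), O4_param_size);
__ load_sized_value(Address(O4_param_size, ConstMethod::size_of_parameters_offset()),
                    O4_param_size, sizeof(u2), /*is_signed*/ false);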
......@@ -10224,7 +10224,7 @@ instruct array_equals(o0RegP ary1, o1RegP ary2, g3RegI tmp1, notemp_iRegI result
//---------- Zeros Count Instructions ------------------------------------------
instruct countLeadingZerosI(iRegI dst, iRegI src, iRegI tmp, flagsReg cr) %{
instruct countLeadingZerosI(iRegIsafe dst, iRegI src, iRegI tmp, flagsReg cr) %{
predicate(UsePopCountInstruction); // See Matcher::match_rule_supported
match(Set dst (CountLeadingZerosI src));
effect(TEMP dst, TEMP tmp, KILL cr);
......@@ -10321,7 +10321,7 @@ instruct countLeadingZerosL(iRegIsafe dst, iRegL src, iRegL tmp, flagsReg cr) %{
ins_pipe(ialu_reg);
%}
instruct countTrailingZerosI(iRegI dst, iRegI src, flagsReg cr) %{
instruct countTrailingZerosI(iRegIsafe dst, iRegI src, flagsReg cr) %{
predicate(UsePopCountInstruction); // See Matcher::match_rule_supported
match(Set dst (CountTrailingZerosI src));
effect(TEMP dst, KILL cr);
......@@ -10364,19 +10364,21 @@ instruct countTrailingZerosL(iRegIsafe dst, iRegL src, flagsReg cr) %{
//---------- Population Count Instructions -------------------------------------
instruct popCountI(iRegI dst, iRegI src) %{
instruct popCountI(iRegIsafe dst, iRegI src) %{
predicate(UsePopCountInstruction);
match(Set dst (PopCountI src));
format %{ "POPC $src, $dst" %}
format %{ "SRL $src, G0, $dst\t! clear upper word for 64 bit POPC\n\t"
"POPC $dst, $dst" %}
ins_encode %{
__ popc($src$$Register, $dst$$Register);
__ srl($src$$Register, G0, $dst$$Register);
__ popc($dst$$Register, $dst$$Register);
%}
ins_pipe(ialu_reg);
%}
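Both fixes in this hunk address the same hazard: SPARC POPC counts bits in the full 64-bit register, so a 32-bit int operand must be zero-extended first or stale upper-word bits inflate the result (the switch to an iRegIsafe destination is part of the same fix). On SPARC v9, srl with shift amount %g0 (zero) is a 32-bit shift that zero-extends into the destination:

__ srl($src$$Register, G0, $dst$$Register);  // clear the upper 32 bits
__ popc($dst$$Register, $dst$$Register);     // 64-bit population count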
// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegI dst, iRegL src) %{
instruct popCountL(iRegIsafe dst, iRegL src) %{
predicate(UsePopCountInstruction);
match(Set dst (PopCountL src));
......
......@@ -434,7 +434,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe
// the frame is greater than one page in size, so check against
// the bottom of the stack
__ cmp_and_brx_short(SP, Rscratch, Assembler::greater, Assembler::pt, after_frame_check);
__ cmp_and_brx_short(SP, Rscratch, Assembler::greaterUnsigned, Assembler::pt, after_frame_check);
// the stack will overflow, throw an exception
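greater becomes greaterUnsigned here because SP and the guard value are addresses: pointer comparisons must be unsigned, and a signed compare gives the wrong answer when the two addresses straddle the signed/unsigned boundary. The fixed check, as above:

__ cmp_and_brx_short(SP, Rscratch, Assembler::greaterUnsigned, Assembler::pt,
                     after_frame_check);  // unsigned address compare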
......@@ -494,9 +494,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// (gri - 2/25/2000)
const Address size_of_parameters(G5_method, Method::size_of_parameters_offset());
const Address size_of_locals (G5_method, Method::size_of_locals_offset());
const Address constMethod (G5_method, Method::const_offset());
int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );
const int extra_space =
......@@ -506,11 +503,15 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
(native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);
const Register Glocals_size = G3;
const Register RconstMethod = Glocals_size;
const Register Otmp1 = O3;
const Register Otmp2 = O4;
// Lscratch can't be used as a temporary because the call_stub uses
// it to assert that the stack frame was setup correctly.
const Address constMethod (G5_method, Method::const_offset());
const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
__ ld_ptr( constMethod, RconstMethod );
__ lduh( size_of_parameters, Glocals_size);
// Gargs points to first local + BytesPerWord
......@@ -530,6 +531,8 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
//
// Compute number of locals in method apart from incoming parameters
//
const Address size_of_locals (Otmp1, ConstMethod::size_of_locals_offset());
__ ld_ptr( constMethod, Otmp1 );
__ lduh( size_of_locals, Otmp1 );
__ sub( Otmp1, Glocals_size, Glocals_size );
__ round_to( Glocals_size, WordsPerLong );
......@@ -1256,8 +1259,7 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// make sure registers are different!
assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
const Address size_of_parameters(G5_method, Method::size_of_parameters_offset());
const Address size_of_locals (G5_method, Method::size_of_locals_offset());
const Address constMethod (G5_method, Method::const_offset());
// Seems like G5_method is live at the point this is used. So we could make this look consistent
// and use in the asserts.
const Address access_flags (Lmethod, Method::access_flags_offset());
......@@ -1307,8 +1309,13 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
init_value = G0;
Label clear_loop;
const Register RconstMethod = O1;
const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
const Address size_of_locals (RconstMethod, ConstMethod::size_of_locals_offset());
// NOTE: If you change the frame layout, this code will need to
// be updated!
__ ld_ptr( constMethod, RconstMethod );
__ lduh( size_of_locals, O2 );
__ lduh( size_of_parameters, O1 );
__ sll( O2, Interpreter::logStackElementSize, O2);
......@@ -1823,9 +1830,13 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
const Register Gtmp1 = G3_scratch;
const Register Gtmp2 = G1_scratch;
const Register RconstMethod = Gtmp1;
const Address constMethod(Lmethod, Method::const_offset());
const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
// Compute size of arguments for saving when returning to deoptimized caller
__ lduh(Lmethod, in_bytes(Method::size_of_parameters_offset()), Gtmp1);
__ ld_ptr(constMethod, RconstMethod);
__ lduh(size_of_parameters, Gtmp1);
__ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
__ sub(Llocals, Gtmp1, Gtmp2);
__ add(Gtmp2, wordSize, Gtmp2);
......
......@@ -3040,7 +3040,8 @@ void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
Register Rtemp = G4_scratch;
// Load receiver from stack slot
__ lduh(G5_method, in_bytes(Method::size_of_parameters_offset()), G4_scratch);
__ ld_ptr(G5_method, in_bytes(Method::const_offset()), G4_scratch);
__ lduh(G4_scratch, in_bytes(ConstMethod::size_of_parameters_offset()), G4_scratch);
__ load_receiver(G4_scratch, O0);
// receiver NULL check
......
(This file's diff is collapsed.)
......@@ -313,10 +313,10 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
#endif
} else {
// make a copy the code which is going to be patched.
for ( int i = 0; i < _bytes_to_copy; i++) {
for (int i = 0; i < _bytes_to_copy; i++) {
address ptr = (address)(_pc_start + i);
int a_byte = (*ptr) & 0xFF;
__ a_byte (a_byte);
__ emit_int8(a_byte);
*ptr = 0x90; // make the site look like a nop
}
}
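Same a_byte to emit_int8 conversion as the SPARC stub, with x86's extra twist: each original byte is saved into the stub, then the live site is overwritten with NOPs until patching installs the real code. The loop above, with comments added:

for (int i = 0; i < _bytes_to_copy; i++) {
  address ptr = (address)(_pc_start + i);
  int a_byte = (*ptr) & 0xFF;   // read one original instruction byte
  __ emit_int8(a_byte);         // preserve it in the patching stub
  *ptr = 0x90;                  // 0x90 = x86 NOP: neuter the original site
}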
......@@ -363,11 +363,11 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
// emit the offsets needed to find the code to patch
int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;
__ a_byte(0xB8);
__ a_byte(0);
__ a_byte(being_initialized_entry_offset);
__ a_byte(bytes_to_skip);
__ a_byte(_bytes_to_copy);
__ emit_int8((unsigned char)0xB8);
__ emit_int8(0);
__ emit_int8(being_initialized_entry_offset);
__ emit_int8(bytes_to_skip);
__ emit_int8(_bytes_to_copy);
address patch_info_pc = __ pc();
assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");
......
......@@ -611,8 +611,6 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
// C++ interpreter only
// rsi/r13 - previous interpreter state pointer
const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
// InterpreterRuntime::frequency_counter_overflow takes one argument
// indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
// The call returns the address of the verified entry point for the method or NULL
......@@ -977,15 +975,16 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// to save/restore.
address entry_point = __ pc();
const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
const Address size_of_locals (rbx, Method::size_of_locals_offset());
const Address constMethod (rbx, Method::const_offset());
const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset());
const Address access_flags (rbx, Method::access_flags_offset());
const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset());
// rsi/r13 == state/locals rdi == prevstate
const Register locals = rdi;
// get parameter size (always needed)
__ movptr(rcx, constMethod);
__ load_unsigned_short(rcx, size_of_parameters);
// rbx: Method*
......@@ -994,6 +993,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// for natives the size of locals is zero
// compute beginning of parameters /locals
__ lea(locals, Address(rsp, rcx, Address::times_ptr, -wordSize));
// initialize fixed part of activation frame
......@@ -1107,11 +1107,14 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
const Register method = rbx;
const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rdi);
const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp(); // rcx|rscratch1
const Address constMethod (method, Method::const_offset());
const Address size_of_parameters(t, ConstMethod::size_of_parameters_offset());
// allocate space for parameters
__ movptr(method, STATE(_method));
__ verify_method_ptr(method);
__ load_unsigned_short(t, Address(method, Method::size_of_parameters_offset()));
__ movptr(t, constMethod);
__ load_unsigned_short(t, size_of_parameters);
__ shll(t, 2);
#ifdef _LP64
__ subptr(rsp, t);
......@@ -1700,15 +1703,17 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// save sender sp
__ push(rcx);
const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
const Address size_of_locals (rbx, Method::size_of_locals_offset());
const Address constMethod (rbx, Method::const_offset());
const Address access_flags (rbx, Method::access_flags_offset());
const Address size_of_parameters(rdx, ConstMethod::size_of_parameters_offset());
const Address size_of_locals (rdx, ConstMethod::size_of_locals_offset());
// const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
// const Address monitor_block_bot (rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
// const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));
// get parameter size (always needed)
__ movptr(rdx, constMethod);
__ load_unsigned_short(rcx, size_of_parameters);
// rbx: Method*
......@@ -1989,7 +1994,9 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ movptr(rbx, STATE(_result._to_call._callee));
// callee left args on top of expression stack, remove them
__ load_unsigned_short(rcx, Address(rbx, Method::size_of_parameters_offset()));
__ movptr(rcx, constMethod);
__ load_unsigned_short(rcx, Address(rcx, ConstMethod::size_of_parameters_offset()));
__ lea(rsp, Address(rsp, rcx, Address::times_ptr));
__ movl(rcx, Address(rbx, Method::result_index_offset()));
......@@ -2159,7 +2166,9 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// Make it look like call_stub calling conventions
// Get (potential) receiver
__ load_unsigned_short(rcx, size_of_parameters); // get size of parameters in words
// get size of parameters in words
__ movptr(rcx, constMethod);
__ load_unsigned_short(rcx, Address(rcx, ConstMethod::size_of_parameters_offset()));
ExternalAddress recursive(CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation));
__ pushptr(recursive.addr()); // make it look good in the debugger
......
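x86 flavor of the two-step load; load_unsigned_short replaces the old single read off the Method*. Condensed sketch (registers as used in these hunks):

__ movptr(rcx, Address(rbx, Method::const_offset()));   // rcx = ConstMethod*
__ load_unsigned_short(rcx, Address(rcx, ConstMethod::size_of_parameters_offset()));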
......@@ -1023,7 +1023,7 @@ void MacroAssembler::lea(Address dst, AddressLiteral adr) {
void MacroAssembler::leave() {
// %%% is this really better? Why not on 32bit too?
emit_byte(0xC9); // LEAVE
emit_int8((unsigned char)0xC9); // LEAVE
}
void MacroAssembler::lneg(Register hi, Register lo) {
......@@ -2112,11 +2112,11 @@ void MacroAssembler::fat_nop() {
if (UseAddressNop) {
addr_nop_5();
} else {
emit_byte(0x26); // es:
emit_byte(0x2e); // cs:
emit_byte(0x64); // fs:
emit_byte(0x65); // gs:
emit_byte(0x90);
emit_int8(0x26); // es:
emit_int8(0x2e); // cs:
emit_int8(0x64); // fs:
emit_int8(0x65); // gs:
emit_int8((unsigned char)0x90);
}
}
......@@ -2534,12 +2534,12 @@ void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) {
int offs = (intptr_t)dst.target() - ((intptr_t)pc());
if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
// 0111 tttn #8-bit disp
emit_byte(0x70 | cc);
emit_byte((offs - short_size) & 0xFF);
emit_int8(0x70 | cc);
emit_int8((offs - short_size) & 0xFF);
} else {
// 0000 1111 1000 tttn #32-bit disp
emit_byte(0x0F);
emit_byte(0x80 | cc);
emit_int8(0x0F);
emit_int8((unsigned char)(0x80 | cc));
emit_long(offs - long_size);
}
} else {
......@@ -3085,7 +3085,8 @@ void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) {
void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src) {
// Used in sign-bit flipping with aligned address.
assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
bool aligned_adr = (((intptr_t)src.target() & 15) == 0);
assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes");
if (reachable(src)) {
Assembler::pshufb(dst, as_Address(src));
} else {
......
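On the (unsigned char) casts that accompany the emit_byte to emit_int8 rename: emit_int8 takes a signed byte, so opcode literals with the high bit set (0xC9, 0x80 | cc, 0x90) are cast to document the intended byte value and avoid narrowing warnings, while values that fit in 7 bits (0x70 | cc, 0x0F, the segment prefixes) need no cast. A sketch, assuming the jdk8-era signature emit_int8(int8_t):

emit_int8((unsigned char)0xC9);  // LEAVE: high bit set, cast required
emit_int8(0x70 | cc);            // short jcc opcode: <= 0x7F, no cast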
......@@ -126,25 +126,6 @@ class MacroAssembler: public Assembler {
}
}
#ifndef PRODUCT
static void pd_print_patched_instruction(address branch) {
const char* s;
unsigned char op = branch[0];
if (op == 0xE8) {
s = "call";
} else if (op == 0xE9 || op == 0xEB) {
s = "jmp";
} else if ((op & 0xF0) == 0x70) {
s = "jcc";
} else if (op == 0x0F) {
s = "jcc";
} else {
s = "????";
}
tty->print("%s (unresolved)", s);
}
#endif
// The following 4 methods return the offset of the appropriate move instruction
// Support for fast byte/short loading with zero extension (depending on particular CPU)
......
......@@ -169,8 +169,9 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
if (VerifyMethodHandles && !for_compiler_entry) {
// make sure recv is already on stack
__ movptr(temp2, Address(method_temp, Method::const_offset()));
__ load_sized_value(temp2,
Address(method_temp, Method::size_of_parameters_offset()),
Address(temp2, ConstMethod::size_of_parameters_offset()),
sizeof(u2), /*is_signed*/ false);
// assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
Label L;
......@@ -234,8 +235,9 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
__ movptr(rdx_argp, Address(rbx_method, Method::const_offset()));
__ load_sized_value(rdx_argp,
Address(rbx_method, Method::size_of_parameters_offset()),
Address(rdx_argp, ConstMethod::size_of_parameters_offset()),
sizeof(u2), /*is_signed*/ false);
// assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
rdx_first_arg_addr = __ argument_address(rdx_argp, -1);
......
......@@ -2174,13 +2174,13 @@ class StubGenerator: public StubCodeGenerator {
// c_rarg2 - K (key) in little endian int array
//
address generate_aescrypt_encryptBlock() {
assert(UseAES && (UseAVX > 0), "need AES instructions and misaligned SSE support");
assert(UseAES, "need AES instructions and misaligned SSE support");
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
Label L_doLast;
address start = __ pc();
const Register from = rsi; // source array address
const Register from = rdx; // source array address
const Register to = rdx; // destination array address
const Register key = rcx; // key array address
const Register keylen = rax;
......@@ -2189,47 +2189,74 @@ class StubGenerator: public StubCodeGenerator {
const Address key_param (rbp, 8+8);
const XMMRegister xmm_result = xmm0;
const XMMRegister xmm_temp = xmm1;
const XMMRegister xmm_key_shuf_mask = xmm2;
const XMMRegister xmm_key_shuf_mask = xmm1;
const XMMRegister xmm_temp1 = xmm2;
const XMMRegister xmm_temp2 = xmm3;
const XMMRegister xmm_temp3 = xmm4;
const XMMRegister xmm_temp4 = xmm5;
__ enter(); // required for proper stackwalking of RuntimeStub frame
__ push(rsi);
__ movptr(from , from_param);
__ movptr(to , to_param);
__ movptr(key , key_param);
__ enter(); // required for proper stackwalking of RuntimeStub frame
__ movptr(from, from_param);
__ movptr(key, key_param);
// keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
__ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
// keylen = # of 32-bit words, convert to 128-bit words
__ shrl(keylen, 2);
__ subl(keylen, 11); // every key has at least 11 128-bit words, some have more
__ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
__ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input
__ movptr(to, to_param);
// For encryption, the java expanded key ordering is just what we need
load_key(xmm_temp, key, 0x00, xmm_key_shuf_mask);
__ pxor(xmm_result, xmm_temp);
for (int offset = 0x10; offset <= 0x90; offset += 0x10) {
aes_enc_key(xmm_result, xmm_temp, key, offset, xmm_key_shuf_mask);
}
load_key (xmm_temp, key, 0xa0, xmm_key_shuf_mask);
__ cmpl(keylen, 0);
__ jcc(Assembler::equal, L_doLast);
__ aesenc(xmm_result, xmm_temp); // only in 192 and 256 bit keys
aes_enc_key(xmm_result, xmm_temp, key, 0xb0, xmm_key_shuf_mask);
load_key(xmm_temp, key, 0xc0, xmm_key_shuf_mask);
__ subl(keylen, 2);
__ jcc(Assembler::equal, L_doLast);
__ aesenc(xmm_result, xmm_temp); // only in 256 bit keys
aes_enc_key(xmm_result, xmm_temp, key, 0xd0, xmm_key_shuf_mask);
load_key(xmm_temp, key, 0xe0, xmm_key_shuf_mask);
load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask);
__ pxor(xmm_result, xmm_temp1);
load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);
__ aesenc(xmm_result, xmm_temp1);
__ aesenc(xmm_result, xmm_temp2);
__ aesenc(xmm_result, xmm_temp3);
__ aesenc(xmm_result, xmm_temp4);
load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);
__ aesenc(xmm_result, xmm_temp1);
__ aesenc(xmm_result, xmm_temp2);
__ aesenc(xmm_result, xmm_temp3);
__ aesenc(xmm_result, xmm_temp4);
load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);
__ cmpl(keylen, 44);
__ jccb(Assembler::equal, L_doLast);
__ aesenc(xmm_result, xmm_temp1);
__ aesenc(xmm_result, xmm_temp2);
load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);
__ cmpl(keylen, 52);
__ jccb(Assembler::equal, L_doLast);
__ aesenc(xmm_result, xmm_temp1);
__ aesenc(xmm_result, xmm_temp2);
load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);
__ BIND(L_doLast);
__ aesenclast(xmm_result, xmm_temp);
__ aesenc(xmm_result, xmm_temp1);
__ aesenclast(xmm_result, xmm_temp2);
__ movdqu(Address(to, 0), xmm_result); // store the result
__ xorptr(rax, rax); // return 0
__ pop(rsi);
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
......@@ -2245,13 +2272,13 @@ class StubGenerator: public StubCodeGenerator {
// c_rarg2 - K (key) in little endian int array
//
address generate_aescrypt_decryptBlock() {
assert(UseAES && (UseAVX > 0), "need AES instructions and misaligned SSE support");
assert(UseAES, "need AES instructions and misaligned SSE support");
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
Label L_doLast;
address start = __ pc();
const Register from = rsi; // source array address
const Register from = rdx; // source array address
const Register to = rdx; // destination array address
const Register key = rcx; // key array address
const Register keylen = rax;
......@@ -2260,51 +2287,76 @@ class StubGenerator: public StubCodeGenerator {
const Address key_param (rbp, 8+8);
const XMMRegister xmm_result = xmm0;
const XMMRegister xmm_temp = xmm1;
const XMMRegister xmm_key_shuf_mask = xmm2;
const XMMRegister xmm_key_shuf_mask = xmm1;
const XMMRegister xmm_temp1 = xmm2;
const XMMRegister xmm_temp2 = xmm3;
const XMMRegister xmm_temp3 = xmm4;
const XMMRegister xmm_temp4 = xmm5;
__ enter(); // required for proper stackwalking of RuntimeStub frame
__ push(rsi);
__ movptr(from , from_param);
__ movptr(to , to_param);
__ movptr(key , key_param);
__ movptr(from, from_param);
__ movptr(key, key_param);
// keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
__ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
// keylen = # of 32-bit words, convert to 128-bit words
__ shrl(keylen, 2);
__ subl(keylen, 11); // every key has at least 11 128-bit words, some have more
__ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
__ movdqu(xmm_result, Address(from, 0));
__ movptr(to, to_param);
// for decryption java expanded key ordering is rotated one position from what we want
// so we start from 0x10 here and hit 0x00 last
// we don't know if the key is aligned, hence not using load-execute form
load_key(xmm_temp, key, 0x10, xmm_key_shuf_mask);
__ pxor (xmm_result, xmm_temp);
for (int offset = 0x20; offset <= 0xa0; offset += 0x10) {
aes_dec_key(xmm_result, xmm_temp, key, offset, xmm_key_shuf_mask);
}
__ cmpl(keylen, 0);
__ jcc(Assembler::equal, L_doLast);
// only in 192 and 256 bit keys
aes_dec_key(xmm_result, xmm_temp, key, 0xb0, xmm_key_shuf_mask);
aes_dec_key(xmm_result, xmm_temp, key, 0xc0, xmm_key_shuf_mask);
__ subl(keylen, 2);
__ jcc(Assembler::equal, L_doLast);
// only in 256 bit keys
aes_dec_key(xmm_result, xmm_temp, key, 0xd0, xmm_key_shuf_mask);
aes_dec_key(xmm_result, xmm_temp, key, 0xe0, xmm_key_shuf_mask);
load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);
__ pxor (xmm_result, xmm_temp1);
__ aesdec(xmm_result, xmm_temp2);
__ aesdec(xmm_result, xmm_temp3);
__ aesdec(xmm_result, xmm_temp4);
load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);
__ aesdec(xmm_result, xmm_temp1);
__ aesdec(xmm_result, xmm_temp2);
__ aesdec(xmm_result, xmm_temp3);
__ aesdec(xmm_result, xmm_temp4);
load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);
load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask);
__ cmpl(keylen, 44);
__ jccb(Assembler::equal, L_doLast);
__ aesdec(xmm_result, xmm_temp1);
__ aesdec(xmm_result, xmm_temp2);
load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);
__ cmpl(keylen, 52);
__ jccb(Assembler::equal, L_doLast);
__ aesdec(xmm_result, xmm_temp1);
__ aesdec(xmm_result, xmm_temp2);
load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);
__ BIND(L_doLast);
// for decryption the aesdeclast operation is always on key+0x00
load_key(xmm_temp, key, 0x00, xmm_key_shuf_mask);
__ aesdeclast(xmm_result, xmm_temp);
__ aesdec(xmm_result, xmm_temp1);
__ aesdec(xmm_result, xmm_temp2);
// for decryption the aesdeclast operation is always on key+0x00
__ aesdeclast(xmm_result, xmm_temp3);
__ movdqu(Address(to, 0), xmm_result); // store the result
__ xorptr(rax, rax); // return 0
__ pop(rsi);
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
......@@ -2340,7 +2392,7 @@ class StubGenerator: public StubCodeGenerator {
// c_rarg4 - input length
//
address generate_cipherBlockChaining_encryptAESCrypt() {
assert(UseAES && (UseAVX > 0), "need AES instructions and misaligned SSE support");
assert(UseAES, "need AES instructions and misaligned SSE support");
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
address start = __ pc();
......@@ -2393,7 +2445,7 @@ class StubGenerator: public StubCodeGenerator {
__ jcc(Assembler::notEqual, L_key_192_256);
// 128 bit code follows here
__ movptr(pos, 0);
__ movl(pos, 0);
__ align(OptoLoopAlignment);
__ BIND(L_loopTop_128);
__ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input
......@@ -2423,15 +2475,15 @@ class StubGenerator: public StubCodeGenerator {
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
__ BIND(L_key_192_256);
// here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
__ BIND(L_key_192_256);
// here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
__ cmpl(rax, 52);
__ jcc(Assembler::notEqual, L_key_256);
// 192-bit code follows here (could be changed to use more xmm registers)
__ movptr(pos, 0);
__ align(OptoLoopAlignment);
__ BIND(L_loopTop_192);
__ movl(pos, 0);
__ align(OptoLoopAlignment);
__ BIND(L_loopTop_192);
__ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input
__ pxor (xmm_result, xmm_temp); // xor with the current r vector
......@@ -2452,11 +2504,11 @@ class StubGenerator: public StubCodeGenerator {
__ jcc(Assembler::notEqual, L_loopTop_192);
__ jmp(L_exit);
__ BIND(L_key_256);
__ BIND(L_key_256);
// 256-bit code follows here (could be changed to use more xmm registers)
__ movptr(pos, 0);
__ align(OptoLoopAlignment);
__ BIND(L_loopTop_256);
__ movl(pos, 0);
__ align(OptoLoopAlignment);
__ BIND(L_loopTop_256);
__ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input
__ pxor (xmm_result, xmm_temp); // xor with the current r vector
......@@ -2495,7 +2547,7 @@ class StubGenerator: public StubCodeGenerator {
//
address generate_cipherBlockChaining_decryptAESCrypt() {
assert(UseAES && (UseAVX > 0), "need AES instructions and misaligned SSE support");
assert(UseAES, "need AES instructions and misaligned SSE support");
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
address start = __ pc();
......@@ -2556,9 +2608,9 @@ class StubGenerator: public StubCodeGenerator {
// 128-bit code follows here, parallelized
__ movptr(pos, 0);
__ align(OptoLoopAlignment);
__ BIND(L_singleBlock_loopTop_128);
__ movl(pos, 0);
__ align(OptoLoopAlignment);
__ BIND(L_singleBlock_loopTop_128);
__ cmpptr(len_reg, 0); // any blocks left??
__ jcc(Assembler::equal, L_exit);
__ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
......@@ -2597,7 +2649,7 @@ class StubGenerator: public StubCodeGenerator {
__ jcc(Assembler::notEqual, L_key_256);
// 192-bit code follows here (could be optimized to use parallelism)
__ movptr(pos, 0);
__ movl(pos, 0);
__ align(OptoLoopAlignment);
__ BIND(L_singleBlock_loopTop_192);
__ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
......@@ -2622,7 +2674,7 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_key_256);
// 256-bit code follows here (could be optimized to use parallelism)
__ movptr(pos, 0);
__ movl(pos, 0);
__ align(OptoLoopAlignment);
__ BIND(L_singleBlock_loopTop_256);
__ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
......
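The rewritten AES stubs drop the keylen -= 11 bookkeeping and dispatch directly on the key-schedule length, while spreading round keys over four XMM temporaries so consecutive aesenc/aesdec operations can pipeline. The dispatch constants follow from the comment in the diff (keylen is the expanded-key length in 32-bit ints: {11, 13, 15} round keys times 4):

__ cmpl(keylen, 44);
__ jccb(Assembler::equal, L_doLast);  // AES-128: straight to the last round
// ... two more rounds ...
__ cmpl(keylen, 52);
__ jccb(Assembler::equal, L_doLast);  // AES-192
// ... two final rounds for AES-256, then aesenclast / aesdeclast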
(5 more file diffs are collapsed.)
......@@ -30,7 +30,7 @@
void MacroAssembler::int3() {
emit_byte(0xCC);
emit_int8((unsigned char)0xCC);
}
#ifndef _LP64
......
(35 more file diffs are collapsed.)