openanolis / dragonwell8_hotspot

Commit bcaaa7a7
Authored on May 02, 2014 by goetz

8042309: Some bugfixes for the ppc64 port.
Reviewed-by: kvn

Parent: ba6464cf

Showing 10 changed files with 172 additions and 152 deletions (+172 −152)
Changed files:

  src/cpu/ppc/vm/cppInterpreter_ppc.cpp                 +6   -5
  src/cpu/ppc/vm/frame_ppc.inline.hpp                   +2   -0
  src/cpu/ppc/vm/interp_masm_ppc_64.hpp                 +1   -1
  src/cpu/ppc/vm/interpreterRT_ppc.cpp                  +1   -0
  src/cpu/ppc/vm/interpreter_ppc.cpp                    +1   -17
  src/cpu/ppc/vm/jniFastGetField_ppc.cpp                +3   -3
  src/cpu/ppc/vm/ppc.ad                                 +89  -69
  src/cpu/ppc/vm/templateInterpreter_ppc.cpp            +25  -13
  src/cpu/ppc/vm/templateTable_ppc_64.cpp               +9   -9
  src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp   +35  -35
src/cpu/ppc/vm/cppInterpreter_ppc.cpp  (+6 -5)

 /*
  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2013 SAP AG. All rights reserved.
 ...

@@ -403,7 +404,7 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(Label& stack_ov
   BLOCK_COMMENT("compute_interpreter_state {");
   // access_flags = method->access_flags();
-  // TODO: PPC port: assert(4 == methodOopDesc::sz_access_flags(), "unexpected field size");
+  // TODO: PPC port: assert(4 == sizeof(AccessFlags), "unexpected field size");
   __ lwa(access_flags, method_(access_flags));
   // parameter_count = method->constMethod->size_of_parameters();

@@ -1055,7 +1056,7 @@ address CppInterpreterGenerator::generate_native_entry(void) {
   assert(access_flags->is_nonvolatile(),
          "access_flags must be in a non-volatile register");
   // Type check.
-  // TODO: PPC port: assert(4 == methodOopDesc::sz_access_flags(), "unexpected field size");
+  // TODO: PPC port: assert(4 == sizeof(AccessFlags), "unexpected field size");
   __ lwz(access_flags, method_(access_flags));
   // We don't want to reload R19_method and access_flags after calls

@@ -1838,7 +1839,7 @@ address CppInterpreterGenerator::generate_normal_entry(void) {
   // Interpreter state fields.
   const Register msg = R24_tmp4;
-  // MethodOop fields.
+  // Method fields.
   const Register parameter_count = R25_tmp5;
   const Register result_index    = R26_tmp6;

@@ -2023,7 +2024,7 @@ address CppInterpreterGenerator::generate_normal_entry(void) {
   __ add(R17_tos, R17_tos, parameter_count);
   // Result stub address array index
-  // TODO: PPC port: assert(4 == methodOopDesc::sz_result_index(), "unexpected field size");
+  // TODO: PPC port: assert(4 == sizeof(AccessFlags), "unexpected field size");
   __ lwa(result_index, method_(result_index));
   __ li(msg, BytecodeInterpreter::method_resume);

@@ -2709,7 +2710,7 @@ address CppInterpreterGenerator::generate_normal_entry(void) {
   __ ld(R3_ARG1, state_(_result._osr._osr_buf));
   __ mtctr(R12_scratch2);
-  // Load method oop, gc may move it during execution of osr'd method.
+  // Load method, gc may move it during execution of osr'd method.
   __ ld(R22_tmp2, state_(_method));
   // Load message 'call_method'.
   __ li(R23_tmp3, BytecodeInterpreter::call_method);
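Note on the assert rewrites above: the TODO comments record an intended sanity
check that AccessFlags really occupies 4 bytes, since the generated lwa/lwz
instructions load exactly 32 bits. A standalone sketch of that check, using a
hypothetical stand-in struct rather than HotSpot's real AccessFlags type:

  // Illustrative only, not HotSpot code.
  #include <cstdint>

  struct AccessFlags { uint32_t _flags; };  // stand-in for the VM type

  static_assert(sizeof(AccessFlags) == 4, "unexpected field size");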
src/cpu/ppc/vm/frame_ppc.inline.hpp  (+2 -0)

@@ -26,6 +26,8 @@
 #ifndef CPU_PPC_VM_FRAME_PPC_INLINE_HPP
 #define CPU_PPC_VM_FRAME_PPC_INLINE_HPP

+#include "code/codeCache.hpp"
+
 // Inline functions for ppc64 frames:

 // Find codeblob and set deopt_state.
src/cpu/ppc/vm/interp_masm_ppc_64.hpp  (+1 -1)

@@ -26,7 +26,7 @@
 #ifndef CPU_PPC_VM_INTERP_MASM_PPC_64_HPP
 #define CPU_PPC_VM_INTERP_MASM_PPC_64_HPP

-#include "assembler_ppc.inline.hpp"
+#include "asm/macroAssembler.hpp"
 #include "interpreter/invocationCounter.hpp"

 // This file specializes the assembler with interpreter-specific macros.
src/cpu/ppc/vm/interpreterRT_ppc.cpp  (+1 -0)

@@ -24,6 +24,7 @@
  */

 #include "precompiled.hpp"
+#include "asm/assembler.inline.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "memory/allocation.inline.hpp"
src/cpu/ppc/vm/interpreter_ppc.cpp  (+1 -17)

@@ -139,32 +139,16 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
   // Signature is in R3_RET. Signature is callee saved.
   __ mr(signature, R3_RET);

-  // Reload method, it may have moved.
-#ifdef CC_INTERP
-  __ ld(R19_method, state_(_method));
-#else
-  __ ld(R19_method, 0, target_sp);
-  __ ld(R19_method, _ijava_state_neg(method), R19_method);
-#endif
-
   // Get the result handler.
   __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_result_handler), R16_thread, R19_method);

-  // Reload method, it may have moved.
-#ifdef CC_INTERP
-  __ ld(R19_method, state_(_method));
-#else
-  __ ld(R19_method, 0, target_sp);
-  __ ld(R19_method, _ijava_state_neg(method), R19_method);
-#endif
-
   {
     Label L;
     // test if static
     // _access_flags._flags must be at offset 0.
     // TODO PPC port: requires change in shared code.
     //assert(in_bytes(AccessFlags::flags_offset()) == 0,
-    //       "MethodOopDesc._access_flags == MethodOopDesc._access_flags._flags");
+    //       "MethodDesc._access_flags == MethodDesc._access_flags._flags");
     // _access_flags must be a 32 bit value.
     assert(sizeof(AccessFlags) == 4, "wrong size");
     __ lwa(R11_scratch1 /*access_flags*/, method_(access_flags));
src/cpu/ppc/vm/jniFastGetField_ppc.cpp  (+3 -3)

@@ -32,7 +32,7 @@
 address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
-  // we don't have fast jni accessors.
+  // We don't have fast jni accessors.
   return (address) -1;
 }

@@ -57,12 +57,12 @@ address JNI_FastGetField::generate_fast_get_int_field() {
 }

 address JNI_FastGetField::generate_fast_get_long_field() {
-  // we don't have fast jni accessors.
+  // We don't have fast jni accessors.
   return (address) -1;
 }

 address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
-  // e don't have fast jni accessors.
+  // We don't have fast jni accessors.
   return (address) -1;
 }
src/cpu/ppc/vm/ppc.ad  (+89 -69)

@@ -1135,6 +1135,7 @@ class CallStubImpl {
 public:

+  // Emit call stub, compiled java to interpreter.
   static void emit_trampoline_stub(MacroAssembler &_masm, int destination_toc_offset, int insts_call_instruction_offset);

   // Size of call trampoline stub.

@@ -3663,6 +3664,8 @@ encode %{
   %}

   // Compound version of call dynamic
+  // Toc is only passed so that it can be used in ins_encode statement.
+  // In the code we have to use $constanttablebase.
   enc_class enc_java_dynamic_call(method meth, iRegLdst toc) %{
     // TODO: PPC port $archOpcode(ppc64Opcode_compound);
     MacroAssembler _masm(&cbuf);

@@ -3670,14 +3673,17 @@ encode %{
     Register Rtoc = (ra_) ? $constanttablebase : R2_TOC;
 #if 0
+    int vtable_index = this->_vtable_index;
     if (_vtable_index < 0) {
       // Must be invalid_vtable_index, not nonvirtual_vtable_index.
       assert(_vtable_index == Method::invalid_vtable_index, "correct sentinel value");
       Register ic_reg = as_Register(Matcher::inline_cache_reg_encode());
-      AddressLiteral meta = __ allocate_metadata_address((Metadata *)Universe::non_oop_word());
+      // Virtual call relocation will point to ic load.
       address virtual_call_meta_addr = __ pc();
-      __ load_const_from_method_toc(ic_reg, meta, Rtoc);
+      // Load a clear inline cache.
+      AddressLiteral empty_ic((address) Universe::non_oop_word());
+      __ load_const_from_method_toc(ic_reg, empty_ic, Rtoc);
       // CALL to fixup routine. Fixup routine uses ScopeDesc info
       // to determine who we intended to call.
       __ relocate(virtual_call_Relocation::spec(virtual_call_meta_addr));

@@ -3710,7 +3716,6 @@ encode %{
                "Fix constant in ret_addr_offset()");
     }
 #endif
-    guarantee(0, "Fix handling of toc edge: messes up derived/base pairs.");
     Unimplemented();  // ret_addr_offset not yet fixed. Depends on compressed oops (load klass!).
   %}

@@ -7064,7 +7069,7 @@ instruct decodeNKlass_notNull_addBase_Ex(iRegPdst dst, iRegLsrc base, iRegNsrc s
     n1->_bottom_type = _bottom_type;
     decodeNKlass_shiftNode *n2 = new (C) decodeNKlass_shiftNode();
-    n2->add_req(n_region, n2);
+    n2->add_req(n_region, n1);
     n2->_opnds[0] = op_dst;
     n2->_opnds[1] = op_dst;
     n2->_bottom_type = _bottom_type;

@@ -7929,7 +7934,23 @@ instruct subL_reg_imm16(iRegLdst dst, iRegLsrc src1, immL16 src2) %{
 // Turn the sign-bit of a long into a 64-bit mask, 0x0...0 for
 // positive longs and 0xF...F for negative ones.
-instruct signmask64I_regI(iRegIdst dst, iRegIsrc src) %{
+instruct signmask64I_regL(iRegIdst dst, iRegLsrc src) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE src);
+  predicate(false);
+  format %{ "SRADI   $dst, $src, #63" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_sradi);
+    __ sradi($dst$$Register, $src$$Register, 0x3f);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Turn the sign-bit of a long into a 64-bit mask, 0x0...0 for
+// positive longs and 0xF...F for negative ones.
+instruct signmask64L_regL(iRegLdst dst, iRegLsrc src) %{
   // no match-rule, false predicate
   effect(DEF dst, USE src);
   predicate(false);

@@ -9619,14 +9640,14 @@ instruct cmpLTMask_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
   ins_cost(DEFAULT_COST*4);
   expand %{
-    iRegIdst src1s;
-    iRegIdst src2s;
-    iRegIdst diff;
-    sxtI_reg(src1s, src1); // ensure proper sign extention
-    sxtI_reg(src2s, src2); // ensure proper sign extention
-    subI_reg_reg(diff, src1s, src2s);
+    iRegLdst src1s;
+    iRegLdst src2s;
+    iRegLdst diff;
+    convI2L_reg(src1s, src1); // Ensure proper sign extension.
+    convI2L_reg(src2s, src2); // Ensure proper sign extension.
+    subL_reg_reg(diff, src1s, src2s);
     // Need to consider >=33 bit result, therefore we need signmaskL.
-    signmask64I_regI(dst, diff);
+    signmask64I_regL(dst, diff);
   %}
 %}

@@ -11178,18 +11199,18 @@ instruct minI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
   ins_cost(DEFAULT_COST*6);
   expand %{
-    iRegIdst src1s;
-    iRegIdst src2s;
-    iRegIdst diff;
-    iRegIdst sm;
-    iRegIdst doz; // difference or zero
-    sxtI_reg(src1s, src1); // Ensure proper sign extention.
-    sxtI_reg(src2s, src2); // Ensure proper sign extention.
-    subI_reg_reg(diff, src2s, src1s);
+    iRegLdst src1s;
+    iRegLdst src2s;
+    iRegLdst diff;
+    iRegLdst sm;
+    iRegLdst doz; // difference or zero
+    convI2L_reg(src1s, src1); // Ensure proper sign extension.
+    convI2L_reg(src2s, src2); // Ensure proper sign extension.
+    subL_reg_reg(diff, src2s, src1s);
     // Need to consider >=33 bit result, therefore we need signmaskL.
-    signmask64I_regI(sm, diff);
-    andI_reg_reg(doz, diff, sm); // <=0
-    addI_reg_reg(dst, doz, src1s);
+    signmask64L_regL(sm, diff);
+    andL_reg_reg(doz, diff, sm); // <=0
+    addI_regL_regL(dst, doz, src1s);
   %}
 %}

@@ -11198,19 +11219,18 @@ instruct maxI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
   ins_cost(DEFAULT_COST*6);
   expand %{
-    immI_minus1 m1 %{ -1 %}
-    iRegIdst src1s;
-    iRegIdst src2s;
-    iRegIdst diff;
-    iRegIdst sm;
-    iRegIdst doz; // difference or zero
-    sxtI_reg(src1s, src1); // Ensure proper sign extention.
-    sxtI_reg(src2s, src2); // Ensure proper sign extention.
-    subI_reg_reg(diff, src2s, src1s);
+    iRegLdst src1s;
+    iRegLdst src2s;
+    iRegLdst diff;
+    iRegLdst sm;
+    iRegLdst doz; // difference or zero
+    convI2L_reg(src1s, src1); // Ensure proper sign extension.
+    convI2L_reg(src2s, src2); // Ensure proper sign extension.
+    subL_reg_reg(diff, src2s, src1s);
     // Need to consider >=33 bit result, therefore we need signmaskL.
-    signmask64I_regI(sm, diff);
-    andcI_reg_reg(doz, sm, m1, diff); // >=0
-    addI_reg_reg(dst, doz, src1s);
+    signmask64L_regL(sm, diff);
+    andcL_reg_reg(doz, diff, sm); // >=0
+    addI_regL_regL(dst, doz, src1s);
   %}
 %}
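Note on the signmask/min/max rewrites above: the rules rely on 'sradi dst, src, 63'
smearing the sign bit of a 64-bit value across the register (all zeros for
non-negative values, all ones for negative ones), and the min/max expands apply
that mask to the sign-extended 64-bit difference, which can need 33 bits; that
is why the operands switch from the I to the L variants. A standalone sketch of
the idea (illustrative, not HotSpot code; assumes arithmetic right shift, which
is what sradi performs):

  #include <cstdint>
  #include <cassert>

  // What the signmask64*_regL rules emit via 'sradi dst, src, 63'.
  static int64_t signmask64(int64_t x) { return x >> 63; }

  static int32_t min_branchfree(int32_t src1, int32_t src2) {
    int64_t diff = (int64_t)src2 - (int64_t)src1;  // convI2L_reg + subL_reg_reg
    int64_t sm   = signmask64(diff);               // signmask64L_regL
    int64_t doz  = diff & sm;                      // andL: diff if negative, else 0
    return (int32_t)(doz + src1);                  // addI_regL_regL
  }

  static int32_t max_branchfree(int32_t src1, int32_t src2) {
    int64_t diff = (int64_t)src2 - (int64_t)src1;
    int64_t doz  = diff & ~signmask64(diff);       // andcL: diff if non-negative, else 0
    return (int32_t)(doz + src1);
  }

  int main() {
    // Exactly the case where a 32-bit difference would overflow: the 64-bit
    // difference keeps the correct sign, so the mask selects the right operand.
    assert(min_branchfree(INT32_MIN, INT32_MAX) == INT32_MIN);
    assert(max_branchfree(INT32_MIN, INT32_MAX) == INT32_MAX);
    assert(min_branchfree(7, -3) == -3 && max_branchfree(7, -3) == 7);
    return 0;
  }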
src/cpu/ppc/vm/templateInterpreter_ppc.cpp  (+25 -13)

@@ -81,24 +81,18 @@ address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(con
 #if 0
 // Call special ClassCastException constructor taking object to cast
 // and target class as arguments.
-address TemplateInterpreterGenerator::generate_ClassCastException_verbose_handler(const char* name) {
+address TemplateInterpreterGenerator::generate_ClassCastException_verbose_handler() {
   address entry = __ pc();

-  // Target class oop is in register R6_ARG4 by convention!
   // Expression stack must be empty before entering the VM if an
   // exception happened.
   __ empty_expression_stack();
-  // Setup parameters.
   // Thread will be loaded to R3_ARG1.
-  __ load_const_optimized(R4_ARG2, (address) name);
-  __ mr(R5_ARG3, R17_tos);
-  // R6_ARG4 contains specified class.
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose));
-#ifdef ASSERT
+  // Target class oop is in register R5_ARG3 by convention!
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose, R17_tos, R5_ARG3));
   // Above call must not return here since exception pending.
-  __ should_not_reach_here();
-#endif
+  DEBUG_ONLY(__ should_not_reach_here();)
   return entry;
 }
 #endif

@@ -1535,14 +1529,32 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
     __ stw(R0, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);

     // Get out of the current method and re-execute the call that called us.
-    __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ return_pc, R11_scratch1, R12_scratch2);
+    __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2);
     __ restore_interpreter_state(R11_scratch1);
     __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
     __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
-    __ mtlr(return_pc);
     if (ProfileInterpreter) {
       __ set_method_data_pointer_for_bcp();
     }
+#if INCLUDE_JVMTI
+    Label L_done;
+
+    __ lbz(R11_scratch1, 0, R14_bcp);
+    __ cmpwi(CCR0, R11_scratch1, Bytecodes::_invokestatic);
+    __ bne(CCR0, L_done);
+
+    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
+    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
+    __ ld(R4_ARG2, 0, R18_locals);
+    __ call_VM(R11_scratch1, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R4_ARG2, R19_method, R14_bcp);
+    __ cmpdi(CCR0, R11_scratch1, 0);
+    __ beq(CCR0, L_done);
+    __ std(R11_scratch1, wordSize, R15_esp);
+    __ bind(L_done);
+#endif // INCLUDE_JVMTI
     __ dispatch_next(vtos);
   }
   // end of JVMTI PopFrame support
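Note on the first hunk above: the explicit #ifdef ASSERT / #endif pair around
should_not_reach_here() is replaced by the DEBUG_ONLY convenience macro. A
minimal sketch of how such a macro behaves (HotSpot defines it in its utility
headers; the definition there may differ in detail):

  // Illustrative only: keep code in debug builds, compile it out in product.
  #ifdef ASSERT
  #define DEBUG_ONLY(code) code
  #else
  #define DEBUG_ONLY(code)
  #endif

  // DEBUG_ONLY(__ should_not_reach_here();)  is then equivalent to wrapping
  // the statement in #ifdef ASSERT ... #endif.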
src/cpu/ppc/vm/templateTable_ppc_64.cpp  (+9 -9)

@@ -64,7 +64,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
   assert_different_registers(Rtmp1, Rtmp2, Rtmp3, Rval, Rbase);

   switch (barrier) {
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
     case BarrierSet::G1SATBCT:
     case BarrierSet::G1SATBCTLogging:
       {

@@ -104,7 +104,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
         __ bind(Ldone);
       }
       break;
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
     case BarrierSet::CardTableModRef:
     case BarrierSet::CardTableExtension:
       {

@@ -259,17 +259,17 @@ void TemplateTable::fconst(int value) {
   switch (value) {
     default: ShouldNotReachHere();
     case 0: {
-      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*) &zero, R0);
+      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*) &zero, R0, true);
       __ lfs(F15_ftos, simm16_offset, R11_scratch1);
       break;
     }
     case 1: {
-      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*) &one, R0);
+      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*) &one, R0, true);
       __ lfs(F15_ftos, simm16_offset, R11_scratch1);
       break;
     }
     case 2: {
-      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*) &two, R0);
+      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*) &two, R0, true);
       __ lfs(F15_ftos, simm16_offset, R11_scratch1);
       break;
     }

@@ -282,12 +282,12 @@ void TemplateTable::dconst(int value) {
   static double one = 1.0;

   switch (value) {
     case 0: {
-      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*) &zero, R0);
+      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*) &zero, R0, true);
       __ lfd(F15_ftos, simm16_offset, R11_scratch1);
       break;
     }
     case 1: {
-      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*) &one, R0);
+      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*) &one, R0, true);
       __ lfd(F15_ftos, simm16_offset, R11_scratch1);
       break;
     }

@@ -3728,9 +3728,9 @@ void TemplateTable::checkcast() {
   transition(atos, atos);

   Label Ldone, Lis_null, Lquicked, Lresolved;
-  Register Roffset         = R5_ARG3,
+  Register Roffset         = R6_ARG4,
            RobjKlass       = R4_ARG2,
-           RspecifiedKlass = R6_ARG4,  // Generate_ClassCastException_verbose_handler will expect this register.
+           RspecifiedKlass = R5_ARG3,  // Generate_ClassCastException_verbose_handler will read value from this register.
            Rcpool          = R11_scratch1,
            Rtags           = R12_scratch2;
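Note on the fconst/dconst hunks above: load_const_optimized gains a trailing
'true' argument and its return value is used as the displacement of the
following lfs/lfd. The pattern suggests the flag asks the macro assembler to
materialize only the upper part of the constant's address and return the low
16-bit signed remainder so the load can fold it into its displacement; the
parameter's actual name is not visible in this diff. A standalone sketch of
that hi/lo split (illustrative, not the HotSpot implementation):

  #include <cstdint>
  #include <cassert>

  // Split an address into a high part (kept in a register) and a signed 16-bit
  // remainder (folded into the load's displacement): hi + lo == addr.
  static int64_t split_simm16(int64_t addr, int16_t* lo) {
    *lo = (int16_t)(addr & 0xFFFF);  // low 16 bits, reinterpreted as signed
    return addr - *lo;               // high part; its low 16 bits are zero
  }

  int main() {
    int16_t lo;
    int64_t addr = 0x12345FFF8;
    int64_t hi = split_simm16(addr, &lo);
    assert(hi + lo == addr);  // register value + displacement reaches the target
    return 0;
  }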
src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp  (+35 -35)

@@ -53,41 +53,41 @@ inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *
 inline jlong Atomic::load(volatile jlong* src) { return *src; }

-/*
-  machine barrier instructions:
-  ... (same text as the '//' comment block below, previously inside a C-style block comment) ...
-*/
+// machine barrier instructions:
+//
+// - sync            two-way memory barrier, aka fence
+// - lwsync          orders  Store|Store,
+//                            Load|Store,
+//                            Load|Load,
+//                           but not Store|Load
+// - eieio           orders memory accesses for device memory (only)
+// - isync           invalidates speculatively executed instructions
+//                   From the POWER ISA 2.06 documentation:
+//                    "[...] an isync instruction prevents the execution of
+//                   instructions following the isync until instructions
+//                   preceding the isync have completed, [...]"
+//                   From IBM's AIX assembler reference:
+//                    "The isync [...] instructions causes the processor to
+//                   refetch any instructions that might have been fetched
+//                   prior to the isync instruction. The instruction isync
+//                   causes the processor to wait for all previous instructions
+//                   to complete. Then any instructions already fetched are
+//                   discarded and instruction processing continues in the
+//                   environment established by the previous instructions."
+//
+// semantic barrier instructions:
+// (as defined in orderAccess.hpp)
+//
+// - release         orders Store|Store,       (maps to lwsync)
+//                           Load|Store
+// - acquire         orders  Load|Store,       (maps to lwsync)
+//                           Load|Load
+// - fence           orders Store|Store,       (maps to sync)
+//                           Load|Store,
+//                           Load|Load,
+//                          Store|Load

 #define strasm_sync                       "\n  sync    \n"
 #define strasm_lwsync                     "\n  lwsync  \n"
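Note on the strasm_* macros at the end of the hunk: they are string fragments
meant to be spliced into GCC inline-assembly templates elsewhere in this file.
A small standalone sketch of that usage pattern (illustrative; it compiles only
for ppc64 targets, and the file's real atomic operations are more involved):

  #define strasm_sync    "\n  sync    \n"
  #define strasm_lwsync  "\n  lwsync  \n"

  // String-literal concatenation splices the barrier into the asm template;
  // the "memory" clobber keeps the compiler from reordering around it.
  inline void release_store(volatile int* p, int v) {
    __asm__ __volatile__ (strasm_lwsync : : : "memory");  // release: Store|Store, Load|Store
    *p = v;
  }

  inline void full_fence() {
    __asm__ __volatile__ (strasm_sync : : : "memory");    // two-way barrier (fence)
  }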