Commit 1b3888bb authored by roland

7029017: Additional architecture support for c2 compiler

Summary: Enables cross building of a c2 VM. Support masking of shift counts when the processor architecture mandates it.
Reviewed-by: kvn, never
Parent 2e49f3b3
@@ -102,7 +102,7 @@ all: $(EXEC)
 $(EXEC) : $(OBJECTS)
 	@echo Making adlc
-	$(QUIETLY) $(LINK_NOPROF.CC) -o $(EXEC) $(OBJECTS)
+	$(QUIETLY) $(HOST.LINK_NOPROF.CC) -o $(EXEC) $(OBJECTS)
 
 # Random dependencies:
 $(OBJECTS): opcodes.hpp classes.hpp adlc.hpp adlcVMDeps.hpp adlparse.hpp archDesc.hpp arena.hpp dict2.hpp filebuff.hpp forms.hpp formsopt.hpp formssel.hpp
@@ -204,14 +204,14 @@ PROCESS_AD_FILES = awk '{ \
 $(OUTDIR)/%.o: %.cpp
 	@echo Compiling $<
 	$(QUIETLY) $(REMOVE_TARGET)
-	$(QUIETLY) $(COMPILE.CC) -o $@ $< $(COMPILE_DONE)
+	$(QUIETLY) $(HOST.COMPILE.CC) -o $@ $< $(COMPILE_DONE)
 
 # Some object files are given a prefix, to disambiguate
 # them from objects of the same name built for the VM.
 $(OUTDIR)/adlc-%.o: %.cpp
 	@echo Compiling $<
 	$(QUIETLY) $(REMOVE_TARGET)
-	$(QUIETLY) $(COMPILE.CC) -o $@ $< $(COMPILE_DONE)
+	$(QUIETLY) $(HOST.COMPILE.CC) -o $@ $< $(COMPILE_DONE)
 
 # #########################################################################
......
@@ -30,9 +30,13 @@
 ifdef CROSS_COMPILE_ARCH
 CPP = $(ALT_COMPILER_PATH)/g++
 CC  = $(ALT_COMPILER_PATH)/gcc
+HOSTCPP = g++
+HOSTCC  = gcc
 else
 CPP = g++
 CC  = gcc
+HOSTCPP = $(CPP)
+HOSTCC  = $(CC)
 endif
 
 AS  = $(CC) -c
......
@@ -55,6 +55,14 @@ LINK_NOPROF.CC = $(CCC) $(LFLAGS) $(AOUT_FLAGS)
 LINK_LIB.CC = $(CCC) $(LFLAGS) $(SHARED_FLAG)
 PREPROCESS.CC = $(CC_COMPILE) -E
 
+# cross compiling the jvm with c2 requires host compilers to build
+# adlc tool
+HOST.CC_COMPILE      = $(HOSTCPP) $(CPPFLAGS) $(CFLAGS)
+HOST.COMPILE.CC      = $(HOST.CC_COMPILE) -c
+HOST.LINK_NOPROF.CC  = $(HOSTCPP) $(LFLAGS) $(AOUT_FLAGS)
+
 # Effect of REMOVE_TARGET is to delete out-of-date files during "gnumake -k".
 REMOVE_TARGET = rm -f $@
......
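The makefile hunks above are the heart of the cross-build support: adlc is a code generator that must execute on the build machine during the build, so it is now compiled and linked with the HOSTCPP/HOSTCC toolchain via the new HOST.* rules, while the VM objects keep using the (possibly cross) CPP/CC. A minimal standalone sketch of the same pattern, with illustrative names (generator, main.c) rather than the HotSpot makefiles themselves:

ifdef CROSS_COMPILE_ARCH
  CC     = $(ALT_COMPILER_PATH)/gcc   # cross compiler for target objects
  HOSTCC = gcc                        # native compiler for build-time tools
else
  CC     = gcc
  HOSTCC = $(CC)
endif

# The generator must run during the build, so it is built with HOSTCC.
generator: generator.c
	$(HOSTCC) -o $@ $<

# Target objects consume the generator's output and use the (cross) CC.
main.o: main.c generator
	./generator > generated.h
	$(CC) -c -o $@ main.c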
@@ -29,6 +29,9 @@ CPP = CC
 CC  = cc
 AS  = $(CC) -c
 
+HOSTCPP = $(CPP)
+HOSTCC  = $(CC)
+
 ARCHFLAG       = $(ARCHFLAG/$(BUILDARCH))
 ARCHFLAG/i486  = -m32
 ARCHFLAG/amd64 = -m64
......
@@ -1843,6 +1843,10 @@ const int Matcher::init_array_short_size = 8 * BytesPerLong;
 // registers? True for Intel but false for most RISCs
 const bool Matcher::clone_shift_expressions = false;
 
+// Do we need to mask the count passed to shift instructions or does
+// the cpu only look at the lower 5/6 bits anyway?
+const bool Matcher::need_masked_shift_count = false;
+
 bool Matcher::narrow_oop_use_complex_address() {
   NOT_LP64(ShouldNotCallThis());
   assert(UseCompressedOops, "only for compressed oops code");
......
@@ -1393,6 +1393,10 @@ const int Matcher::init_array_short_size = 8 * BytesPerLong;
 // registers? True for Intel but false for most RISCs
 const bool Matcher::clone_shift_expressions = true;
 
+// Do we need to mask the count passed to shift instructions or does
+// the cpu only look at the lower 5/6 bits anyway?
+const bool Matcher::need_masked_shift_count = false;
+
 bool Matcher::narrow_oop_use_complex_address() {
   ShouldNotCallThis();
   return true;
......
@@ -2000,6 +2000,10 @@ const int Matcher::init_array_short_size = 8 * BytesPerLong;
 // into registers? True for Intel but false for most RISCs
 const bool Matcher::clone_shift_expressions = true;
 
+// Do we need to mask the count passed to shift instructions or does
+// the cpu only look at the lower 5/6 bits anyway?
+const bool Matcher::need_masked_shift_count = false;
+
 bool Matcher::narrow_oop_use_complex_address() {
   assert(UseCompressedOops, "only for compressed oops code");
   return (LogMinObjAlignmentInBytes <= 3);
......
@@ -239,6 +239,11 @@ int main(int argc, char *argv[])
   AD.addInclude(AD._CPP_file, "assembler_sparc.inline.hpp");
   AD.addInclude(AD._CPP_file, "nativeInst_sparc.hpp");
   AD.addInclude(AD._CPP_file, "vmreg_sparc.inline.hpp");
+#endif
+#ifdef TARGET_ARCH_arm
+  AD.addInclude(AD._CPP_file, "assembler_arm.inline.hpp");
+  AD.addInclude(AD._CPP_file, "nativeInst_arm.hpp");
+  AD.addInclude(AD._CPP_file, "vmreg_arm.inline.hpp");
 #endif
   AD.addInclude(AD._HPP_file, "memory/allocation.hpp");
   AD.addInclude(AD._HPP_file, "opto/machnode.hpp");
......
@@ -673,7 +673,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
     case Op_RegD:
       lrg.set_num_regs(2);
       // Define platform specific register pressure
-#ifdef SPARC
+#if defined(SPARC) || defined(ARM)
       lrg.set_reg_pressure(2);
 #elif defined(IA32)
       if( ireg == Op_RegL ) {
......
@@ -2544,6 +2544,36 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc ) {
       frc.inc_inner_loop_count();
     }
     break;
+
+  case Op_LShiftI:
+  case Op_RShiftI:
+  case Op_URShiftI:
+  case Op_LShiftL:
+  case Op_RShiftL:
+  case Op_URShiftL:
+    if (Matcher::need_masked_shift_count) {
+      // The cpu's shift instructions don't restrict the count to the
+      // lower 5/6 bits.  We need to do the masking ourselves.
+      Node* in2 = n->in(2);
+      juint mask = (n->bottom_type() == TypeInt::INT) ? (BitsPerInt - 1) : (BitsPerLong - 1);
+      const TypeInt* t = in2->find_int_type();
+      if (t != NULL && t->is_con()) {
+        juint shift = t->get_con();
+        if (shift > mask) { // Unsigned cmp
+          Compile* C = Compile::current();
+          n->set_req(2, ConNode::make(C, TypeInt::make(shift & mask)));
+        }
+      } else {
+        if (t == NULL || t->_lo < 0 || t->_hi > (int)mask) {
+          Compile* C = Compile::current();
+          Node* shift = new (C, 3) AndINode(in2, ConNode::make(C, TypeInt::make(mask)));
+          n->set_req(2, shift);
+        }
+      }
+      if (in2->outcnt() == 0) { // Remove dead node
+        in2->disconnect_inputs(NULL);
+      }
+    }
+    break;
+
   default:
     assert( !n->is_Call(), "" );
     assert( !n->is_Mem(), "" );
......
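The new final_graph_reshaping case above exists because Java semantics take a shift count modulo the operand width: x << n means x << (n & 31) for ints and x << (n & 63) for longs. x86 and SPARC shifters mask the count in hardware, which is why their .ad files set need_masked_shift_count to false; on a CPU that honors all count bits, C2 must insert the AND itself. A small self-contained C++ sketch (not HotSpot code) of the semantics being enforced:

#include <cassert>
#include <cstdint>

// Java's shift semantics: only the low 5 (int) or 6 (long) count bits
// are significant.  The explicit AND mirrors the AndINode that the
// transformation above inserts when the count is not provably within
// [0, mask].
int32_t java_shl_int(int32_t x, int32_t count) {
  return x << (count & 31);            // mask = BitsPerInt - 1
}

int64_t java_shl_long(int64_t x, int32_t count) {
  return x << (count & 63);            // mask = BitsPerLong - 1
}

int main() {
  // A constant count above the mask folds to (count & mask), matching
  // the is_con() branch above: 33 & 31 == 1.
  assert(java_shl_int(1, 33) == (1 << 1));
  // 64 & 63 == 0, so a long shift by 64 leaves the value unchanged.
  assert(java_shl_long(1, 64) == 1);
  return 0;
}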
@@ -42,6 +42,9 @@
 #ifdef TARGET_ARCH_MODEL_zero
 # include "adfiles/ad_zero.hpp"
 #endif
+#ifdef TARGET_ARCH_MODEL_arm
+# include "adfiles/ad_arm.hpp"
+#endif
 
 // Optimization - Graph Style
......
@@ -49,6 +49,9 @@
 #ifdef TARGET_ARCH_MODEL_zero
 # include "adfiles/ad_zero.hpp"
 #endif
+#ifdef TARGET_ARCH_MODEL_arm
+# include "adfiles/ad_arm.hpp"
+#endif
 
 OptoReg::Name OptoReg::c_frame_pointer;
......
@@ -427,6 +427,11 @@ public:
   // Do ints take an entire long register or just half?
   static const bool int_in_long;
 
+  // Do the processor's shift instructions only use the low 5/6 bits
+  // of the count for 32/64 bit ints?  If not we need to do the masking
+  // ourselves.
+  static const bool need_masked_shift_count;
+
   // This routine is run whenever a graph fails to match.
   // If it returns, the compiler should bailout to interpreter without error.
   // In non-product mode, SoftMatchFailure is false to detect non-canonical
......
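SPARC and both x86 ports define the flag as false above because their shift instructions already ignore the high count bits. A port that does need the masking, such as the ARM port this change prepares for, would presumably define the opposite in its .ad file; the line below is a hypothetical illustration, since no ARM .ad file is part of this diff:

// Hypothetical port definition (not included in this change): opting in
// makes final_graph_reshaping insert the explicit count mask shown in
// the compile.cpp hunk above.
const bool Matcher::need_masked_shift_count = true;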