Commit 84213d5a authored by mikael

8003310: Enable -Wunused-function when compiling with gcc

Summary: Add the -Wunused-function flag and remove a number of unused functions.
Reviewed-by: dholmes, coleenp, kvn
Parent 24402beb
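The flag this change enables, -Wunused-function, fires when a static (internal-linkage) function is defined but never referenced in its translation unit; because the HotSpot makefiles also pass -Werror (WARNINGS_ARE_ERRORS below), every such function becomes a build failure, which is why the rest of the patch deletes them. A minimal sketch of the failure mode, with a hypothetical file name and the diagnostic paraphrased:

// demo.cpp (hypothetical file, not part of this patch)
static int helper(int x) {   // defined, but never called in this translation unit
  return x + 1;
}

int main() { return 0; }

// g++ -c -Werror -Wunused-function demo.cpp
// error: 'int helper(int)' defined but not used [-Werror=unused-function]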
......@@ -126,14 +126,12 @@ endif
# Compiler warnings are treated as errors
WARNINGS_ARE_ERRORS = -Werror
# Except for a few acceptable ones
WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function
# Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
# conversions which might affect the values. To avoid that, we need to turn
# it off explicitly.
ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef
else
WARNING_FLAGS = -Wpointer-arith -Wconversion -Wsign-compare -Wundef
# conversions which might affect the values. Only enable it in earlier versions.
ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
WARNING_FLAGS += -Wconversion
endif
CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(WARNING_FLAGS)
......
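The makefile hunk above also reorganizes the -Wconversion handling: starting with GCC 4.3 the flag warns about ordinary value-changing implicit conversions, which are pervasive in the HotSpot sources, so it is now only appended for older compilers. A small illustrative sketch (hypothetical names, diagnostic paraphrased) of the kind of code GCC >= 4.3 flags under -Wconversion:

// conversion_demo.cpp (hypothetical)
void take_short(short s);

void call_it(int i) {
  take_short(i);   // GCC >= 4.3 with -Wconversion:
                   // warning: conversion to 'short int' from 'int' may alter its value
}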
......@@ -214,14 +214,6 @@ static int encode(Register r) {
return enc;
}
static int encode(XMMRegister r) {
int enc = r->encoding();
if (enc >= 8) {
enc -= 8;
}
return enc;
}
void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
assert(dst->has_byte_register(), "must have byte register");
assert(isByte(op1) && isByte(op2), "wrong opcode");
......
......@@ -41,11 +41,6 @@
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
static RegisterOrConstant constant(int value) {
return RegisterOrConstant(value);
}
void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg) {
if (VerifyMethodHandles)
verify_klass(_masm, klass_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_Class),
......
......@@ -1669,17 +1669,6 @@ const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return PTR_RBP_REG_mask();
}
static Address build_address(int b, int i, int s, int d) {
Register index = as_Register(i);
Address::ScaleFactor scale = (Address::ScaleFactor)s;
if (index == rsp) {
index = noreg;
scale = Address::no_scale;
}
Address addr(as_Register(b), index, scale, d);
return addr;
}
%}
//----------ENCODING BLOCK-----------------------------------------------------
......
......@@ -152,7 +152,6 @@ sigset_t SR_sigset;
// utility functions
static int SR_initialize();
static int SR_finalize();
julong os::available_memory() {
return Bsd::available_memory();
......@@ -2783,10 +2782,6 @@ static int SR_initialize() {
return 0;
}
static int SR_finalize() {
return 0;
}
// returns true on success and false on error - really an error is fatal
// but this seems the normal response to library errors
......@@ -3595,16 +3590,6 @@ int os::Bsd::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex,
////////////////////////////////////////////////////////////////////////////////
// debug support
static address same_page(address x, address y) {
int page_bits = -os::vm_page_size();
if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
return x;
else if (x > y)
return (address)(intptr_t(y) | ~page_bits) + 1;
else
return (address)(intptr_t(y) & page_bits);
}
bool os::find(address addr, outputStream* st) {
Dl_info dlinfo;
memset(&dlinfo, 0, sizeof(dlinfo));
......@@ -3628,8 +3613,8 @@ bool os::find(address addr, outputStream* st) {
if (Verbose) {
// decode some bytes around the PC
address begin = same_page(addr-40, addr);
address end = same_page(addr+40, addr);
address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
address end = clamp_address_in_page(addr+40, addr, os::vm_page_size());
address lowest = (address) dlinfo.dli_sname;
if (!lowest) lowest = (address) dlinfo.dli_fbase;
if (begin < lowest) begin = lowest;
......
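The deleted same_page helper relied on the fact that, for a power-of-two page size, masking an address with -page_size rounds it down to the start of its page; the new clamp_address_in_page (added to globalDefinitions.hpp later in this change) expresses the same idea via align_size_down. A standalone sketch of the masking trick, assuming a 4 KB page size for the demo:

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t page_size = 4096;        // assumed page size, for illustration only
  const intptr_t page_bits = -page_size;  // two's complement: ...1111 0000 0000 0000
  const intptr_t addr = 0x12345;
  assert((addr & page_bits) == 0x12000);         // rounded down to the page start
  assert((addr & ~(page_size - 1)) == 0x12000);  // equivalent formulation
  return 0;
}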
......@@ -176,7 +176,6 @@ class MemNotifyThread: public Thread {
// utility functions
static int SR_initialize();
static int SR_finalize();
julong os::available_memory() {
return Linux::available_memory();
......@@ -3672,10 +3671,6 @@ static int SR_initialize() {
return 0;
}
static int SR_finalize() {
return 0;
}
// returns true on success and false on error - really an error is fatal
// but this seems the normal response to library errors
......@@ -4517,16 +4512,6 @@ int os::Linux::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mute
////////////////////////////////////////////////////////////////////////////////
// debug support
static address same_page(address x, address y) {
int page_bits = -os::vm_page_size();
if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
return x;
else if (x > y)
return (address)(intptr_t(y) | ~page_bits) + 1;
else
return (address)(intptr_t(y) & page_bits);
}
bool os::find(address addr, outputStream* st) {
Dl_info dlinfo;
memset(&dlinfo, 0, sizeof(dlinfo));
......@@ -4550,8 +4535,8 @@ bool os::find(address addr, outputStream* st) {
if (Verbose) {
// decode some bytes around the PC
address begin = same_page(addr-40, addr);
address end = same_page(addr+40, addr);
address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
address end = clamp_address_in_page(addr+40, addr, os::vm_page_size());
address lowest = (address) dlinfo.dli_sname;
if (!lowest) lowest = (address) dlinfo.dli_fbase;
if (begin < lowest) begin = lowest;
......
......@@ -5808,16 +5808,6 @@ int os::loadavg(double loadavg[], int nelem) {
//---------------------------------------------------------------------------------
static address same_page(address x, address y) {
intptr_t page_bits = -os::vm_page_size();
if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
return x;
else if (x > y)
return (address)(intptr_t(y) | ~page_bits) + 1;
else
return (address)(intptr_t(y) & page_bits);
}
bool os::find(address addr, outputStream* st) {
Dl_info dlinfo;
memset(&dlinfo, 0, sizeof(dlinfo));
......@@ -5843,8 +5833,8 @@ bool os::find(address addr, outputStream* st) {
if (Verbose) {
// decode some bytes around the PC
address begin = same_page(addr-40, addr);
address end = same_page(addr+40, addr);
address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
address end = clamp_address_in_page(addr+40, addr, os::vm_page_size());
address lowest = (address) dlinfo.dli_sname;
if (!lowest) lowest = (address) dlinfo.dli_fbase;
if (begin < lowest) begin = lowest;
......
......@@ -707,25 +707,6 @@ static ciArrayKlass* as_array_klass(ciType* type) {
}
}
static Value maxvalue(IfOp* ifop) {
switch (ifop->cond()) {
case If::eql: return NULL;
case If::neq: return NULL;
case If::lss: // x < y ? x : y
case If::leq: // x <= y ? x : y
if (ifop->x() == ifop->tval() &&
ifop->y() == ifop->fval()) return ifop->y();
return NULL;
case If::gtr: // x > y ? y : x
case If::geq: // x >= y ? y : x
if (ifop->x() == ifop->tval() &&
ifop->y() == ifop->fval()) return ifop->y();
return NULL;
}
}
static ciType* phi_declared_type(Phi* phi) {
ciType* t = phi->operand_at(0)->declared_type();
if (t == NULL) {
......
......@@ -60,28 +60,6 @@ CompileLog::~CompileLog() {
}
// Advance kind up to a null or space, return this tail.
// Make sure kind is null-terminated, not space-terminated.
// Use the buffer if necessary.
static const char* split_attrs(const char* &kind, char* buffer) {
const char* attrs = strchr(kind, ' ');
// Tease apart the first word from the rest:
if (attrs == NULL) {
return ""; // no attrs, no split
} else if (kind == buffer) {
((char*) attrs)[-1] = 0;
return attrs;
} else {
// park it in the buffer, so we can put a null on the end
assert(!(kind >= buffer && kind < buffer+100), "not obviously in buffer");
int klen = attrs - kind;
strncpy(buffer, kind, klen);
buffer[klen] = 0;
kind = buffer; // return by reference
return attrs;
}
}
// see_tag, pop_tag: Override the default do-nothing methods on xmlStream.
// These methods provide a hook for managing the the extra context markup.
void CompileLog::see_tag(const char* tag, bool push) {
......
......@@ -237,13 +237,6 @@ static const char * command_names[] = {
"help"
};
static const char * command_name(OracleCommand command) {
if (command < OracleFirstCommand || command >= OracleCommandCount) {
return "unknown command";
}
return command_names[command];
}
class MethodMatcher;
static MethodMatcher* lists[OracleCommandCount] = { 0, };
......
......@@ -1359,18 +1359,6 @@ void G1CollectorPolicy::print_yg_surv_rate_info() const {
#endif // PRODUCT
}
#ifndef PRODUCT
// for debugging, bit of a hack...
static char*
region_num_to_mbs(int length) {
static char buffer[64];
double bytes = (double) (length * HeapRegion::GrainBytes);
double mbs = bytes / (double) (1024 * 1024);
sprintf(buffer, "%7.2lfMB", mbs);
return buffer;
}
#endif // PRODUCT
uint G1CollectorPolicy::max_regions(int purpose) {
switch (purpose) {
case GCAllocForSurvived:
......
......@@ -53,15 +53,6 @@ void PtrQueue::flush() {
}
static int byte_index_to_index(int ind) {
assert((ind % oopSize) == 0, "Invariant.");
return ind / oopSize;
}
static int index_to_byte_index(int byte_ind) {
return byte_ind * oopSize;
}
void PtrQueue::enqueue_known_active(void* ptr) {
assert(0 <= _index && _index <= _sz, "Invariant.");
assert(_index == 0 || _buf != NULL, "invariant");
......
......@@ -557,11 +557,6 @@ IRT_END
// be shared by method invocation and synchronized blocks.
//%note synchronization_3
static void trace_locking(Handle& h_locking_obj, bool is_locking) {
ObjectSynchronizer::trace_locking(h_locking_obj, false, true, is_locking);
}
//%note monitor_1
IRT_ENTRY_NO_ASYNC(void, InterpreterRuntime::monitorenter(JavaThread* thread, BasicObjectLock* elem))
#ifdef ASSERT
......
......@@ -79,13 +79,6 @@ static size_t align_to_page_size(size_t size) {
}
static size_t align_to_allocation_size(size_t size) {
const size_t alignment = (size_t)os::vm_allocation_granularity();
assert(is_power_of_2(alignment), "no kidding ???");
return (size + alignment - 1) & ~(alignment - 1);
}
void CodeHeap::on_code_mapping(char* base, size_t size) {
#ifdef LINUX
extern void linux_wrap_code(char* base, size_t size);
......
......@@ -1335,6 +1335,8 @@ static uintptr_t _verify_oop_data[2] = {0, (uintptr_t)-1};
static uintptr_t _verify_klass_data[2] = {0, (uintptr_t)-1};
#ifndef PRODUCT
static void calculate_verify_data(uintptr_t verify_data[2],
HeapWord* low_boundary,
HeapWord* high_boundary) {
......@@ -1369,9 +1371,7 @@ static void calculate_verify_data(uintptr_t verify_data[2],
verify_data[1] = bits;
}
// Oop verification (see MacroAssembler::verify_oop)
#ifndef PRODUCT
uintptr_t Universe::verify_oop_mask() {
MemRegion m = heap()->reserved_region();
......
......@@ -1378,12 +1378,13 @@ const char* ConstantPool::printable_name_at(int which) {
// JVMTI GetConstantPool support
// For temporary use until code is stable.
#define DBG(code)
// For debugging of constant pool
const bool debug_cpool = false;
static const char* WARN_MSG = "Must not be such entry!";
#define DBG(code) do { if (debug_cpool) { (code); } } while(0)
static void print_cpool_bytes(jint cnt, u1 *bytes) {
const char* WARN_MSG = "Must not be such entry!";
jint size = 0;
u2 idx1, idx2;
......@@ -1669,8 +1670,7 @@ int ConstantPool::copy_cpool_bytes(int cpool_size,
idx1 = tbl->symbol_to_value(sym);
assert(idx1 != 0, "Have not found a hashtable entry");
Bytes::put_Java_u2((address) (bytes+1), idx1);
DBG(char *str = sym->as_utf8());
DBG(printf("JVM_CONSTANT_String: idx=#%03hd, %s", idx1, str));
DBG(printf("JVM_CONSTANT_String: idx=#%03hd, %s", idx1, sym->as_utf8()));
break;
}
case JVM_CONSTANT_Fieldref:
......@@ -1745,6 +1745,8 @@ int ConstantPool::copy_cpool_bytes(int cpool_size,
return (int)(bytes - start_bytes);
} /* end copy_cpool_bytes */
#undef DBG
void ConstantPool::set_on_stack(const bool value) {
if (value) {
......
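The reworked DBG macro above wraps its body in do { ... } while(0), the usual idiom for making a conditional or multi-statement macro behave like one ordinary statement that requires a trailing semicolon. A generic sketch of why the wrapper matters (names and the printf body are illustrative, not taken from constantPool.cpp):

#include <cstdio>

static const bool debug_enabled = false;   // stand-in for debug_cpool

// With a bare { ... } expansion, the ';' written after the macro call would
// orphan the 'else' below; the do/while(0) form keeps it a single statement.
#define DBG_DEMO(code) do { if (debug_enabled) { code; } } while (0)

int main() {
  int x = 1;
  if (x > 0)
    DBG_DEMO(printf("x is positive\n"));
  else
    printf("x is not positive\n");
  return 0;
}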
......@@ -1028,26 +1028,6 @@ void UnionFind::Union( uint idx1, uint idx2 ) {
}
#ifndef PRODUCT
static void edge_dump(GrowableArray<CFGEdge *> *edges) {
tty->print_cr("---- Edges ----");
for (int i = 0; i < edges->length(); i++) {
CFGEdge *e = edges->at(i);
if (e != NULL) {
edges->at(i)->dump();
}
}
}
static void trace_dump(Trace *traces[], int count) {
tty->print_cr("---- Traces ----");
for (int i = 0; i < count; i++) {
Trace *tr = traces[i];
if (tr != NULL) {
tr->dump();
}
}
}
void Trace::dump( ) const {
tty->print_cr("Trace (freq %f)", first_block()->_freq);
for (Block *b = first_block(); b != NULL; b = next(b)) {
......
......@@ -2326,12 +2326,14 @@ struct Final_Reshape_Counts : public StackObj {
int get_inner_loop_count() const { return _inner_loop_count; }
};
#ifdef ASSERT
static bool oop_offset_is_sane(const TypeInstPtr* tp) {
ciInstanceKlass *k = tp->klass()->as_instance_klass();
// Make sure the offset goes inside the instance layout.
return k->contains_field_offset(tp->offset());
// Note that OffsetBot and OffsetTop are very negative.
}
#endif
// Eliminate trivially redundant StoreCMs and accumulate their
// precedence edges.
......
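Several hunks in this change follow the pattern shown above: a helper that is only called from asserts or other debug-only code is wrapped in #ifdef ASSERT (or #ifndef PRODUCT), so in builds where its sole caller is compiled out the helper disappears too instead of tripping -Wunused-function. A schematic sketch under that assumption, with made-up names:

// Schematic only: in HotSpot, ASSERT is defined for debug builds.
#ifdef ASSERT
// Compiled only when the debug-only check below exists to call it.
static bool offset_is_sane(int offset) {
  return offset >= 0;
}
#endif

void record_offset(int offset) {
#ifdef ASSERT
  if (!offset_is_sane(offset)) {
    // a debug build would report the bad offset and abort here
  }
#endif
  (void)offset;  // ... real work with the offset would go here ...
}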
......@@ -465,29 +465,6 @@ Node *CheckCastPPNode::Identity( PhaseTransform *phase ) {
return (phase->type(in(1)) == phase->type(this)) ? in(1) : this;
}
// Determine whether "n" is a node which can cause an alias of one of its inputs. Node types
// which can create aliases are: CheckCastPP, Phi, and any store (if there is also a load from
// the location.)
// Note: this checks for aliases created in this compilation, not ones which may
// be potentially created at call sites.
static bool can_cause_alias(Node *n, PhaseTransform *phase) {
bool possible_alias = false;
if (n->is_Store()) {
possible_alias = !n->as_Store()->value_never_loaded(phase);
} else {
int opc = n->Opcode();
possible_alias = n->is_Phi() ||
opc == Op_CheckCastPP ||
opc == Op_StorePConditional ||
opc == Op_CompareAndSwapP ||
opc == Op_CompareAndSwapN ||
opc == Op_GetAndSetP ||
opc == Op_GetAndSetN;
}
return possible_alias;
}
//------------------------------Value------------------------------------------
// Take 'join' of input and cast-up type, unless working with an Interface
const Type *CheckCastPPNode::Value( PhaseTransform *phase ) const {
......
......@@ -1078,16 +1078,6 @@ uint BoolNode::cmp( const Node &n ) const {
return (_test._test == b->_test._test);
}
//------------------------------clone_cmp--------------------------------------
// Clone a compare/bool tree
static Node *clone_cmp( Node *cmp, Node *cmp1, Node *cmp2, PhaseGVN *gvn, BoolTest::mask test ) {
Node *ncmp = cmp->clone();
ncmp->set_req(1,cmp1);
ncmp->set_req(2,cmp2);
ncmp = gvn->transform( ncmp );
return new (gvn->C) BoolNode( ncmp, test );
}
//-------------------------------make_predicate--------------------------------
Node* BoolNode::make_predicate(Node* test_value, PhaseGVN* phase) {
if (test_value->is_Con()) return test_value;
......
......@@ -1289,32 +1289,6 @@ enum JNICallType {
JNI_NONVIRTUAL
};
static methodHandle jni_resolve_interface_call(Handle recv, methodHandle method, TRAPS) {
assert(!method.is_null() , "method should not be null");
KlassHandle recv_klass; // Default to NULL (use of ?: can confuse gcc)
if (recv.not_null()) recv_klass = KlassHandle(THREAD, recv->klass());
KlassHandle spec_klass (THREAD, method->method_holder());
Symbol* name = method->name();
Symbol* signature = method->signature();
CallInfo info;
LinkResolver::resolve_interface_call(info, recv, recv_klass, spec_klass, name, signature, KlassHandle(), false, true, CHECK_(methodHandle()));
return info.selected_method();
}
static methodHandle jni_resolve_virtual_call(Handle recv, methodHandle method, TRAPS) {
assert(!method.is_null() , "method should not be null");
KlassHandle recv_klass; // Default to NULL (use of ?: can confuse gcc)
if (recv.not_null()) recv_klass = KlassHandle(THREAD, recv->klass());
KlassHandle spec_klass (THREAD, method->method_holder());
Symbol* name = method->name();
Symbol* signature = method->signature();
CallInfo info;
LinkResolver::resolve_virtual_call(info, recv, recv_klass, spec_klass, name, signature, KlassHandle(), false, true, CHECK_(methodHandle()));
return info.selected_method();
}
static void jni_invoke_static(JNIEnv *env, JavaValue* result, jobject receiver, JNICallType call_type, jmethodID method_id, JNI_ArgumentPusher *args, TRAPS) {
......@@ -5053,6 +5027,7 @@ _JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_GetDefaultJavaVMInitArgs(void *args_) {
void execute_internal_vm_tests() {
if (ExecuteInternalVMTests) {
tty->print_cr("Running internal VM tests");
run_unit_test(GlobalDefinitions::test_globals());
run_unit_test(arrayOopDesc::test_max_array_length());
run_unit_test(CollectedHeap::test_is_in());
run_unit_test(QuickSort::test_quick_sort());
......
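The new run_unit_test(GlobalDefinitions::test_globals()) line above hooks the test added later in this change into execute_internal_vm_tests; run_unit_test is, in essence, a wrapper that runs the named test (its exact definition is not shown in this diff). A generic sketch of that kind of macro, with a paraphrased definition and made-up test name:

#include <cstdio>

// Hypothetical stand-in for run_unit_test: stringize the call, log it, run it.
#define RUN_UNIT_TEST(call) do { printf("Running test: %s\n", #call); call; } while (0)

static void sample_test() { /* assertions would go here */ }

int main() {
  RUN_UNIT_TEST(sample_test());
  return 0;
}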
......@@ -33,7 +33,7 @@ extern "C" {
// within IN_VM macro), one to be called when in NATIVE state.
// When in VM state:
static void ReportJNIFatalError(JavaThread* thr, const char *msg) {
static inline void ReportJNIFatalError(JavaThread* thr, const char *msg) {
tty->print_cr("FATAL ERROR in native method: %s", msg);
thr->print_stack();
os::abort(true);
......
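ReportJNIFatalError above is defined in a header, so as a plain static function every translation unit that includes the header without calling it gets its own unused copy, and -Wunused-function fires there; GCC only warns about non-inline static functions, so adding inline silences the warning without changing behavior. A reduced sketch (header and function names are made up, diagnostic paraphrased):

// report.hpp (hypothetical header included from many .cpp files)

// Plain static definition: a translation unit that includes this header but
// never calls it sees roughly
//   warning: 'void report_plain(const char*)' defined but not used [-Wunused-function]
static void report_plain(const char* msg) { (void)msg; /* print and abort */ }

// static inline: an unused copy is expected, so GCC stays quiet.
static inline void report_inlined(const char* msg) { (void)msg; /* print and abort */ }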
......@@ -1745,11 +1745,15 @@ bool Arguments::verify_percentage(uintx value, const char* name) {
return false;
}
#if !INCLUDE_ALL_GCS
#ifdef ASSERT
static bool verify_serial_gc_flags() {
return (UseSerialGC &&
!(UseParNewGC || (UseConcMarkSweepGC || CMSIncrementalMode) || UseG1GC ||
UseParallelGC || UseParallelOldGC));
}
#endif // ASSERT
#endif // INCLUDE_ALL_GCS
// check if do gclog rotation
// +UseGCLogFileRotation is a must,
......@@ -3085,6 +3089,7 @@ do { \
} \
} while(0)
#if !INCLUDE_ALL_GCS
static void force_serial_gc() {
FLAG_SET_DEFAULT(UseSerialGC, true);
FLAG_SET_DEFAULT(CMSIncrementalMode, false); // special CMS suboption
......@@ -3094,6 +3099,7 @@ static void force_serial_gc() {
UNSUPPORTED_GC_OPTION(UseConcMarkSweepGC);
UNSUPPORTED_GC_OPTION(UseParNewGC);
}
#endif // INCLUDE_ALL_GCS
// Parse entry point called from JNI_CreateJavaVM
......
......@@ -735,6 +735,9 @@ void SafepointSynchronize::block(JavaThread *thread) {
// Exception handlers
#ifndef PRODUCT
#ifdef SPARC
#ifdef _LP64
#define PTR_PAD ""
#else
......@@ -755,7 +758,6 @@ static void print_longs(jlong oldptr, jlong newptr, bool wasoop) {
newptr, is_oop?"oop":" ", (wasoop && !is_oop) ? "STALE" : ((wasoop==false&&is_oop==false&&oldptr !=newptr)?"STOMP":" "));
}
#ifdef SPARC
static void print_me(intptr_t *new_sp, intptr_t *old_sp, bool *was_oops) {
#ifdef _LP64
tty->print_cr("--------+------address-----+------before-----------+-------after----------+");
......
......@@ -449,8 +449,6 @@ void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
// and explicit fences (barriers) to control for architectural reordering performed
// by the CPU(s) or platform.
static int MBFence (int x) { OrderAccess::fence(); return x; }
struct SharedGlobals {
// These are highly shared mostly-read variables.
// To avoid false-sharing they need to be the sole occupants of a $ line.
......@@ -1639,11 +1637,6 @@ void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
#ifndef PRODUCT
void ObjectSynchronizer::trace_locking(Handle locking_obj, bool is_compiled,
bool is_method, bool is_locking) {
// Don't know what to do here
}
// Verify all monitors in the monitor cache, the verification is weak.
void ObjectSynchronizer::verify() {
ObjectMonitor* block = gBlockList;
......
......@@ -121,7 +121,6 @@ class ObjectSynchronizer : AllStatic {
static void oops_do(OopClosure* f);
// debugging
static void trace_locking(Handle obj, bool is_compiled, bool is_method, bool is_locking) PRODUCT_RETURN;
static void verify() PRODUCT_RETURN;
static int verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;
......
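The PRODUCT_RETURN / PRODUCT_RETURN0 suffixes still visible above are HotSpot's usual way of declaring debug-only methods: in product builds the macro supplies a trivial inline body (empty, or return 0), while in non-product builds it expands to nothing so the declaration ends at the ';' and the real definition lives in the .cpp file. A sketch of the pattern under that assumption (macro bodies paraphrased, class name made up):

// Paraphrased pattern; PRODUCT marks release builds.
#ifdef PRODUCT
#define PRODUCT_RETURN   {}              // product build: empty inline body
#define PRODUCT_RETURN0  { return 0; }   // product build: trivially return 0
#else
#define PRODUCT_RETURN                   // debug build: declaration only, defined in the .cpp
#define PRODUCT_RETURN0
#endif

class SynchronizerSketch {
 public:
  static void verify() PRODUCT_RETURN;
  static int  verify_objmon_isinpool(void* addr) PRODUCT_RETURN0;
};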
......@@ -608,18 +608,6 @@ extern "C" nmethod* findnm(intptr_t addr) {
return CodeCache::find_nmethod((address)addr);
}
static address same_page(address x, address y) {
intptr_t page_bits = -os::vm_page_size();
if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits)) {
return x;
} else if (x > y) {
return (address)(intptr_t(y) | ~page_bits) + 1;
} else {
return (address)(intptr_t(y) & page_bits);
}
}
// Another interface that isn't ambiguous in dbx.
// Can we someday rename the other find to hsfind?
extern "C" void hsfind(intptr_t x) {
......
......@@ -355,3 +355,33 @@ size_t lcm(size_t a, size_t b) {
return size_t(result);
}
#ifndef PRODUCT
void GlobalDefinitions::test_globals() {
intptr_t page_sizes[] = { os::vm_page_size(), 4096, 8192, 65536, 2*1024*1024 };
const int num_page_sizes = sizeof(page_sizes) / sizeof(page_sizes[0]);
for (int i = 0; i < num_page_sizes; i++) {
intptr_t page_size = page_sizes[i];
address a_page = (address)(10*page_size);
// Check that address within page is returned as is
assert(clamp_address_in_page(a_page, a_page, page_size) == a_page, "incorrect");
assert(clamp_address_in_page(a_page + 128, a_page, page_size) == a_page + 128, "incorrect");
assert(clamp_address_in_page(a_page + page_size - 1, a_page, page_size) == a_page + page_size - 1, "incorrect");
// Check that address above page returns start of next page
assert(clamp_address_in_page(a_page + page_size, a_page, page_size) == a_page + page_size, "incorrect");
assert(clamp_address_in_page(a_page + page_size + 1, a_page, page_size) == a_page + page_size, "incorrect");
assert(clamp_address_in_page(a_page + page_size*5 + 1, a_page, page_size) == a_page + page_size, "incorrect");
// Check that address below page returns start of page
assert(clamp_address_in_page(a_page - 1, a_page, page_size) == a_page, "incorrect");
assert(clamp_address_in_page(a_page - 2*page_size - 1, a_page, page_size) == a_page, "incorrect");
assert(clamp_address_in_page(a_page - 5*page_size - 1, a_page, page_size) == a_page, "incorrect");
}
}
#endif // PRODUCT
......@@ -419,6 +419,24 @@ inline intptr_t align_object_offset(intptr_t offset) {
return align_size_up(offset, HeapWordsPerLong);
}
// Clamp an address to be within a specific page
// 1. If addr is on the page it is returned as is
// 2. If addr is above the page_address the start of the *next* page will be returned
// 3. Otherwise, if addr is below the page_address the start of the page will be returned
inline address clamp_address_in_page(address addr, address page_address, intptr_t page_size) {
if (align_size_down(intptr_t(addr), page_size) == align_size_down(intptr_t(page_address), page_size)) {
// address is in the specified page, just return it as is
return addr;
} else if (addr > page_address) {
// address is above specified page, return start of next page
return (address)align_size_down(intptr_t(page_address), page_size) + page_size;
} else {
// address is below specified page, return start of page
return (address)align_size_down(intptr_t(page_address), page_size);
}
}
// The expected size in bytes of a cache line, used to pad data structures.
#define DEFAULT_CACHE_LINE_SIZE 64
......@@ -1296,4 +1314,15 @@ static inline void* dereference_vptr(void* addr) {
return *(void**)addr;
}
#ifndef PRODUCT
// For unit testing only
class GlobalDefinitions {
public:
static void test_globals();
};
#endif // PRODUCT
#endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_HPP