提交 2ada4bce 编写于 作者: D dbuck

8141491: Unaligned memory access in Bits.c

Summary: Introduce alignment-safe Copy::conjoint_swap and JVM_CopySwapMemory
Reviewed-by: mikael, dholmes
上级 26c45876
#
# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
......@@ -63,6 +63,7 @@ SUNWprivate_1.1 {
JVM_ConstantPoolGetSize;
JVM_ConstantPoolGetStringAt;
JVM_ConstantPoolGetUTF8At;
JVM_CopySwapMemory;
JVM_CountStackFrames;
JVM_CurrentClassLoader;
JVM_CurrentLoadedClass;
......
#
# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
......@@ -63,6 +63,7 @@ SUNWprivate_1.1 {
JVM_ConstantPoolGetSize;
JVM_ConstantPoolGetStringAt;
JVM_ConstantPoolGetUTF8At;
JVM_CopySwapMemory;
JVM_CountStackFrames;
JVM_CurrentClassLoader;
JVM_CurrentLoadedClass;
......
#
# Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
......@@ -61,6 +61,7 @@
_JVM_ConstantPoolGetSize
_JVM_ConstantPoolGetStringAt
_JVM_ConstantPoolGetUTF8At
_JVM_CopySwapMemory
_JVM_CountStackFrames
_JVM_CurrentClassLoader
_JVM_CurrentLoadedClass
......
#
# Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
......@@ -61,6 +61,7 @@
_JVM_ConstantPoolGetSize
_JVM_ConstantPoolGetStringAt
_JVM_ConstantPoolGetUTF8At
_JVM_CopySwapMemory
_JVM_CountStackFrames
_JVM_CurrentClassLoader
_JVM_CurrentLoadedClass
......
#
# Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
......@@ -63,6 +63,7 @@ SUNWprivate_1.1 {
JVM_ConstantPoolGetSize;
JVM_ConstantPoolGetStringAt;
JVM_ConstantPoolGetUTF8At;
JVM_CopySwapMemory;
JVM_CountStackFrames;
JVM_CurrentClassLoader;
JVM_CurrentLoadedClass;
......
#
# Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
......@@ -63,6 +63,7 @@ SUNWprivate_1.1 {
JVM_ConstantPoolGetSize;
JVM_ConstantPoolGetStringAt;
JVM_ConstantPoolGetUTF8At;
JVM_CopySwapMemory;
JVM_CountStackFrames;
JVM_CurrentClassLoader;
JVM_CurrentLoadedClass;
......
#
# Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
......@@ -64,6 +64,7 @@ SUNWprivate_1.1 {
JVM_ConstantPoolGetStringAt;
JVM_ConstantPoolGetUTF8At;
JVM_CountStackFrames;
JVM_CopySwapMemory;
JVM_CurrentClassLoader;
JVM_CurrentLoadedClass;
JVM_CurrentThread;
......
......@@ -759,6 +759,79 @@ JVM_LEAF(char*, JVM_NativePath(char* path))
JVM_END
// java.nio.Bits ///////////////////////////////////////////////////////////////
#define MAX_OBJECT_SIZE \
( arrayOopDesc::header_size(T_DOUBLE) * HeapWordSize \
+ ((julong)max_jint * sizeof(double)) )
// Convert an Unsafe field-offset cookie into a byte offset.
// In this VM, field offsets are encoded directly as byte offsets,
// so the conversion is the identity.
static inline jlong field_offset_to_byte_offset(jlong field_offset) {
  return field_offset;
}
// Debug-only sanity check that 'field_offset' is a plausible byte offset
// into object 'p'. When p is NULL the offset is treated as a raw native
// address and no checks apply. Compiles to nothing in product builds.
static inline void assert_field_offset_sane(oop p, jlong field_offset) {
#ifdef ASSERT
  jlong byte_offset = field_offset_to_byte_offset(field_offset);
  if (p != NULL) {
    // Offset must be non-negative and within the largest possible Java object.
    assert(byte_offset >= 0 && byte_offset <= (jlong)MAX_OBJECT_SIZE, "sane offset");
    if (byte_offset == (jint)byte_offset) {
      void* ptr_plus_disp = (address)p + byte_offset;
      // Cross-check raw pointer arithmetic against oopDesc field addressing.
      assert((void*)p->obj_field_addr<oop>((jint)byte_offset) == ptr_plus_disp,
             "raw [ptr+disp] must be consistent with oop::field_base");
    }
    jlong p_size = HeapWordSize * (jlong)(p->size());
    // Offset must also fall inside this particular object's footprint.
    assert(byte_offset < p_size, err_msg("Unsafe access: offset " INT64_FORMAT
                                         " > object's size " INT64_FORMAT,
                                         (int64_t)byte_offset, (int64_t)p_size));
  }
#endif
}
// Form the raw address of a field: base oop plus byte offset. When p is
// NULL, the offset is returned as-is (i.e. it is a raw native address).
static inline void* index_oop_from_field_offset_long(oop p, jlong field_offset) {
  // Validate the offset in debug builds before doing raw pointer math.
  assert_field_offset_sane(p, field_offset);
  const jlong offs = field_offset_to_byte_offset(field_offset);
  // On 32-bit platforms (pointer width equals jint width; this comparison
  // is a compile-time constant) narrow the displacement before the add.
  if (sizeof(char*) == sizeof(jint)) {
    return (address)p + (jint)offs;
  }
  return (address)p + offs;
}
// This function is a leaf since if the source and destination are both in native memory
// the copy may potentially be very large, and we don't want to disable GC if we can avoid it.
// If either source or destination (or both) are on the heap, the function will enter VM using
// JVM_ENTRY_FROM_LEAF
JVM_LEAF(void, JVM_CopySwapMemory(JNIEnv *env, jobject srcObj, jlong srcOffset,
                                  jobject dstObj, jlong dstOffset, jlong size,
                                  jlong elemSize)) {
  // Narrow the jlong byte count and element size to size_t for Copy.
  size_t sz = (size_t)size;
  size_t esz = (size_t)elemSize;
  if (srcObj == NULL && dstObj == NULL) {
    // Both src & dst are in native memory: the offsets ARE the addresses,
    // so no heap access (and no VM transition) is needed.
    address src = (address)srcOffset;
    address dst = (address)dstOffset;
    Copy::conjoint_swap(src, dst, sz, esz);
  } else {
    // At least one of src/dst are on heap, transition to VM to access raw pointers
    JVM_ENTRY_FROM_LEAF(env, void, JVM_CopySwapMemory) {
      oop srcp = JNIHandles::resolve(srcObj);
      oop dstp = JNIHandles::resolve(dstObj);
      // A NULL base resolves to NULL here, making the offset an absolute
      // native address (see index_oop_from_field_offset_long).
      address src = (address)index_oop_from_field_offset_long(srcp, srcOffset);
      address dst = (address)index_oop_from_field_offset_long(dstp, dstOffset);
      Copy::conjoint_swap(src, dst, sz, esz);
    } JVM_END
  }
} JVM_END
// Misc. class handling ///////////////////////////////////////////////////////////
......
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -144,6 +144,14 @@ JVM_InitProperties(JNIEnv *env, jobject p);
JNIEXPORT void JNICALL
JVM_OnExit(void (*func)(void));
/*
* java.nio.Bits
*/
JNIEXPORT void JNICALL
JVM_CopySwapMemory(JNIEnv *env, jobject srcObj, jlong srcOffset,
jobject dstObj, jlong dstOffset, jlong size,
jlong elemSize);
/*
* java.lang.Runtime
*/
......
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -431,6 +431,14 @@ class RuntimeHistogramElement : public HistogramElement {
os::verify_stack_alignment(); \
/* begin of body */
// Entry bookkeeping for a VM-entry scope opened from inside a JVM_LEAF body
// (used by JVM_ENTRY_FROM_LEAF below). The caller has already performed the
// native->VM thread-state transition, so this only sets up call tracing,
// handle-mark cleanup and the THREAD alias for the nested body.
#define VM_ENTRY_BASE_FROM_LEAF(result_type, header, thread)                \
  TRACE_CALL(result_type, header)                                           \
  debug_only(ResetNoHandleMark __rnhm;)                                     \
  HandleMarkCleaner __hm(thread);                                           \
  Thread* THREAD = thread;                                                  \
  os::verify_stack_alignment();                                             \
  /* begin of body */
// ENTRY routines may lock, GC and throw exceptions
......@@ -592,6 +600,14 @@ extern "C" { \
VM_LEAF_BASE(result_type, header)
// Opens a nested VM-entry scope from inside a JVM_LEAF body: transitions the
// current thread from native to in-VM (ThreadInVMfromNative) and performs the
// usual entry setup. Must be closed with JVM_END, which supplies the two
// matching closing braces opened here.
#define JVM_ENTRY_FROM_LEAF(env, result_type, header)                \
  { {                                                                \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE_FROM_LEAF(result_type, header, thread)

// Closes the two braces opened by JVM_ENTRY* macros.
#define JVM_END } }
#endif // SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP
/*
* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2006, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -53,6 +53,175 @@ void Copy::conjoint_memory_atomic(void* from, void* to, size_t size) {
}
}
// Helper for Copy::conjoint_swap: copies elements between two buffers while
// unconditionally byte-swapping each element. Handles overlapping regions by
// selecting a copy direction, and handles misaligned src/dst pointers by
// using memcpy for the individual element loads/stores instead of direct
// (potentially trapping) typed accesses — the fix for JDK-8141491.
class CopySwap : AllStatic {
public:
  /**
   * Copy and byte swap elements
   *
   * @param src address of source
   * @param dst address of destination
   * @param byte_count number of bytes to copy
   * @param elem_size size of the elements to copy-swap
   */
  static void conjoint_swap(address src, address dst, size_t byte_count, size_t elem_size) {
    assert(src != NULL, "address must not be NULL");
    assert(dst != NULL, "address must not be NULL");
    assert(elem_size == 2 || elem_size == 4 || elem_size == 8,
           err_msg("incorrect element size: " SIZE_FORMAT, elem_size));
    assert(is_size_aligned(byte_count, elem_size),
           err_msg("byte_count " SIZE_FORMAT " must be multiple of element size " SIZE_FORMAT, byte_count, elem_size));

    address src_end = src + byte_count;

    // If dst starts inside [src, src_end), copy from the high end downward
    // so each source element is read before it can be overwritten.
    if (dst <= src || dst >= src_end) {
      do_conjoint_swap<RIGHT>(src, dst, byte_count, elem_size);
    } else {
      do_conjoint_swap<LEFT>(src, dst, byte_count, elem_size);
    }
  }

private:
  /**
   * Byte swap a 16-bit value
   */
  static uint16_t byte_swap(uint16_t x) {
    return (x << 8) | (x >> 8);
  }

  /**
   * Byte swap a 32-bit value (swap the halves, byte-swap each half)
   */
  static uint32_t byte_swap(uint32_t x) {
    uint16_t lo = (uint16_t)x;
    uint16_t hi = (uint16_t)(x >> 16);

    return ((uint32_t)byte_swap(lo) << 16) | (uint32_t)byte_swap(hi);
  }

  /**
   * Byte swap a 64-bit value (swap the halves, byte-swap each half)
   */
  static uint64_t byte_swap(uint64_t x) {
    uint32_t lo = (uint32_t)x;
    uint32_t hi = (uint32_t)(x >> 32);

    return ((uint64_t)byte_swap(lo) << 32) | (uint64_t)byte_swap(hi);
  }

  enum CopyDirection {
    RIGHT, // lower -> higher address
    LEFT   // higher -> lower address
  };

  /**
   * Copy and byte swap elements
   *
   * <T> - type of element to copy
   * <D> - copy direction
   * <is_src_aligned> - true if src argument is aligned to element size
   * <is_dst_aligned> - true if dst argument is aligned to element size
   *
   * @param src address of source
   * @param dst address of destination
   * @param byte_count number of bytes to copy
   */
  template <typename T, CopyDirection D, bool is_src_aligned, bool is_dst_aligned>
  static void do_conjoint_swap(address src, address dst, size_t byte_count) {
    address cur_src, cur_dst;

    // D is a compile-time template constant, so these switches are expected
    // to fold away to straight-line code.
    switch (D) {
    case RIGHT:
      cur_src = src;
      cur_dst = dst;
      break;
    case LEFT:
      // Start at the last element when copying downward.
      cur_src = src + byte_count - sizeof(T);
      cur_dst = dst + byte_count - sizeof(T);
      break;
    }

    for (size_t i = 0; i < byte_count / sizeof(T); i++) {
      T tmp;

      // Misaligned access goes through memcpy to stay alignment-safe
      // on strict-alignment hardware.
      if (is_src_aligned) {
        tmp = *(T*)cur_src;
      } else {
        memcpy(&tmp, cur_src, sizeof(T));
      }

      tmp = byte_swap(tmp);

      if (is_dst_aligned) {
        *(T*)cur_dst = tmp;
      } else {
        memcpy(cur_dst, &tmp, sizeof(T));
      }

      switch (D) {
      case RIGHT:
        cur_src += sizeof(T);
        cur_dst += sizeof(T);
        break;
      case LEFT:
        cur_src -= sizeof(T);
        cur_dst -= sizeof(T);
        break;
      }
    }
  }

  /**
   * Copy and byte swap elements
   *
   * <T> - type of element to copy
   * <D> - copy direction
   *
   * @param src address of source
   * @param dst address of destination
   * @param byte_count number of bytes to copy
   */
  template <typename T, CopyDirection direction>
  static void do_conjoint_swap(address src, address dst, size_t byte_count) {
    // Probe actual pointer alignment once, then dispatch to a specialization
    // where the alignment decision is a compile-time constant.
    if (is_ptr_aligned(src, sizeof(T))) {
      if (is_ptr_aligned(dst, sizeof(T))) {
        do_conjoint_swap<T,direction,true,true>(src, dst, byte_count);
      } else {
        do_conjoint_swap<T,direction,true,false>(src, dst, byte_count);
      }
    } else {
      if (is_ptr_aligned(dst, sizeof(T))) {
        do_conjoint_swap<T,direction,false,true>(src, dst, byte_count);
      } else {
        do_conjoint_swap<T,direction,false,false>(src, dst, byte_count);
      }
    }
  }

  /**
   * Copy and byte swap elements
   *
   * <D> - copy direction
   *
   * @param src address of source
   * @param dst address of destination
   * @param byte_count number of bytes to copy
   * @param elem_size size of the elements to copy-swap (2, 4 or 8;
   *                  already validated by conjoint_swap above)
   */
  template <CopyDirection D>
  static void do_conjoint_swap(address src, address dst, size_t byte_count, size_t elem_size) {
    switch (elem_size) {
    case 2: do_conjoint_swap<uint16_t,D>(src, dst, byte_count); break;
    case 4: do_conjoint_swap<uint32_t,D>(src, dst, byte_count); break;
    case 8: do_conjoint_swap<uint64_t,D>(src, dst, byte_count); break;
    default: guarantee(false, err_msg("do_conjoint_swap: Invalid elem_size %zd\n", elem_size));
    }
  }
};
// Public entry point (declared in copy.hpp): delegates to the alignment-
// and overlap-aware CopySwap helper above.
void Copy::conjoint_swap(address src, address dst, size_t byte_count, size_t elem_size) {
  CopySwap::conjoint_swap(src, dst, byte_count, elem_size);
}
// Fill bytes; larger units are filled atomically if everything is aligned.
void Copy::fill_to_memory_atomic(void* to, size_t size, jubyte value) {
......
/*
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -227,6 +227,16 @@ class Copy : AllStatic {
}
}
/**
* Copy and *unconditionally* byte swap elements
*
* @param src address of source
* @param dst address of destination
* @param byte_count number of bytes to copy
* @param elem_size size of the elements to copy-swap
*/
static void conjoint_swap(address src, address dst, size_t byte_count, size_t elem_size);
// Fill methods
// Fill word-aligned words, not atomic on each word
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册