Unverified commit 615e8a20, authored by Jacek Czaja, committed by GitHub

- oneDNN update 1.3 -> 1.5 (#25202)

* - introducing oneDNN 1.6

test=develop

* - onednn 1.5

test=develop

* - oneDNN 1.5

test=develop

* - oneDNN 1.5 bugfix

test=develop
Parent commit: 2efcb481
......@@ -20,7 +20,7 @@ SET(MKLDNN_SOURCE_DIR ${THIRD_PARTY_PATH}/mkldnn/src/extern_mkldnn)
SET(MKLDNN_INSTALL_DIR ${THIRD_PARTY_PATH}/install/mkldnn)
SET(MKLDNN_INC_DIR "${MKLDNN_INSTALL_DIR}/include" CACHE PATH "mkldnn include directory." FORCE)
SET(MKLDNN_REPOSITORY https://github.com/intel/mkl-dnn.git)
# Pin oneDNN (mkl-dnn) to the commit corresponding to the 1.5 release.
# Exactly one SET(MKLDNN_TAG ...) must exist here: an earlier stale tag
# assignment was a dead store (the last SET silently wins in CMake) and
# has been removed. Update this hash together with any integration code
# that depends on the oneDNN API version.
SET(MKLDNN_TAG 1ea812f4f5aa1bd989372a23ab50d0f0f81ee677)
# Introduce variables:
# * CMAKE_INSTALL_LIBDIR
......
......@@ -105,8 +105,11 @@ class TestDnnlMatMulOpInt8NoScales(TestDnnlMatMulOp):
class TestDnnlMatMulOpInt8(TestDnnlMatMulOp):
    # Int8 variant of the DNNL matmul test.
    #
    # Due to a limitation in the int8 matmul implementation on older
    # platforms (BDW, SKX) the quantization range is reduced from
    # [-127, 127] to [-63, 63].
    def quantize(self, tensor):
        """Symmetrically quantize ``tensor`` to int8.

        Args:
            tensor: array-like of floats to quantize.

        Returns:
            (scale, quantized): ``scale`` is ``63. / |max element|`` and
            ``quantized`` is ``round(scale * tensor)`` cast to int8.

        NOTE(review): ``np.abs(np.amax(tensor))`` takes the *signed*
        maximum first, so an all-negative tensor scales by |largest
        (least negative) element|, not the true absolute maximum —
        confirm this matches the reference quantization in the op.
        """
        # A stale duplicate assignment (scale = 127. / ...) was a dead
        # store immediately overwritten; only the [-63, 63] scale remains.
        scale = 63. / np.abs(np.amax(tensor))
        quantized = np.round(scale * tensor).astype("int8")
        return scale, quantized
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册