Commit 95711bc2 authored by obscuren

Updated ethash

Parent 387f6bba
......@@ -22,8 +22,8 @@
},
{
"ImportPath": "github.com/ethereum/ethash",
"Comment": "v17-64-ga323708",
"Rev": "a323708b8c4d253b8567bf6c72727d1aec302225"
"Comment": "v23-11-g5376ec8",
"Rev": "5376ec8816d6bf787d4fc91a08b4527bc5e1f469"
},
{
"ImportPath": "github.com/ethereum/serpent-go",
......
......@@ -8,3 +8,5 @@ pyethash.egg-info/
*.so
*~
*.swp
MANIFEST
dist/
cmake_minimum_required(VERSION 2.8.7)
project(ethash)
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/Modules/")
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/modules/")
set(ETHHASH_LIBS ethash)
if (WIN32 AND WANT_CRYPTOPP)
......@@ -10,6 +10,12 @@ endif()
add_subdirectory(src/libethash)
# bin2h.cmake doesn't work
#add_subdirectory(src/libethash-cl EXCLUDE_FROM_ALL)
if (NOT OpenCL_FOUND)
find_package(OpenCL)
endif()
if (OpenCL_FOUND)
add_subdirectory(src/libethash-cl)
endif()
add_subdirectory(src/benchmark EXCLUDE_FROM_ALL)
add_subdirectory(test/c EXCLUDE_FROM_ALL)
include setup.py
# C sources
include src/libethash/internal.c
include src/libethash/sha3.c
include src/libethash/util.c
include src/python/core.c
# Headers
include src/libethash/compiler.h
include src/libethash/data_sizes.h
include src/libethash/endian.h
include src/libethash/ethash.h
include src/libethash/fnv.h
include src/libethash/internal.h
include src/libethash/sha3.h
include src/libethash/util.h
.PHONY: clean
.PHONY: clean test
test:
./test/test.sh
clean:
rm -rf *.so pyethash.egg-info/ build/ test/python/python-virtual-env/ test/c/build/ pyethash/*.{so,pyc}
rm -rf *.so pyethash.egg-info/ build/ test/python/python-virtual-env/ test/c/build/ pyethash.so test/python/*.pyc dist/ MANIFEST
......@@ -8,6 +8,6 @@ file(GLOB SOURCE "../../cryptopp/*.cpp")
add_library(${LIBRARY} ${HEADERS} ${SOURCE})
set(CRYPTOPP_INCLUDE_DIRS "../.." PARENT_SCOPE)
set(CRYPTOPP_INCLUDE_DIRS "../.." "../../../" PARENT_SCOPE)
set(CRYPTOPP_LIBRARIES ${LIBRARY} PARENT_SCOPE)
set(CRYPTOPP_FOUND TRUE PARENT_SCOPE)
/*
###################################################################################
###################################################################################
#################### ####################
#################### EDIT AND YOU SHALL FEEL MY WRATH - jeff ####################
#################### ####################
###################################################################################
###################################################################################
*/
package ethash
/*
......@@ -11,8 +21,8 @@ import "C"
import (
"bytes"
"encoding/binary"
"fmt"
"io/ioutil"
"log"
"math/big"
"math/rand"
"os"
......@@ -21,25 +31,26 @@ import (
"time"
"unsafe"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethutil"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/pow"
)
var tt256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
var minDifficulty = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
var powlogger = logger.NewLogger("POW")
type DAG struct {
SeedBlockNum uint64
dag unsafe.Pointer // full GB of memory for dag
file bool
type ParamsAndCache struct {
params *C.ethash_params
cache *C.ethash_cache
Epoch uint64
}
type ParamsAndCache struct {
params *C.ethash_params
cache *C.ethash_cache
SeedBlockNum uint64
type DAG struct {
dag unsafe.Pointer // full GB of memory for dag
file bool
paramsAndCache *ParamsAndCache
}
type Ethash struct {
......@@ -48,10 +59,9 @@ type Ethash struct {
chainManager pow.ChainManager
dag *DAG
paramsAndCache *ParamsAndCache
nextdag unsafe.Pointer
ret *C.ethash_return_value
dagMutex *sync.Mutex
cacheMutex *sync.Mutex
dagMutex *sync.RWMutex
cacheMutex *sync.RWMutex
}
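
The switch above from sync.Mutex to sync.RWMutex lets several readers of the cache and DAG proceed in parallel while an update still gets exclusive access: Search takes dagMutex.RLock, UpdateDAG takes dagMutex.Lock. A minimal self-contained sketch of that pattern, for illustration only (not code from this commit):

// Illustration only: multiple readers may hold RLock concurrently, while a
// writer holding Lock is exclusive — mirroring Search (RLock) vs UpdateDAG
// (Lock) on dagMutex.
package main

import (
	"fmt"
	"sync"
)

func main() {
	var dagMu sync.RWMutex
	var wg sync.WaitGroup

	// Two concurrent "searchers" share the read lock.
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			dagMu.RLock()
			defer dagMu.RUnlock()
			fmt.Println("reader", id, "hashing against the DAG")
		}(i)
	}
	wg.Wait()

	// The "updater" takes the write lock exclusively before swapping the DAG.
	dagMu.Lock()
	fmt.Println("writer regenerating the DAG")
	dagMu.Unlock()
}
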
func parseNonce(nonce []byte) (uint64, error) {
......@@ -65,151 +75,212 @@ func parseNonce(nonce []byte) (uint64, error) {
const epochLength uint64 = 30000
func GetSeedBlockNum(blockNum uint64) uint64 {
var seedBlockNum uint64 = 0
if blockNum > epochLength {
seedBlockNum = ((blockNum - 1) / epochLength) * epochLength
func makeParamsAndCache(chainManager pow.ChainManager, blockNum uint64) (*ParamsAndCache, error) {
if blockNum >= epochLength*2048 {
return nil, fmt.Errorf("block number is out of bounds (value %v, limit is %v)", blockNum, epochLength*2048)
}
return seedBlockNum
}
/*
XXX THIS DOESN'T WORK!! NEEDS FIXING
blockEpoch will underflow and wrap around causing massive issues
func GetSeedBlockNum(blockNum uint64) uint64 {
var seedBlockNum uint64 = 0
if blockNum > epochLength {
seedBlockNum = ((blockNum - 1) / epochLength) * epochLength
}
return seedBlockNum
}
*/
func makeParamsAndCache(chainManager pow.ChainManager, blockNum uint64) *ParamsAndCache {
seedBlockNum := GetSeedBlockNum(blockNum)
paramsAndCache := &ParamsAndCache{
params: new(C.ethash_params),
cache: new(C.ethash_cache),
SeedBlockNum: seedBlockNum,
params: new(C.ethash_params),
cache: new(C.ethash_cache),
Epoch: blockNum / epochLength,
}
C.ethash_params_init(paramsAndCache.params, C.uint32_t(seedBlockNum))
C.ethash_params_init(paramsAndCache.params, C.uint32_t(uint32(blockNum)))
paramsAndCache.cache.mem = C.malloc(paramsAndCache.params.cache_size)
seedHash := chainManager.GetBlockByNumber(seedBlockNum).SeedHash()
log.Println("Making Cache")
seedHash, err := GetSeedHash(blockNum)
if err != nil {
return nil, err
}
powlogger.Infoln("Making Cache")
start := time.Now()
C.ethash_mkcache(paramsAndCache.cache, paramsAndCache.params, (*C.uint8_t)(unsafe.Pointer(&seedHash[0])))
log.Println("Took:", time.Since(start))
powlogger.Infoln("Took:", time.Since(start))
return paramsAndCache
return paramsAndCache, nil
}
func (pow *Ethash) updateCache() {
func (pow *Ethash) UpdateCache(force bool) error {
pow.cacheMutex.Lock()
seedNum := GetSeedBlockNum(pow.chainManager.CurrentBlock().NumberU64())
if pow.paramsAndCache.SeedBlockNum != seedNum {
pow.paramsAndCache = makeParamsAndCache(pow.chainManager, pow.chainManager.CurrentBlock().NumberU64())
thisEpoch := pow.chainManager.CurrentBlock().NumberU64()
if force || pow.paramsAndCache.Epoch != thisEpoch {
var err error
pow.paramsAndCache, err = makeParamsAndCache(pow.chainManager, pow.chainManager.CurrentBlock().NumberU64())
if err != nil {
panic(err)
}
}
pow.cacheMutex.Unlock()
return nil
}
func makeDAG(p *ParamsAndCache) *DAG {
d := &DAG{
dag: C.malloc(p.params.full_size),
SeedBlockNum: p.SeedBlockNum,
dag: C.malloc(p.params.full_size),
file: false,
paramsAndCache: p,
}
donech := make(chan string)
go func() {
t := time.NewTicker(5 * time.Second)
tstart := time.Now()
done:
for {
select {
case <-t.C:
powlogger.Infof("... still generating DAG (%v) ...\n", time.Since(tstart).Seconds())
case str := <-donech:
powlogger.Infof("... %s ...\n", str)
break done
}
}
}()
C.ethash_compute_full_data(d.dag, p.params, p.cache)
donech <- "DAG generation completed"
return d
}
func (pow *Ethash) writeDagToDisk(dag *DAG, seedNum uint64) *os.File {
data := C.GoBytes(unsafe.Pointer(dag.dag), C.int(pow.paramsAndCache.params.full_size))
func (pow *Ethash) writeDagToDisk(dag *DAG, epoch uint64) *os.File {
if epoch > 2048 {
panic(fmt.Errorf("Epoch must be less than 2048 (is %v)", epoch))
}
data := C.GoBytes(unsafe.Pointer(dag.dag), C.int(dag.paramsAndCache.params.full_size))
file, err := os.Create("/tmp/dag")
if err != nil {
panic(err)
}
num := make([]byte, 8)
binary.BigEndian.PutUint64(num, seedNum)
dataEpoch := make([]byte, 8)
binary.BigEndian.PutUint64(dataEpoch, epoch)
file.Write(num)
file.Write(dataEpoch)
file.Write(data)
return file
}
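
writeDagToDisk stores an 8-byte big-endian epoch number followed by the raw DAG bytes; UpdateDAG below reads that prefix back to decide whether the cached /tmp/dag is usable. A small sketch of encoding and decoding this layout, using hypothetical helpers that only assume the prefix-plus-payload format shown above:

// Hypothetical helpers (not code from this commit) for the /tmp/dag layout:
// an 8-byte big-endian epoch header, then the DAG bytes.
package main

import (
	"encoding/binary"
	"fmt"
)

func encodeDagFile(epoch uint64, dag []byte) []byte {
	buf := make([]byte, 8+len(dag))
	binary.BigEndian.PutUint64(buf[:8], epoch)
	copy(buf[8:], dag)
	return buf
}

func decodeDagFile(data []byte) (epoch uint64, dag []byte, err error) {
	if len(data) < 8 {
		return 0, nil, fmt.Errorf("dag file shorter than its 8-byte epoch header")
	}
	return binary.BigEndian.Uint64(data[:8]), data[8:], nil
}

func main() {
	blob := encodeDagFile(3, []byte{0xde, 0xad, 0xbe, 0xef})
	epoch, dag, _ := decodeDagFile(blob)
	fmt.Println(epoch, len(dag)) // 3 4
}
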
func (pow *Ethash) UpdateDAG() {
pow.cacheMutex.Lock()
pow.dagMutex.Lock()
blockNum := pow.chainManager.CurrentBlock().NumberU64()
if blockNum >= epochLength*2048 {
// This will crash in the 2030s or 2040s
panic(fmt.Errorf("Current block number is out of bounds (value %v, limit is %v)", blockNum, epochLength*2048))
}
seedNum := GetSeedBlockNum(pow.chainManager.CurrentBlock().NumberU64())
if pow.dag == nil || pow.dag.SeedBlockNum != seedNum {
pow.dagMutex.Lock()
defer pow.dagMutex.Unlock()
thisEpoch := blockNum / epochLength
if pow.dag == nil || pow.dag.paramsAndCache.Epoch != thisEpoch {
if pow.dag != nil && pow.dag.dag != nil {
C.free(pow.dag.dag)
pow.dag.dag = nil
}
if pow.dag != nil && pow.dag.paramsAndCache.cache.mem != nil {
C.free(pow.dag.paramsAndCache.cache.mem)
pow.dag.paramsAndCache.cache.mem = nil
}
// Make the params and cache for the DAG
paramsAndCache, err := makeParamsAndCache(pow.chainManager, blockNum)
if err != nil {
panic(err)
}
// TODO: On non-SSD disks, loading the DAG from disk takes longer than generating it in memory
pow.paramsAndCache = paramsAndCache
path := path.Join("/", "tmp", "dag")
pow.dag = nil
log.Println("Generating dag")
powlogger.Infoln("Retrieving DAG")
start := time.Now()
file, err := os.Open(path)
if err != nil {
log.Printf("No dag found in '%s'. Generating new dago(takes a while)...", path)
pow.dag = makeDAG(pow.paramsAndCache)
file = pow.writeDagToDisk(pow.dag, seedNum)
powlogger.Infof("No DAG found. Generating new DAG in '%s' (this takes a while)...\n", path)
pow.dag = makeDAG(paramsAndCache)
file = pow.writeDagToDisk(pow.dag, thisEpoch)
pow.dag.file = true
} else {
data, err := ioutil.ReadAll(file)
if err != nil {
panic(err)
powlogger.Infof("DAG load err: %v\n", err)
}
num := binary.BigEndian.Uint64(data[0:8])
if num < seedNum {
log.Printf("Old found. Generating new dag (takes a while)...")
pow.dag = makeDAG(pow.paramsAndCache)
file = pow.writeDagToDisk(pow.dag, seedNum)
if len(data) < 8 {
powlogger.Infof("DAG in '%s' is less than 8 bytes, it must be corrupted. Generating new DAG (this takes a while)...\n", path)
pow.dag = makeDAG(paramsAndCache)
file = pow.writeDagToDisk(pow.dag, thisEpoch)
pow.dag.file = true
} else {
data = data[8:]
pow.dag = &DAG{
dag: unsafe.Pointer(&data[0]),
file: true,
SeedBlockNum: pow.paramsAndCache.SeedBlockNum,
dataEpoch := binary.BigEndian.Uint64(data[0:8])
if dataEpoch < thisEpoch {
powlogger.Infof("DAG in '%s' is stale. Generating new DAG (this takes a while)...\n", path)
pow.dag = makeDAG(paramsAndCache)
file = pow.writeDagToDisk(pow.dag, thisEpoch)
pow.dag.file = true
} else if dataEpoch > thisEpoch {
// FIXME
panic(fmt.Errorf("Saved DAG in '%s' reports to be from future epoch %v (current epoch is %v)\n", path, dataEpoch, thisEpoch))
} else if len(data) != (int(paramsAndCache.params.full_size) + 8) {
powlogger.Infof("DAG in '%s' is corrupted. Generating new DAG (this takes a while)...\n", path)
pow.dag = makeDAG(paramsAndCache)
file = pow.writeDagToDisk(pow.dag, thisEpoch)
pow.dag.file = true
} else {
data = data[8:]
pow.dag = &DAG{
dag: unsafe.Pointer(&data[0]),
file: true,
paramsAndCache: paramsAndCache,
}
}
}
}
log.Println("Took:", time.Since(start))
powlogger.Infoln("Took:", time.Since(start))
file.Close()
}
pow.dagMutex.Unlock()
pow.cacheMutex.Unlock()
}
func New(chainManager pow.ChainManager) *Ethash {
paramsAndCache, err := makeParamsAndCache(chainManager, chainManager.CurrentBlock().NumberU64())
if err != nil {
panic(err)
}
return &Ethash{
turbo: true,
paramsAndCache: makeParamsAndCache(chainManager, chainManager.CurrentBlock().NumberU64()),
paramsAndCache: paramsAndCache,
chainManager: chainManager,
dag: nil,
cacheMutex: new(sync.Mutex),
dagMutex: new(sync.Mutex),
cacheMutex: new(sync.RWMutex),
dagMutex: new(sync.RWMutex),
}
}
func (pow *Ethash) DAGSize() uint64 {
return uint64(pow.paramsAndCache.params.full_size)
return uint64(pow.dag.paramsAndCache.params.full_size)
}
func (pow *Ethash) CacheSize() uint64 {
return uint64(pow.paramsAndCache.params.cache_size)
}
func (pow *Ethash) GetSeedHash(blockNum uint64) []byte {
seednum := GetSeedBlockNum(blockNum)
return pow.chainManager.GetBlockByNumber(seednum).SeedHash()
func GetSeedHash(blockNum uint64) ([]byte, error) {
if blockNum >= epochLength*2048 {
return nil, fmt.Errorf("block number is out of bounds (value %v, limit is %v)", blockNum, epochLength*2048)
}
epoch := blockNum / epochLength
seedHash := make([]byte, 32)
var i uint64
for i = 0; i < 32; i++ {
seedHash[i] = 0
}
for i = 0; i < epoch; i++ {
seedHash = crypto.Sha3(seedHash)
}
return seedHash, nil
}
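
GetSeedHash depends only on the epoch (blockNum / epochLength): it starts from 32 zero bytes and applies Keccak-256 once per epoch. A standalone sketch of the same derivation, assuming crypto.Sha3 in go-ethereum is Keccak-256 and standing it in with golang.org/x/crypto/sha3; for block 30000 it should reproduce the expected value used in the Go test further down:

// Standalone sketch (not code from this commit) of the epoch-based seed
// derivation: 32 zero bytes, hashed once per epoch with Keccak-256.
package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

const epochLength = 30000

func seedHash(blockNum uint64) []byte {
	seed := make([]byte, 32) // epoch 0: all zeroes
	for i := uint64(0); i < blockNum/epochLength; i++ {
		h := sha3.NewLegacyKeccak256()
		h.Write(seed)
		seed = h.Sum(nil)
	}
	return seed
}

func main() {
	// Block 30000 is epoch 1, so one Keccak-256 round over 32 zero bytes;
	// expected output (see TestGetSeedHash below):
	// 290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563
	fmt.Printf("%x\n", seedHash(30000))
}
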
func (pow *Ethash) Stop() {
......@@ -224,17 +295,18 @@ func (pow *Ethash) Stop() {
if pow.dag.dag != nil && !pow.dag.file {
C.free(pow.dag.dag)
}
if pow.dag != nil && pow.dag.paramsAndCache != nil && pow.dag.paramsAndCache.cache.mem != nil {
C.free(pow.dag.paramsAndCache.cache.mem)
pow.dag.paramsAndCache.cache.mem = nil
}
pow.dag.dag = nil
}
func (pow *Ethash) Search(block pow.Block, stop <-chan struct{}) (uint64, []byte, []byte) {
//pow.UpdateDAG()
pow.UpdateDAG()
// Not very elegant, multiple mining instances are not supported
//pow.dagMutex.Lock()
//pow.cacheMutex.Lock()
//defer pow.cacheMutex.Unlock()
//defer pow.dagMutex.Unlock()
pow.dagMutex.RLock()
defer pow.dagMutex.RUnlock()
r := rand.New(rand.NewSource(time.Now().UnixNano()))
miningHash := block.HashNoNonce()
......@@ -246,7 +318,7 @@ func (pow *Ethash) Search(block pow.Block, stop <-chan struct{}) (uint64, []byte
nonce := uint64(r.Int63())
cMiningHash := (*C.uint8_t)(unsafe.Pointer(&miningHash[0]))
target := new(big.Int).Div(tt256, diff)
target := new(big.Int).Div(minDifficulty, diff)
var ret C.ethash_return_value
for {
......@@ -262,14 +334,17 @@ func (pow *Ethash) Search(block pow.Block, stop <-chan struct{}) (uint64, []byte
hashes := ((float64(1e9) / float64(elapsed)) * float64(i-starti)) / 1000
pow.HashRate = int64(hashes)
C.ethash_full(&ret, pow.dag.dag, pow.paramsAndCache.params, cMiningHash, C.uint64_t(nonce))
C.ethash_full(&ret, pow.dag.dag, pow.dag.paramsAndCache.params, cMiningHash, C.uint64_t(nonce))
result := ethutil.Bytes2Big(C.GoBytes(unsafe.Pointer(&ret.result[0]), C.int(32)))
// TODO: disagrees with the spec https://github.com/ethereum/wiki/wiki/Ethash#mining
if result.Cmp(target) <= 0 {
mixDigest := C.GoBytes(unsafe.Pointer(&ret.mix_hash[0]), C.int(32))
return nonce, mixDigest, pow.GetSeedHash(block.NumberU64())
seedHash, err := GetSeedHash(block.NumberU64()) // This seedhash is useless
if err != nil {
panic(err)
}
return nonce, mixDigest, seedHash
}
nonce += 1
......@@ -283,32 +358,41 @@ func (pow *Ethash) Search(block pow.Block, stop <-chan struct{}) (uint64, []byte
}
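
Search (and verify below) treats the 32-byte ethash result as a big-endian integer and accepts a nonce when result <= target, where target = 2^256 / difficulty (the minDifficulty constant above is 2^256). A short sketch of that target computation with math/big, using a hypothetical difficulty value purely for illustration:

// Sketch (not code from this commit) of the target used by Search/verify:
// target = 2^256 / difficulty; a result passes when result <= target.
package main

import (
	"fmt"
	"math/big"
)

func main() {
	two256 := new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0)) // same as minDifficulty
	difficulty := big.NewInt(131072)                                          // hypothetical difficulty
	target := new(big.Int).Div(two256, difficulty)

	result := new(big.Int).SetUint64(12345) // hypothetical hash value, read big-endian
	fmt.Println(result.Cmp(target) <= 0)    // true: far below the target, so it would be accepted
}
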
func (pow *Ethash) Verify(block pow.Block) bool {
// Make sure the SeedHash is set correctly
if bytes.Compare(block.SeedHash(), pow.GetSeedHash(block.NumberU64())) != 0 {
return false
}
return pow.verify(block.HashNoNonce(), block.MixDigest(), block.Difficulty(), block.NumberU64(), block.Nonce())
}
func (pow *Ethash) verify(hash []byte, mixDigest []byte, difficulty *big.Int, blockNum uint64, nonce uint64) bool {
// First check: make sure header, mixDigest, nonce are correct without hitting the DAG
// Make sure the block num is valid
if blockNum >= epochLength*2048 {
powlogger.Infoln(fmt.Sprintf("Block number exceeds limit, invalid (value is %v, limit is %v)",
blockNum, epochLength*2048))
return false
}
// First check: make sure header, mixDigest, nonce are correct without hitting the cache
// This is to prevent DOS attacks
chash := (*C.uint8_t)(unsafe.Pointer(&hash[0]))
cnonce := C.uint64_t(nonce)
target := new(big.Int).Div(tt256, difficulty)
target := new(big.Int).Div(minDifficulty, difficulty)
var pAc *ParamsAndCache
// If its an old block (doesn't use the current cache)
// get the cache for it but don't update (so we don't need the mutex)
// Otherwise, it's the current block or a future.
// Otherwise, it's the current block or a future block.
// If current, updateCache will do nothing.
if GetSeedBlockNum(blockNum) < pow.paramsAndCache.SeedBlockNum {
pAc = makeParamsAndCache(pow.chainManager, blockNum)
if blockNum/epochLength < pow.paramsAndCache.Epoch {
var err error
// If we can't make the params for some reason, this block is invalid
pAc, err = makeParamsAndCache(pow.chainManager, blockNum)
if err != nil {
powlogger.Infoln(err)
return false
}
} else {
pow.updateCache()
pow.cacheMutex.Lock()
defer pow.cacheMutex.Unlock()
pow.UpdateCache(false)
pow.cacheMutex.RLock()
defer pow.cacheMutex.RUnlock()
pAc = pow.paramsAndCache
}
......
import pyethash.core
core = pyethash.core
EPOCH_LENGTH = 30000
#!/usr/bin/env python
from distutils.core import setup, Extension
pyethash_core = Extension('pyethash.core',
pyethash = Extension('pyethash',
sources = [
'src/python/core.c',
'src/libethash/util.c',
'src/libethash/internal.c',
'src/libethash/sha3.c'
'src/libethash/sha3.c'],
depends = [
'src/libethash/ethash.h',
'src/libethash/compiler.h',
'src/libethash/data_sizes.h',
'src/libethash/endian.h',
'src/libethash/ethash.h',
'src/libethash/fnv.h',
'src/libethash/internal.h',
'src/libethash/sha3.h',
'src/libethash/util.h'
],
extra_compile_args = ["-std=gnu99"])
extra_compile_args = ["-Isrc/", "-std=gnu99", "-Wall"])
setup (
name = 'pyethash',
author = "Matthew Wampler-Doty",
author_email = "matthew.wampler.doty@gmail.com",
license = 'GPL',
version = '1.0',
version = '23',
url = 'https://github.com/ethereum/ethash',
download_url = 'https://github.com/ethereum/ethash/tarball/v23',
description = 'Python wrappers for ethash, the ethereum proof of work hashing function',
ext_modules = [pyethash_core],
ext_modules = [pyethash],
)
......@@ -31,7 +31,7 @@
#include <algorithm>
#ifdef WITH_CRYPTOPP
#include <libethash/SHA3_cryptopp.h>
#include <libethash/sha3_cryptopp.h>
#include <string>
#else
......
......@@ -2,14 +2,39 @@ set(LIBRARY ethash-cl)
set(CMAKE_BUILD_TYPE Release)
include(bin2h.cmake)
bin2h(SOURCE_FILE ethash_cl_miner_kernel.cl VARIABLE_NAME ethash_cl_miner_kernel HEADER_FILE ${CMAKE_CURRENT_BINARY_DIR}/ethash_cl_miner_kernel.h)
bin2h(SOURCE_FILE ethash_cl_miner_kernel.cl
VARIABLE_NAME ethash_cl_miner_kernel
HEADER_FILE ${CMAKE_CURRENT_BINARY_DIR}/ethash_cl_miner_kernel.h)
if (NOT MSVC)
# Initialize CXXFLAGS for c++11
set(CMAKE_CXX_FLAGS "-Wall -std=c++11")
set(CMAKE_CXX_FLAGS_DEBUG "-O0 -g")
set(CMAKE_CXX_FLAGS_MINSIZEREL "-Os -DNDEBUG")
set(CMAKE_CXX_FLAGS_RELEASE "-O4 -DNDEBUG")
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O2 -g")
# Compiler-specific C++11 activation.
if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU")
execute_process(
COMMAND ${CMAKE_CXX_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
if (NOT (GCC_VERSION VERSION_GREATER 4.7 OR GCC_VERSION VERSION_EQUAL 4.7))
message(FATAL_ERROR "${PROJECT_NAME} requires g++ 4.7 or greater.")
endif ()
elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
else ()
message(FATAL_ERROR "Your C++ compiler does not support C++11.")
endif ()
endif()
if (NOT OpenCL_FOUND)
find_package(OpenCL)
endif()
if (OpenCL_FOUND)
include_directories(${OpenCL_INCLUDE_DIRS} ${CMAKE_CURRENT_BINARY_DIR})
include_directories(..)
add_library(${LIBRARY} ethash_cl_miner.cpp ethash_cl_miner.h)
add_library(${LIBRARY} ethash_cl_miner.cpp ethash_cl_miner.h cl.hpp)
TARGET_LINK_LIBRARIES(${LIBRARY} ${OpenCL_LIBRARIES} ethash)
endif()
......@@ -68,8 +68,7 @@ function(BIN2H)
string(REGEX REPLACE ", $" "" arrayValues ${arrayValues})
# converts the variable name into proper C identifier
# TODO: fix for legacy cmake
IF (${CMAKE_VERSION} GREATER 2.8.10)
IF (${CMAKE_VERSION} GREATER 2.8.10) # fix for legacy cmake
string(MAKE_C_IDENTIFIER "${BIN2H_VARIABLE_NAME}" BIN2H_VARIABLE_NAME)
ENDIF()
string(TOUPPER "${BIN2H_VARIABLE_NAME}" BIN2H_VARIABLE_NAME)
......
......@@ -24,15 +24,22 @@
#include <assert.h>
#include <queue>
#include <vector>
#include "ethash_cl_miner.h"
#include "ethash_cl_miner_kernel.h"
#include <libethash/util.h>
#define ETHASH_BYTES 32
// workaround lame platforms
#if !CL_VERSION_1_2
#define CL_MAP_WRITE_INVALIDATE_REGION CL_MAP_WRITE
#define CL_MEM_HOST_READ_ONLY 0
#endif
#undef min
#undef max
#define HASH_BYTES 32
static void add_definition(std::string& source, char const* id, unsigned value)
{
char buf[256];
......@@ -41,6 +48,7 @@ static void add_definition(std::string& source, char const* id, unsigned value)
}
ethash_cl_miner::ethash_cl_miner()
: m_opencl_1_1()
{
}
......@@ -71,11 +79,23 @@ bool ethash_cl_miner::init(ethash_params const& params, const uint8_t seed[32],
}
// use default device
cl::Device& device = devices[0];
debugf("Using device: %s\n", device.getInfo<CL_DEVICE_NAME>().c_str());
unsigned device_num = 0;
cl::Device& device = devices[device_num];
std::string device_version = device.getInfo<CL_DEVICE_VERSION>();
debugf("Using device: %s (%s)\n", device.getInfo<CL_DEVICE_NAME>().c_str(),device_version.c_str());
if (strncmp("OpenCL 1.0", device_version.c_str(), 10) == 0)
{
debugf("OpenCL 1.0 is not supported.\n");
return false;
}
if (strncmp("OpenCL 1.1", device_version.c_str(), 10) == 0)
{
m_opencl_1_1 = true;
}
// create context
m_context = cl::Context({device});
m_context = cl::Context(std::vector<cl::Device>(&device, &device+1));
m_queue = cl::CommandQueue(m_context, device);
// use requested workgroup size, but we require multiple of 8
......@@ -120,7 +140,7 @@ bool ethash_cl_miner::init(ethash_params const& params, const uint8_t seed[32],
ethash_mkcache(&cache, &params, seed);
// if this throws then it's because we probably need to subdivide the dag uploads for compatibility
void* dag_ptr = m_queue.enqueueMapBuffer(m_dag, true, CL_MAP_WRITE_INVALIDATE_REGION, 0, params.full_size);
void* dag_ptr = m_queue.enqueueMapBuffer(m_dag, true, m_opencl_1_1 ? CL_MAP_WRITE : CL_MAP_WRITE_INVALIDATE_REGION, 0, params.full_size);
ethash_compute_full_data(dag_ptr, &params, &cache);
m_queue.enqueueUnmapMemObject(m_dag, dag_ptr);
......@@ -130,7 +150,7 @@ bool ethash_cl_miner::init(ethash_params const& params, const uint8_t seed[32],
// create mining buffers
for (unsigned i = 0; i != c_num_buffers; ++i)
{
m_hash_buf[i] = cl::Buffer(m_context, CL_MEM_WRITE_ONLY | CL_MEM_HOST_READ_ONLY, 32*c_hash_batch_size);
m_hash_buf[i] = cl::Buffer(m_context, CL_MEM_WRITE_ONLY | (!m_opencl_1_1 ? CL_MEM_HOST_READ_ONLY : 0), 32*c_hash_batch_size);
m_search_buf[i] = cl::Buffer(m_context, CL_MEM_WRITE_ONLY, (c_max_search_results + 1) * sizeof(uint32_t));
}
return true;
......@@ -176,7 +196,6 @@ void ethash_cl_miner::hash(uint8_t* ret, uint8_t const* header, uint64_t nonce,
m_hash_kernel.setArg(0, m_hash_buf[buf]);
// execute it!
clock_t start_time = clock();
m_queue.enqueueNDRangeKernel(
m_hash_kernel,
cl::NullRange,
......@@ -196,8 +215,8 @@ void ethash_cl_miner::hash(uint8_t* ret, uint8_t const* header, uint64_t nonce,
pending_batch const& batch = pending.front();
// could use pinned host pointer instead, but this path isn't that important.
uint8_t* hashes = (uint8_t*)m_queue.enqueueMapBuffer(m_hash_buf[batch.buf], true, CL_MAP_READ, 0, batch.count * HASH_BYTES);
memcpy(ret + batch.base*HASH_BYTES, hashes, batch.count*HASH_BYTES);
uint8_t* hashes = (uint8_t*)m_queue.enqueueMapBuffer(m_hash_buf[batch.buf], true, CL_MAP_READ, 0, batch.count * ETHASH_BYTES);
memcpy(ret + batch.base*ETHASH_BYTES, hashes, batch.count*ETHASH_BYTES);
m_queue.enqueueUnmapMemObject(m_hash_buf[batch.buf], hashes);
pending.pop();
......@@ -223,8 +242,19 @@ void ethash_cl_miner::search(uint8_t const* header, uint64_t target, search_hook
{
m_queue.enqueueWriteBuffer(m_search_buf[i], false, 0, 4, &c_zero);
}
#if CL_VERSION_1_2
cl::Event pre_return_event;
m_queue.enqueueBarrierWithWaitList(NULL, &pre_return_event);
if (!m_opencl_1_1)
{
m_queue.enqueueBarrierWithWaitList(NULL, &pre_return_event);
}
else
#else
{
m_queue.finish();
}
#endif
/*
__kernel void ethash_combined_search(
......@@ -284,6 +314,11 @@ void ethash_cl_miner::search(uint8_t const* header, uint64_t target, search_hook
}
// not safe to return until this is ready
pre_return_event.wait();
#if CL_VERSION_1_2
if (!m_opencl_1_1)
{
pre_return_event.wait();
}
#endif
}
......@@ -40,4 +40,5 @@ private:
cl::Buffer m_hash_buf[c_num_buffers];
cl::Buffer m_search_buf[c_num_buffers];
unsigned m_workgroup_size;
bool m_opencl_1_1;
};
\ No newline at end of file
......@@ -2,7 +2,6 @@ set(LIBRARY ethash)
if (CPPETHEREUM)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC")
#else ()
endif ()
set(CMAKE_BUILD_TYPE Release)
......
/*
This file is part of cpp-ethereum.
This file is part of ethash.
cpp-ethereum is free software: you can redistribute it and/or modify
ethash is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
cpp-ethereum is distributed in the hope that it will be useful,
ethash is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
along with ethash. If not, see <http://www.gnu.org/licenses/>.
*/
/** @file ethash.h
......@@ -26,13 +26,15 @@
#include <stddef.h>
#include "compiler.h"
#define REVISION 20
#define DAGSIZE_BYTES_INIT 1073741824U // 2**30
#define DAG_GROWTH 8388608U // 2**23
#define CACHE_MULTIPLIER 1024
#define REVISION 23
#define DATASET_BYTES_INIT 1073741824U // 2**30
#define DATASET_BYTES_GROWTH 8388608U // 2**23
#define CACHE_BYTES_INIT 1073741824U // 2**24
#define CACHE_BYTES_GROWTH 131072U // 2**17
#define EPOCH_LENGTH 30000U
#define MIX_BYTES 128
#define DAG_PARENTS 256
#define HASH_BYTES 64
#define DATASET_PARENTS 256
#define CACHE_ROUNDS 3
#define ACCESSES 64
......@@ -60,19 +62,38 @@ static inline void ethash_params_init(ethash_params *params, const uint32_t bloc
}
typedef struct ethash_cache {
void *mem;
void *mem;
} ethash_cache;
void ethash_mkcache(ethash_cache *cache, ethash_params const *params, const uint8_t seed[32]);
void ethash_compute_full_data(void *mem, ethash_params const *params, ethash_cache const *cache);
void ethash_full(ethash_return_value *ret, void const *full_mem, ethash_params const *params, const uint8_t header_hash[32], const uint64_t nonce);
void ethash_light(ethash_return_value *ret, ethash_cache const *cache, ethash_params const *params, const uint8_t header_hash[32], const uint64_t nonce);
void ethash_get_seedhash(uint8_t seedhash[32], const uint32_t block_number);
static inline void ethash_prep_light(void *cache, ethash_params const *params, const uint8_t seed[32]) { ethash_cache c; c.mem = cache; ethash_mkcache(&c, params, seed); }
static inline void ethash_compute_light(ethash_return_value *ret, void const *cache, ethash_params const *params, const uint8_t header_hash[32], const uint64_t nonce) { ethash_cache c; c.mem = (void*)cache; ethash_light(ret, &c, params, header_hash, nonce); }
static inline void ethash_prep_full(void *full, ethash_params const *params, void const *cache) { ethash_cache c; c.mem = (void*)cache; ethash_compute_full_data(full, params, &c); }
static inline void ethash_compute_full(ethash_return_value *ret, void const *full, ethash_params const *params, const uint8_t header_hash[32], const uint64_t nonce) { ethash_full(ret, full, params, header_hash, nonce); }
static inline void ethash_prep_light(void *cache, ethash_params const *params, const uint8_t seed[32]) {
ethash_cache c;
c.mem = cache;
ethash_mkcache(&c, params, seed);
}
static inline void ethash_compute_light(ethash_return_value *ret, void const *cache, ethash_params const *params, const uint8_t header_hash[32], const uint64_t nonce) {
ethash_cache c;
c.mem = (void *) cache;
ethash_light(ret, &c, params, header_hash, nonce);
}
static inline void ethash_prep_full(void *full, ethash_params const *params, void const *cache) {
ethash_cache c;
c.mem = (void *) cache;
ethash_compute_full_data(full, params, &c);
}
static inline void ethash_compute_full(ethash_return_value *ret, void const *full, ethash_params const *params, const uint8_t header_hash[32], const uint64_t nonce) {
ethash_full(ret, full, params, header_hash, nonce);
}
// Returns if hash is less than or equal to difficulty
static inline int ethash_check_difficulty(
const uint8_t hash[32],
const uint8_t difficulty[32]) {
......@@ -81,7 +102,7 @@ static inline int ethash_check_difficulty(
if (hash[i] == difficulty[i]) continue;
return hash[i] < difficulty[i];
}
return 0;
return 1;
}
int ethash_quick_check_difficulty(
......
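
ethash_check_difficulty above is a byte-wise big-endian comparison, and the change from return 0 to return 1 makes a hash exactly equal to the boundary count as valid, matching the "less than or equal" comment. In Go terms this is bytes.Compare(hash, boundary) <= 0; a minimal sketch of the equivalent check (not code from this commit):

// Minimal Go equivalent of ethash_check_difficulty after this change:
// a 32-byte hash is valid when it compares big-endian (lexicographically)
// less than or equal to the boundary.
package main

import (
	"bytes"
	"fmt"
)

func checkDifficulty(hash, boundary []byte) bool {
	return bytes.Compare(hash, boundary) <= 0
}

func main() {
	h := bytes.Repeat([]byte{0x11}, 32)
	fmt.Println(checkDifficulty(h, h)) // true: an equal hash now passes, matching "return 1"
}
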
/*
This file is part of cpp-ethereum.
This file is part of ethash.
cpp-ethereum is free software: you can redistribute it and/or modify
ethash is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
cpp-ethereum is distributed in the hope that it will be useful,
ethash is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
......@@ -14,7 +14,7 @@
You should have received a copy of the GNU General Public License
along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
*/
/** @file dash.cpp
/** @file internal.c
* @author Tim Hughes <tim@twistedfury.com>
* @author Matthew Wampler-Doty
* @date 2015
......@@ -38,12 +38,12 @@
#endif // WITH_CRYPTOPP
size_t ethash_get_datasize(const uint32_t block_number) {
assert(block_number / EPOCH_LENGTH < 500);
assert(block_number / EPOCH_LENGTH < 2048);
return dag_sizes[block_number / EPOCH_LENGTH];
}
size_t ethash_get_cachesize(const uint32_t block_number) {
assert(block_number / EPOCH_LENGTH < 500);
assert(block_number / EPOCH_LENGTH < 2048);
return cache_sizes[block_number / EPOCH_LENGTH];
}
......@@ -55,7 +55,7 @@ void static ethash_compute_cache_nodes(
ethash_params const *params,
const uint8_t seed[32]) {
assert((params->cache_size % sizeof(node)) == 0);
uint32_t const num_nodes = (uint32_t)(params->cache_size / sizeof(node));
uint32_t const num_nodes = (uint32_t) (params->cache_size / sizeof(node));
SHA3_512(nodes[0].bytes, seed, 32);
......@@ -68,10 +68,9 @@ void static ethash_compute_cache_nodes(
uint32_t const idx = nodes[i].words[0] % num_nodes;
node data;
data = nodes[(num_nodes - 1 + i) % num_nodes];
for (unsigned w = 0; w != NODE_WORDS; ++w)
{
data.words[w] ^= nodes[idx].words[w];
}
for (unsigned w = 0; w != NODE_WORDS; ++w) {
data.words[w] ^= nodes[idx].words[w];
}
SHA3_512(nodes[i].bytes, data.bytes, sizeof(data));
}
}
......@@ -86,7 +85,7 @@ void static ethash_compute_cache_nodes(
}
void ethash_mkcache(
ethash_cache *cache,
ethash_cache *cache,
ethash_params const *params,
const uint8_t seed[32]) {
node *nodes = (node *) cache->mem;
......@@ -99,13 +98,13 @@ void ethash_calculate_dag_item(
const struct ethash_params *params,
const struct ethash_cache *cache) {
uint32_t num_parent_nodes = (uint32_t)(params->cache_size / sizeof(node));
uint32_t num_parent_nodes = (uint32_t) (params->cache_size / sizeof(node));
node const *cache_nodes = (node const *) cache->mem;
node const *init = &cache_nodes[node_index % num_parent_nodes];
memcpy(ret, init, sizeof(node));
ret->words[0] ^= node_index;
SHA3_512(ret->bytes, ret->bytes, sizeof(node));
memcpy(ret, init, sizeof(node));
ret->words[0] ^= node_index;
SHA3_512(ret->bytes, ret->bytes, sizeof(node));
#if defined(_M_X64) && ENABLE_SSE
__m128i const fnv_prime = _mm_set1_epi32(FNV_PRIME);
......@@ -115,12 +114,11 @@ void ethash_calculate_dag_item(
__m128i xmm3 = ret->xmm[3];
#endif
for (unsigned i = 0; i != DAG_PARENTS; ++i)
{
uint32_t parent_index = ((node_index ^ i)*FNV_PRIME ^ ret->words[i % NODE_WORDS]) % num_parent_nodes;
for (unsigned i = 0; i != DATASET_PARENTS; ++i) {
uint32_t parent_index = ((node_index ^ i) * FNV_PRIME ^ ret->words[i % NODE_WORDS]) % num_parent_nodes;
node const *parent = &cache_nodes[parent_index];
#if defined(_M_X64) && ENABLE_SSE
#if defined(_M_X64) && ENABLE_SSE
{
xmm0 = _mm_mullo_epi32(xmm0, fnv_prime);
xmm1 = _mm_mullo_epi32(xmm1, fnv_prime);
......@@ -143,10 +141,10 @@ void ethash_calculate_dag_item(
ret->words[w] = fnv_hash(ret->words[w], parent->words[w]);
}
}
#endif
#endif
}
SHA3_512(ret->bytes, ret->bytes, sizeof(node));
SHA3_512(ret->bytes, ret->bytes, sizeof(node));
}
void ethash_compute_full_data(
......@@ -164,7 +162,7 @@ void ethash_compute_full_data(
}
static void ethash_hash(
ethash_return_value * ret,
ethash_return_value *ret,
node const *full_nodes,
ethash_cache const *cache,
ethash_params const *params,
......@@ -174,7 +172,7 @@ static void ethash_hash(
assert((params->full_size % MIX_WORDS) == 0);
// pack hash and nonce together into first 40 bytes of s_mix
assert(sizeof(node)*8 == 512);
assert(sizeof(node) * 8 == 512);
node s_mix[MIX_NODES + 1];
memcpy(s_mix[0].bytes, header_hash, 32);
......@@ -193,23 +191,21 @@ static void ethash_hash(
}
#endif
node* const mix = s_mix + 1;
node *const mix = s_mix + 1;
for (unsigned w = 0; w != MIX_WORDS; ++w) {
mix->words[w] = s_mix[0].words[w % NODE_WORDS];
}
unsigned const
page_size = sizeof(uint32_t) * MIX_WORDS,
num_full_pages = (unsigned)(params->full_size / page_size);
num_full_pages = (unsigned) (params->full_size / page_size);
for (unsigned i = 0; i != ACCESSES; ++i)
{
uint32_t const index = ((s_mix->words[0] ^ i)*FNV_PRIME ^ mix->words[i % MIX_WORDS]) % num_full_pages;
for (unsigned i = 0; i != ACCESSES; ++i) {
uint32_t const index = ((s_mix->words[0] ^ i) * FNV_PRIME ^ mix->words[i % MIX_WORDS]) % num_full_pages;
for (unsigned n = 0; n != MIX_NODES; ++n)
{
const node * dag_node = &full_nodes[MIX_NODES * index + n];
for (unsigned n = 0; n != MIX_NODES; ++n) {
const node *dag_node = &full_nodes[MIX_NODES * index + n];
if (!full_nodes) {
node tmp_node;
......@@ -217,7 +213,7 @@ static void ethash_hash(
dag_node = &tmp_node;
}
#if defined(_M_X64) && ENABLE_SSE
#if defined(_M_X64) && ENABLE_SSE
{
__m128i fnv_prime = _mm_set1_epi32(FNV_PRIME);
__m128i xmm0 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[0]);
......@@ -235,20 +231,19 @@ static void ethash_hash(
mix[n].words[w] = fnv_hash(mix[n].words[w], dag_node->words[w]);
}
}
#endif
#endif
}
}
// compress mix
for (unsigned w = 0; w != MIX_WORDS; w += 4)
{
uint32_t reduction = mix->words[w+0];
reduction = reduction*FNV_PRIME ^ mix->words[w+1];
reduction = reduction*FNV_PRIME ^ mix->words[w+2];
reduction = reduction*FNV_PRIME ^ mix->words[w+3];
mix->words[w/4] = reduction;
}
// compress mix
for (unsigned w = 0; w != MIX_WORDS; w += 4) {
uint32_t reduction = mix->words[w + 0];
reduction = reduction * FNV_PRIME ^ mix->words[w + 1];
reduction = reduction * FNV_PRIME ^ mix->words[w + 2];
reduction = reduction * FNV_PRIME ^ mix->words[w + 3];
mix->words[w / 4] = reduction;
}
#if BYTE_ORDER != LITTLE_ENDIAN
for (unsigned w = 0; w != MIX_WORDS/4; ++w) {
......@@ -258,7 +253,7 @@ static void ethash_hash(
memcpy(ret->mix_hash, mix->bytes, 32);
// final Keccak hash
SHA3_256(ret->result, s_mix->bytes, 64+32); // Keccak-256(s + compressed_mix)
SHA3_256(ret->result, s_mix->bytes, 64 + 32); // Keccak-256(s + compressed_mix)
}
void ethash_quick_hash(
......@@ -267,7 +262,7 @@ void ethash_quick_hash(
const uint64_t nonce,
const uint8_t mix_hash[32]) {
uint8_t buf[64+32];
uint8_t buf[64 + 32];
memcpy(buf, header_hash, 32);
#if BYTE_ORDER != LITTLE_ENDIAN
nonce = fix_endian64(nonce);
......@@ -275,7 +270,14 @@ void ethash_quick_hash(
memcpy(&(buf[32]), &nonce, 8);
SHA3_512(buf, buf, 40);
memcpy(&(buf[64]), mix_hash, 32);
SHA3_256(return_hash, buf, 64+32);
SHA3_256(return_hash, buf, 64 + 32);
}
void ethash_get_seedhash(uint8_t seedhash[32], const uint32_t block_number) {
memset(seedhash, 0, 32);
const uint32_t epochs = block_number / EPOCH_LENGTH;
for (uint32_t i = 0; i < epochs; ++i)
SHA3_256(seedhash, seedhash, 32);
}
int ethash_quick_check_difficulty(
......@@ -289,10 +291,10 @@ int ethash_quick_check_difficulty(
return ethash_check_difficulty(return_hash, difficulty);
}
void ethash_full(ethash_return_value * ret, void const *full_mem, ethash_params const *params, const uint8_t previous_hash[32], const uint64_t nonce) {
void ethash_full(ethash_return_value *ret, void const *full_mem, ethash_params const *params, const uint8_t previous_hash[32], const uint64_t nonce) {
ethash_hash(ret, (node const *) full_mem, NULL, params, previous_hash, nonce);
}
void ethash_light(ethash_return_value * ret, ethash_cache const *cache, ethash_params const *params, const uint8_t previous_hash[32], const uint64_t nonce) {
void ethash_light(ethash_return_value *ret, ethash_cache const *cache, ethash_params const *params, const uint8_t previous_hash[32], const uint64_t nonce) {
ethash_hash(ret, NULL, cache, params, previous_hash, nonce);
}
}
\ No newline at end of file
......@@ -3,7 +3,7 @@
#include "endian.h"
#include "ethash.h"
#define ENABLE_SSE 1
#define ENABLE_SSE 0
#if defined(_M_X64) && ENABLE_SSE
#include <smmintrin.h>
......
/*
This file is part of cpp-ethereum.
This file is part of ethash.
cpp-ethereum is free software: you can redistribute it and/or modify
ethash is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
cpp-ethereum is distributed in the hope that it will be useful,
ethash is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
along with ethash. If not, see <http://www.gnu.org/licenses/>.
*/
/** @file sha3.cpp
......
/*
This file is part of cpp-ethereum.
This file is part of ethash.
cpp-ethereum is free software: you can redistribute it and/or modify
ethash is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
cpp-ethereum is distributed in the hope that it will be useful,
ethash is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
along with ethash. If not, see <http://www.gnu.org/licenses/>.
*/
/** @file util.h
* @author Tim Hughes <tim@twistedfury.com>
......
#include <Python.h>
#include <alloca.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>
#include "../libethash/ethash.h"
static PyObject*
get_cache_size(PyObject* self, PyObject* args)
{
#define MIX_WORDS (MIX_BYTES/4)
static PyObject *
get_cache_size(PyObject *self, PyObject *args) {
unsigned long block_number;
if (!PyArg_ParseTuple(args, "k", &block_number))
return 0;
return 0;
if (block_number >= EPOCH_LENGTH * 2048) {
char error_message[1024];
sprintf(error_message, "Block number must be less than %i (was %lu)", EPOCH_LENGTH * 2048, block_number);
PyErr_SetString(PyExc_ValueError, error_message);
return 0;
}
return Py_BuildValue("i", ethash_get_cachesize(block_number));
}
static PyObject*
get_full_size(PyObject* self, PyObject* args)
{
static PyObject *
get_full_size(PyObject *self, PyObject *args) {
unsigned long block_number;
if (!PyArg_ParseTuple(args, "k", &block_number))
return 0;
return 0;
if (block_number >= EPOCH_LENGTH * 2048) {
char error_message[1024];
sprintf(error_message, "Block number must be less than %i (was %lu)", EPOCH_LENGTH * 2048, block_number);
PyErr_SetString(PyExc_ValueError, error_message);
return 0;
}
return Py_BuildValue("i", ethash_get_datasize(block_number));
}
static PyObject*
mkcache(PyObject* self, PyObject* args)
{
char * seed;
static PyObject *
mkcache_bytes(PyObject *self, PyObject *args) {
char *seed;
unsigned long cache_size;
int seed_len;
if (!PyArg_ParseTuple(args, "ks#", &cache_size, &seed, &seed_len))
return 0;
return 0;
if (seed_len != 32) {
char error_message[1024];
sprintf(error_message, "Seed must be 32 bytes long (was %i)", seed_len);
if (seed_len != 32)
{
PyErr_SetString(PyExc_ValueError,
"Seed must be 32 bytes long");
return 0;
PyErr_SetString(PyExc_ValueError, error_message);
return 0;
}
printf("cache size: %lu\n", cache_size);
ethash_params params;
params.cache_size = (size_t) cache_size;
ethash_cache cache;
cache.mem = alloca(cache_size);
cache.mem = malloc(cache_size);
ethash_mkcache(&cache, &params, (uint8_t *) seed);
return PyString_FromStringAndSize(cache.mem, cache_size);
PyObject * val = Py_BuildValue("s#", cache.mem, cache_size);
free(cache.mem);
return val;
}
static PyObject *
calc_dataset_bytes(PyObject *self, PyObject *args) {
char *cache_bytes;
unsigned long full_size;
int cache_size;
if (!PyArg_ParseTuple(args, "ks#", &full_size, &cache_bytes, &cache_size))
return 0;
if (full_size % MIX_WORDS != 0) {
char error_message[1024];
sprintf(error_message, "The size of data set must be a multiple of %i bytes (was %lu)", MIX_WORDS, full_size);
PyErr_SetString(PyExc_ValueError, error_message);
return 0;
}
if (cache_size % HASH_BYTES != 0) {
char error_message[1024];
sprintf(error_message, "The size of the cache must be a multiple of %i bytes (was %i)", HASH_BYTES, cache_size);
PyErr_SetString(PyExc_ValueError, error_message);
return 0;
}
ethash_params params;
params.cache_size = (size_t) cache_size;
params.full_size = (size_t) full_size;
ethash_cache cache;
cache.mem = (void *) cache_bytes;
void *mem = malloc(params.full_size);
ethash_compute_full_data(mem, &params, &cache);
PyObject * val = Py_BuildValue("s#", (char *) mem, full_size);
free(mem);
return val;
}
// hashimoto_light(full_size, cache, header, nonce)
static PyObject *
hashimoto_light(PyObject *self, PyObject *args) {
char *cache_bytes, *header;
unsigned long full_size;
unsigned long long nonce;
int cache_size, header_size;
if (!PyArg_ParseTuple(args, "ks#s#K", &full_size, &cache_bytes, &cache_size, &header, &header_size, &nonce))
return 0;
if (full_size % MIX_WORDS != 0) {
char error_message[1024];
sprintf(error_message, "The size of data set must be a multiple of %i bytes (was %lu)", MIX_WORDS, full_size);
PyErr_SetString(PyExc_ValueError, error_message);
return 0;
}
if (cache_size % HASH_BYTES != 0) {
char error_message[1024];
sprintf(error_message, "The size of the cache must be a multiple of %i bytes (was %i)", HASH_BYTES, cache_size);
PyErr_SetString(PyExc_ValueError, error_message);
return 0;
}
if (header_size != 32) {
char error_message[1024];
sprintf(error_message, "Seed must be 32 bytes long (was %i)", header_size);
PyErr_SetString(PyExc_ValueError, error_message);
return 0;
}
ethash_return_value out;
ethash_params params;
params.cache_size = (size_t) cache_size;
params.full_size = (size_t) full_size;
ethash_cache cache;
cache.mem = (void *) cache_bytes;
ethash_light(&out, &cache, &params, (uint8_t *) header, nonce);
return Py_BuildValue("{s:s#,s:s#}",
"mix digest", out.mix_hash, 32,
"result", out.result, 32);
}
// hashimoto_full(dataset, header, nonce)
static PyObject *
hashimoto_full(PyObject *self, PyObject *args) {
char *full_bytes, *header;
unsigned long long nonce;
int full_size, header_size;
if (!PyArg_ParseTuple(args, "s#s#K", &full_bytes, &full_size, &header, &header_size, &nonce))
return 0;
if (full_size % MIX_WORDS != 0) {
char error_message[1024];
sprintf(error_message, "The size of data set must be a multiple of %i bytes (was %i)", MIX_WORDS, full_size);
PyErr_SetString(PyExc_ValueError, error_message);
return 0;
}
if (header_size != 32) {
char error_message[1024];
sprintf(error_message, "Header must be 32 bytes long (was %i)", header_size);
PyErr_SetString(PyExc_ValueError, error_message);
return 0;
}
ethash_return_value out;
ethash_params params;
params.full_size = (size_t) full_size;
ethash_full(&out, (void *) full_bytes, &params, (uint8_t *) header, nonce);
return Py_BuildValue("{s:s#, s:s#}",
"mix digest", out.mix_hash, 32,
"result", out.result, 32);
}
// mine(dataset_bytes, header, difficulty_bytes)
static PyObject *
mine(PyObject *self, PyObject *args) {
char *full_bytes, *header, *difficulty;
srand(time(0));
uint64_t nonce = ((uint64_t) rand()) << 32 | rand();
int full_size, header_size, difficulty_size;
if (!PyArg_ParseTuple(args, "s#s#s#", &full_bytes, &full_size, &header, &header_size, &difficulty, &difficulty_size))
return 0;
if (full_size % MIX_WORDS != 0) {
char error_message[1024];
sprintf(error_message, "The size of data set must be a multiple of %i bytes (was %i)", MIX_WORDS, full_size);
PyErr_SetString(PyExc_ValueError, error_message);
return 0;
}
if (header_size != 32) {
char error_message[1024];
sprintf(error_message, "Header must be 32 bytes long (was %i)", header_size);
PyErr_SetString(PyExc_ValueError, error_message);
return 0;
}
if (difficulty_size != 32) {
char error_message[1024];
sprintf(error_message, "Difficulty must be an array of 32 bytes (only had %i)", difficulty_size);
PyErr_SetString(PyExc_ValueError, error_message);
return 0;
}
ethash_return_value out;
ethash_params params;
params.full_size = (size_t) full_size;
// TODO: Multi threading?
do {
ethash_full(&out, (void *) full_bytes, &params, (const uint8_t *) header, nonce++);
// TODO: disagrees with the spec https://github.com/ethereum/wiki/wiki/Ethash#mining
} while (!ethash_check_difficulty(out.result, (const uint8_t *) difficulty));
return Py_BuildValue("{s:s#, s:s#, s:K}",
"mix digest", out.mix_hash, 32,
"result", out.result, 32,
"nonce", nonce);
}
//get_seedhash(block_number)
static PyObject *
get_seedhash(PyObject *self, PyObject *args) {
unsigned long block_number;
if (!PyArg_ParseTuple(args, "k", &block_number))
return 0;
if (block_number >= EPOCH_LENGTH * 2048) {
char error_message[1024];
sprintf(error_message, "Block number must be less than %i (was %lu)", EPOCH_LENGTH * 2048, block_number);
PyErr_SetString(PyExc_ValueError, error_message);
return 0;
}
uint8_t seedhash[32];
ethash_get_seedhash(seedhash, block_number);
return Py_BuildValue("s#", (char *) seedhash, 32);
}
static PyMethodDef CoreMethods[] =
{
{"get_cache_size", get_cache_size, METH_VARARGS, "Get the cache size for a given block number"},
{"get_full_size", get_full_size, METH_VARARGS, "Get the full size for a given block number"},
{"mkcache", mkcache, METH_VARARGS, "Makes the cache for given parameters and seed hash"},
{NULL, NULL, 0, NULL}
};
static PyMethodDef PyethashMethods[] =
{
{"get_cache_size", get_cache_size, METH_VARARGS,
"get_cache_size(block_number)\n\n"
"Get the cache size for a given block number\n"
"\nExample:\n"
">>> get_cache_size(0)\n"
"1048384"},
{"get_full_size", get_full_size, METH_VARARGS,
"get_full_size(block_number)\n\n"
"Get the full size for a given block number\n"
"\nExample:\n"
">>> get_full_size(0)\n"
"1073739904"
},
{"get_seedhash", get_seedhash, METH_VARARGS,
"get_seedhash(block_number)\n\n"
"Gets the seedhash for a block."},
{"mkcache_bytes", mkcache_bytes, METH_VARARGS,
"mkcache_bytes(size, header)\n\n"
"Makes a byte array for the cache for given cache size and seed hash\n"
"\nExample:\n"
">>> pyethash.mkcache_bytes( 1024, \"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\").encode('hex')"
"\"2da2b506f21070e1143d908e867962486d6b0a02e31d468fd5e3a7143aafa76a14201f63374314e2a6aaf84ad2eb57105dea3378378965a1b3873453bb2b78f9a8620b2ebeca41fbc773bb837b5e724d6eb2de570d99858df0d7d97067fb8103b21757873b735097b35d3bea8fd1c359a9e8a63c1540c76c9784cf8d975e995ca8620b2ebeca41fbc773bb837b5e724d6eb2de570d99858df0d7d97067fb8103b21757873b735097b35d3bea8fd1c359a9e8a63c1540c76c9784cf8d975e995ca8620b2ebeca41fbc773bb837b5e724d6eb2de570d99858df0d7d97067fb8103b21757873b735097b35d3bea8fd1c359a9e8a63c1540c76c9784cf8d975e995c259440b89fa3481c2c33171477c305c8e1e421f8d8f6d59585449d0034f3e421808d8da6bbd0b6378f567647cc6c4ba6c434592b198ad444e7284905b7c6adaf70bf43ec2daa7bd5e8951aa609ab472c124cf9eba3d38cff5091dc3f58409edcc386c743c3bd66f92408796ee1e82dd149eaefbf52b00ce33014a6eb3e50625413b072a58bc01da28262f42cbe4f87d4abc2bf287d15618405a1fe4e386fcdafbb171064bd99901d8f81dd6789396ce5e364ac944bbbd75a7827291c70b42d26385910cd53ca535ab29433dd5c5714d26e0dce95514c5ef866329c12e958097e84462197c2b32087849dab33e88b11da61d52f9dbc0b92cc61f742c07dbbf751c49d7678624ee60dfbe62e5e8c47a03d8247643f3d16ad8c8e663953bcda1f59d7e2d4a9bf0768e789432212621967a8f41121ad1df6ae1fa78782530695414c6213942865b2730375019105cae91a4c17a558d4b63059661d9f108362143107babe0b848de412e4da59168cce82bfbff3c99e022dd6ac1e559db991f2e3f7bb910cefd173e65ed00a8d5d416534e2c8416ff23977dbf3eb7180b75c71580d08ce95efeb9b0afe904ea12285a392aff0c8561ff79fca67f694a62b9e52377485c57cc3598d84cac0a9d27960de0cc31ff9bbfe455acaa62c8aa5d2cce96f345da9afe843d258a99c4eaf3650fc62efd81c7b81cd0d534d2d71eeda7a6e315d540b4473c80f8730037dc2ae3e47b986240cfc65ccc565f0d8cde0bc68a57e39a271dda57440b3598bee19f799611d25731a96b5dbbbefdff6f4f656161462633030d62560ea4e9c161cf78fc96a2ca5aaa32453a6c5dea206f766244e8c9d9a8dc61185ce37f1fc804459c5f07434f8ecb34141b8dcae7eae704c950b55556c5f40140c3714b45eddb02637513268778cbf937a33e4e33183685f9deb31ef54e90161e76d969587dd782eaa94e289420e7c2ee908517f5893a26fdb5873d68f92d118d4bcf98d7a4916794d6ab290045e30f9ea00ca547c584b8482b0331ba1539a0f2714fddc3a0b06b0cfbb6a607b8339c39bcfd6640b1f653e9d70ef6c985b\""},
{"calc_dataset_bytes", calc_dataset_bytes, METH_VARARGS,
"calc_dataset_bytes(full_size, cache_bytes)\n\n"
"Makes a byte array for the dataset for a given size given cache bytes"},
{"hashimoto_light", hashimoto_light, METH_VARARGS,
"hashimoto_light(full_size, cache_bytes, header, nonce)\n\n"
"Runs the hashimoto hashing function just using cache bytes. Takes an int (full_size), byte array (cache_bytes), another byte array (header), and an int (nonce). Returns an object containing the mix digest, and hash result."},
{"hashimoto_full", hashimoto_full, METH_VARARGS,
"hashimoto_full(dataset_bytes, header, nonce)\n\n"
"Runs the hashimoto hashing function using the dataset bytes. Useful for testing. Returns an object containing the mix digest (byte array), and hash result (another byte array)."},
{"mine", mine, METH_VARARGS,
"mine(dataset_bytes, header, difficulty_bytes)\n\n"
"Mine for an adequate header. Returns an object containing the mix digest (byte array), hash result (another byte array) and nonce (an int)."},
{NULL, NULL, 0, NULL}
};
PyMODINIT_FUNC
initcore(void)
{
(void) Py_InitModule("core", CoreMethods);
initpyethash(void) {
PyObject *module = Py_InitModule("pyethash", PyethashMethods);
// Following Spec: https://github.com/ethereum/wiki/wiki/Ethash#definitions
PyModule_AddIntConstant(module, "REVISION", (long) REVISION);
PyModule_AddIntConstant(module, "DATASET_BYTES_INIT", (long) DATASET_BYTES_INIT);
PyModule_AddIntConstant(module, "DATASET_BYTES_GROWTH", (long) DATASET_BYTES_GROWTH);
PyModule_AddIntConstant(module, "CACHE_BYTES_INIT", (long) CACHE_BYTES_INIT);
PyModule_AddIntConstant(module, "CACHE_BYTES_GROWTH", (long) CACHE_BYTES_GROWTH);
PyModule_AddIntConstant(module, "EPOCH_LENGTH", (long) EPOCH_LENGTH);
PyModule_AddIntConstant(module, "MIX_BYTES", (long) MIX_BYTES);
PyModule_AddIntConstant(module, "HASH_BYTES", (long) HASH_BYTES);
PyModule_AddIntConstant(module, "DATASET_PARENTS", (long) DATASET_PARENTS);
PyModule_AddIntConstant(module, "CACHE_ROUNDS", (long) CACHE_ROUNDS);
PyModule_AddIntConstant(module, "ACCESSES", (long) ACCESSES);
}
......@@ -4,7 +4,9 @@
#include <libethash/internal.h>
#ifdef WITH_CRYPTOPP
#include <libethash/sha3_cryptopp.h>
#else
#include <libethash/sha3.h>
#endif // WITH_CRYPTOPP
......@@ -28,7 +30,7 @@ BOOST_AUTO_TEST_CASE(fnv_hash_check) {
uint32_t x = 1235U;
const uint32_t
y = 9999999U,
expected = (FNV_PRIME * x) ^ y;
expected = (FNV_PRIME * x) ^y;
x = fnv_hash(x, y);
......@@ -65,43 +67,50 @@ BOOST_AUTO_TEST_CASE(SHA512_check) {
BOOST_AUTO_TEST_CASE(ethash_params_init_genesis_check) {
ethash_params params;
ethash_params_init(&params, 0);
BOOST_REQUIRE_MESSAGE(params.full_size < DAGSIZE_BYTES_INIT,
BOOST_REQUIRE_MESSAGE(params.full_size < DATASET_BYTES_INIT,
"\nfull size: " << params.full_size << "\n"
<< "should be less than or equal to: " << DAGSIZE_BYTES_INIT << "\n");
BOOST_REQUIRE_MESSAGE(params.full_size + 20*MIX_BYTES >= DAGSIZE_BYTES_INIT,
"\nfull size + 20*MIX_BYTES: " << params.full_size + 20*MIX_BYTES << "\n"
<< "should be greater than or equal to: " << DAGSIZE_BYTES_INIT << "\n");
BOOST_REQUIRE_MESSAGE(params.cache_size < DAGSIZE_BYTES_INIT / 32,
<< "should be less than or equal to: " << DATASET_BYTES_INIT << "\n");
BOOST_REQUIRE_MESSAGE(params.full_size + 20 * MIX_BYTES >= DATASET_BYTES_INIT,
"\nfull size + 20*MIX_BYTES: " << params.full_size + 20 * MIX_BYTES << "\n"
<< "should be greater than or equal to: " << DATASET_BYTES_INIT << "\n");
BOOST_REQUIRE_MESSAGE(params.cache_size < DATASET_BYTES_INIT / 32,
"\ncache size: " << params.cache_size << "\n"
<< "should be less than or equal to: " << DAGSIZE_BYTES_INIT / 32 << "\n");
<< "should be less than or equal to: " << DATASET_BYTES_INIT / 32 << "\n");
}
BOOST_AUTO_TEST_CASE(ethash_params_init_genesis_calcifide_check) {
ethash_params params;
ethash_params_init(&params, 0);
const uint32_t expected_full_size = 1073739904;
const uint32_t expected_cache_size = 1048384;
BOOST_REQUIRE_MESSAGE(params.full_size == expected_full_size,
const uint32_t expected_cache_size = 16776896;
BOOST_REQUIRE_MESSAGE(params.full_size == expected_full_size,
"\nexpected: " << expected_cache_size << "\n"
<< "actual: " << params.full_size << "\n");
BOOST_REQUIRE_MESSAGE(params.cache_size == expected_cache_size,
BOOST_REQUIRE_MESSAGE(params.cache_size == expected_cache_size,
"\nexpected: " << expected_cache_size << "\n"
<< "actual: " << params.cache_size << "\n");
}
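
The expected genesis sizes in the test above (full 1073739904, cache 16776896) sit just below DATASET_BYTES_INIT (2^30) and 2^24. Per the Ethash wiki spec referenced elsewhere in this commit (an assumption here, since this diff only carries the precomputed tables via data_sizes.h), they are the largest prime multiples of MIX_BYTES (128) and HASH_BYTES (64) under those bounds. A quick Go check of that relationship:

// Hypothetical check (not code from this commit): recompute the genesis
// dataset and cache sizes as the largest prime multiple of the mix/hash
// width below the initial byte bounds, and compare with the test values.
package main

import (
	"fmt"
	"math/big"
)

func largestPrimeMultipleBelow(bound, unit uint64) uint64 {
	n := bound / unit
	for !big.NewInt(int64(n)).ProbablyPrime(20) {
		n--
	}
	return n * unit
}

func main() {
	fmt.Println(largestPrimeMultipleBelow(1<<30, 128)) // 1073739904 == expected_full_size
	fmt.Println(largestPrimeMultipleBelow(1<<24, 64))  // 16776896  == expected_cache_size
}
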
BOOST_AUTO_TEST_CASE(light_and_full_client_checks) {
ethash_params params;
uint8_t seed[32], hash[32];
uint8_t seed[32], hash[32], difficulty[32];
ethash_return_value light_out, full_out;
memcpy(seed, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
memcpy(hash, "~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
// Set the difficulty
difficulty[0] = 197;
difficulty[1] = 90;
for (int i = 2; i < 32; i++)
difficulty[i] = (uint8_t) 255;
ethash_params_init(&params, 0);
params.cache_size = 1024;
params.full_size = 1024 * 32;
ethash_cache cache;
cache.mem = alloca(params.cache_size);
ethash_mkcache(&cache, &params, seed);
node * full_mem = (node *) alloca(params.full_size);
node *full_mem = (node *) alloca(params.full_size);
ethash_compute_full_data(full_mem, &params, &cache);
{
......@@ -115,7 +124,6 @@ BOOST_AUTO_TEST_CASE(light_and_full_client_checks) {
}
{
node node;
ethash_calculate_dag_item(&node, 0, &params, &cache);
......@@ -128,7 +136,7 @@ BOOST_AUTO_TEST_CASE(light_and_full_client_checks) {
}
{
for (int i = 0 ; i < params.full_size / sizeof(node) ; ++i ) {
for (int i = 0; i < params.full_size / sizeof(node); ++i) {
for (uint32_t j = 0; j < 32; ++j) {
node expected_node;
ethash_calculate_dag_item(&expected_node, j, &params, &cache);
......@@ -187,6 +195,12 @@ BOOST_AUTO_TEST_CASE(light_and_full_client_checks) {
BOOST_REQUIRE_MESSAGE(full_mix_hash_string == light_mix_hash_string,
"\nlight mix hash: " << light_mix_hash_string.c_str() << "\n"
<< "full mix hash: " << full_mix_hash_string.c_str() << "\n");
BOOST_REQUIRE_MESSAGE(ethash_check_difficulty(full_out.result, difficulty),
"ethash_check_difficulty failed"
);
BOOST_REQUIRE_MESSAGE(ethash_quick_check_difficulty(hash, 5U, full_out.mix_hash, difficulty),
"ethash_quick_check_difficulty failed"
);
}
}
......@@ -199,14 +213,14 @@ BOOST_AUTO_TEST_CASE(ethash_check_difficulty_check) {
memcpy(target, "22222222222222222222222222222222", 32);
BOOST_REQUIRE_MESSAGE(
ethash_check_difficulty(hash, target),
"\nexpected \"" << hash << "\" to have less difficulty than \"" << target << "\"\n");
"\nexpected \"" << std::string((char *) hash, 32).c_str() << "\" to have the same or less difficulty than \"" << std::string((char *) target, 32).c_str() << "\"\n");
BOOST_REQUIRE_MESSAGE(
!ethash_check_difficulty(hash, hash),
"\nexpected \"" << hash << "\" to have the same difficulty as \"" << hash << "\"\n");
ethash_check_difficulty(hash, hash),
"\nexpected \"" << hash << "\" to have the same or less difficulty than \"" << hash << "\"\n");
memcpy(target, "11111111111111111111111111111112", 32);
BOOST_REQUIRE_MESSAGE(
ethash_check_difficulty(hash, target),
"\nexpected \"" << hash << "\" to have less difficulty than \"" << target << "\"\n");
"\nexpected \"" << hash << "\" to have the same or less difficulty than \"" << target << "\"\n");
memcpy(target, "11111111111111111111111111111110", 32);
BOOST_REQUIRE_MESSAGE(
!ethash_check_difficulty(hash, target),
......
package ethashTest
import (
"bytes"
"crypto/rand"
"encoding/hex"
"log"
"math/big"
"testing"
"github.com/ethereum/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/ethdb"
)
func TestEthash(t *testing.T) {
seedHash := make([]byte, 32)
_, err := rand.Read(seedHash)
if err != nil {
panic(err)
}
db, err := ethdb.NewMemDatabase()
if err != nil {
panic(err)
}
blockProcessor, err := core.NewCanonical(5, db)
if err != nil {
panic(err)
}
log.Println("Block Number: ", blockProcessor.ChainManager().CurrentBlock().Number())
e := ethash.New(blockProcessor.ChainManager())
miningHash := make([]byte, 32)
if _, err := rand.Read(miningHash); err != nil {
panic(err)
}
diff := big.NewInt(10000)
log.Println("difficulty", diff)
nonce := uint64(0)
ghash_full := e.FullHash(nonce, miningHash)
log.Printf("ethash full (on nonce): %x %x\n", ghash_full, nonce)
ghash_light := e.LightHash(nonce, miningHash)
log.Printf("ethash light (on nonce): %x %x\n", ghash_light, nonce)
if bytes.Compare(ghash_full, ghash_light) != 0 {
t.Errorf("full: %x, light: %x", ghash_full, ghash_light)
}
}
func TestGetSeedHash(t *testing.T) {
seed0, err := ethash.GetSeedHash(0)
if err != nil {
t.Errorf("Failed to get seedHash for block 0: %v", err)
}
if bytes.Compare(seed0, []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}) != 0 {
log.Printf("seedHash for block 0 should be 0s, was: %v\n", seed0)
}
seed1, err := ethash.GetSeedHash(30000)
if err != nil {
t.Error(err)
}
// From python:
// > from pyethash import get_seedhash
// > get_seedhash(30000)
expectedSeed1, err := hex.DecodeString("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
if err != nil {
t.Error(err)
}
if bytes.Compare(seed1, expectedSeed1) != 0 {
log.Printf("seedHash for block 1 should be: %v,\nactual value: %v\n", expectedSeed1, seed1)
}
}
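The expected values in TestGetSeedHash pin down how the seed evolves: block 0 maps to 32 zero bytes, and block 30000 maps to the Keccak-256 hash of those zero bytes (290decd9…). A minimal sketch of that derivation in Go, assuming the 30000-block epoch length used throughout these tests; seedHash and epochLength are illustrative names, not the package API:

package main

import (
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/sha3" // NewLegacyKeccak256 matches the pre-NIST sha3 used by pysha3 above
)

const epochLength = 30000 // epoch length assumed from the tests in this diff

// seedHash re-derives the expected values: start from 32 zero bytes and
// apply Keccak-256 once per elapsed epoch.
func seedHash(blockNum uint64) []byte {
	seed := make([]byte, 32)
	for i := uint64(0); i < blockNum/epochLength; i++ {
		h := sha3.NewLegacyKeccak256()
		h.Write(seed)
		seed = h.Sum(nil)
	}
	return seed
}

func main() {
	fmt.Println(hex.EncodeToString(seedHash(0)))     // 32 zero bytes
	fmt.Println(hex.EncodeToString(seedHash(30000))) // 290decd9...0ef3e563
}

The Python test further down (test_get_seedhash) walks the same chain for 2048 epochs, one hash per 30000 blocks.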
#!/bin/bash
# Strict mode
set -e
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
done
TEST_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
export GOPATH=${HOME}/.go
export PATH=$PATH:$GOPATH/bin
echo "# getting go dependencies (can take some time)..."
cd ${TEST_DIR}/../.. && go get
cd ${GOPATH}/src/github.com/ethereum/go-ethereum
git checkout poc-9
cd ${TEST_DIR} && go test
pyethereum==0.7.522
nose==1.3.4
pysha3==0.3
\ No newline at end of file
......@@ -14,6 +14,6 @@ TEST_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
[ -d $TEST_DIR/python-virtual-env ] || virtualenv --system-site-packages $TEST_DIR/python-virtual-env
source $TEST_DIR/python-virtual-env/bin/activate
pip install -r $TEST_DIR/requirements.txt > /dev/null
pip install -e $TEST_DIR/../.. > /dev/null
pip install --upgrade --no-deps --force-reinstall -e $TEST_DIR/../..
cd $TEST_DIR
nosetests --with-doctest -v
nosetests --with-doctest -v --nocapture
......@@ -4,42 +4,102 @@ from random import randint
def test_get_cache_size_not_None():
for _ in range(100):
block_num = randint(0,12456789)
out = pyethash.core.get_cache_size(block_num)
out = pyethash.get_cache_size(block_num)
assert out != None
def test_get_full_size_not_None():
for _ in range(100):
block_num = randint(0,12456789)
out = pyethash.core.get_full_size(block_num)
out = pyethash.get_full_size(block_num)
assert out != None
def test_get_cache_size_based_on_EPOCH():
for _ in range(100):
block_num = randint(0,12456789)
out1 = pyethash.core.get_cache_size(block_num)
out2 = pyethash.core.get_cache_size((block_num // pyethash.EPOCH_LENGTH) * pyethash.EPOCH_LENGTH)
out1 = pyethash.get_cache_size(block_num)
out2 = pyethash.get_cache_size((block_num // pyethash.EPOCH_LENGTH) * pyethash.EPOCH_LENGTH)
assert out1 == out2
def test_get_full_size_based_on_EPOCH():
for _ in range(100):
block_num = randint(0,12456789)
out1 = pyethash.core.get_full_size(block_num)
out2 = pyethash.core.get_full_size((block_num // pyethash.EPOCH_LENGTH) * pyethash.EPOCH_LENGTH)
out1 = pyethash.get_full_size(block_num)
out2 = pyethash.get_full_size((block_num // pyethash.EPOCH_LENGTH) * pyethash.EPOCH_LENGTH)
assert out1 == out2
#def test_get_params_based_on_EPOCH():
# block_num = 123456
# out1 = pyethash.core.get_params(block_num)
# out2 = pyethash.core.get_params((block_num // pyethash.EPOCH_LENGTH) * pyethash.EPOCH_LENGTH)
# assert out1["DAG Size"] == out2["DAG Size"]
# assert out1["Cache Size"] == out2["Cache Size"]
#
#def test_get_params_returns_different_values_based_on_different_block_input():
# out1 = pyethash.core.get_params(123456)
# out2 = pyethash.core.get_params(12345)
# assert out1["DAG Size"] != out2["DAG Size"]
# assert out1["Cache Size"] != out2["Cache Size"]
#
#def test_get_cache_smoke_test():
# params = pyethash.core.get_params(123456)
# assert pyethash.core.mkcache(params, "~~~~") != None
# See light_and_full_client_checks in test.cpp
def test_mkcache_is_as_expected():
actual = pyethash.mkcache_bytes(
1024,
"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~").encode('hex')
expected = "2da2b506f21070e1143d908e867962486d6b0a02e31d468fd5e3a7143aafa76a14201f63374314e2a6aaf84ad2eb57105dea3378378965a1b3873453bb2b78f9a8620b2ebeca41fbc773bb837b5e724d6eb2de570d99858df0d7d97067fb8103b21757873b735097b35d3bea8fd1c359a9e8a63c1540c76c9784cf8d975e995ca8620b2ebeca41fbc773bb837b5e724d6eb2de570d99858df0d7d97067fb8103b21757873b735097b35d3bea8fd1c359a9e8a63c1540c76c9784cf8d975e995ca8620b2ebeca41fbc773bb837b5e724d6eb2de570d99858df0d7d97067fb8103b21757873b735097b35d3bea8fd1c359a9e8a63c1540c76c9784cf8d975e995c259440b89fa3481c2c33171477c305c8e1e421f8d8f6d59585449d0034f3e421808d8da6bbd0b6378f567647cc6c4ba6c434592b198ad444e7284905b7c6adaf70bf43ec2daa7bd5e8951aa609ab472c124cf9eba3d38cff5091dc3f58409edcc386c743c3bd66f92408796ee1e82dd149eaefbf52b00ce33014a6eb3e50625413b072a58bc01da28262f42cbe4f87d4abc2bf287d15618405a1fe4e386fcdafbb171064bd99901d8f81dd6789396ce5e364ac944bbbd75a7827291c70b42d26385910cd53ca535ab29433dd5c5714d26e0dce95514c5ef866329c12e958097e84462197c2b32087849dab33e88b11da61d52f9dbc0b92cc61f742c07dbbf751c49d7678624ee60dfbe62e5e8c47a03d8247643f3d16ad8c8e663953bcda1f59d7e2d4a9bf0768e789432212621967a8f41121ad1df6ae1fa78782530695414c6213942865b2730375019105cae91a4c17a558d4b63059661d9f108362143107babe0b848de412e4da59168cce82bfbff3c99e022dd6ac1e559db991f2e3f7bb910cefd173e65ed00a8d5d416534e2c8416ff23977dbf3eb7180b75c71580d08ce95efeb9b0afe904ea12285a392aff0c8561ff79fca67f694a62b9e52377485c57cc3598d84cac0a9d27960de0cc31ff9bbfe455acaa62c8aa5d2cce96f345da9afe843d258a99c4eaf3650fc62efd81c7b81cd0d534d2d71eeda7a6e315d540b4473c80f8730037dc2ae3e47b986240cfc65ccc565f0d8cde0bc68a57e39a271dda57440b3598bee19f799611d25731a96b5dbbbefdff6f4f656161462633030d62560ea4e9c161cf78fc96a2ca5aaa32453a6c5dea206f766244e8c9d9a8dc61185ce37f1fc804459c5f07434f8ecb34141b8dcae7eae704c950b55556c5f40140c3714b45eddb02637513268778cbf937a33e4e33183685f9deb31ef54e90161e76d969587dd782eaa94e289420e7c2ee908517f5893a26fdb5873d68f92d118d4bcf98d7a4916794d6ab290045e30f9ea00ca547c584b8482b0331ba1539a0f2714fddc3a0b06b0cfbb6a607b8339c39bcfd6640b1f653e9d70ef6c985b"
assert actual == expected
def test_calc_dataset_is_not_None():
cache = pyethash.mkcache_bytes(
1024,
"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
assert pyethash.calc_dataset_bytes(1024 * 32, cache) != None
def test_light_and_full_agree():
cache = pyethash.mkcache_bytes(
1024,
"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
full_size = 1024 * 32
header = "~~~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~"
light_result = pyethash.hashimoto_light(full_size, cache, header, 0)
dataset = pyethash.calc_dataset_bytes(full_size, cache)
full_result = pyethash.hashimoto_full(dataset, header, 0)
assert light_result["mix digest"] != None
assert len(light_result["mix digest"]) == 32
assert light_result["mix digest"] == full_result["mix digest"]
assert light_result["result"] != None
assert len(light_result["result"]) == 32
assert light_result["result"] == full_result["result"]
def int_to_bytes(i):
b = []
for _ in range(32):
b.append(chr(i & 0xff))
i >>= 8
b.reverse()
return "".join(b)
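int_to_bytes above packs an integer into a fixed 32-byte big-endian string so it can be passed to pyethash.mine as the difficulty boundary in the tests below. An equivalent Go sketch; bigToBoundary is an illustrative name and assumes the value fits in 256 bits:

package main

import (
	"encoding/hex"
	"fmt"
	"math/big"
)

// bigToBoundary left-pads n (assumed to fit in 256 bits) into a fixed
// 32-byte big-endian buffer, mirroring int_to_bytes above.
func bigToBoundary(n *big.Int) []byte {
	out := make([]byte, 32)
	b := n.Bytes() // big-endian, without leading zeros
	copy(out[32-len(b):], b)
	return out
}

func main() {
	max := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(1))
	fmt.Println(hex.EncodeToString(bigToBoundary(max))) // 64 'f' characters, as test_mining_basic asserts
}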
def test_mining_basic():
easy_difficulty = int_to_bytes(2**256 - 1)
assert easy_difficulty.encode('hex') == 'f' * 64
cache = pyethash.mkcache_bytes(
1024,
"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
full_size = 1024 * 32
header = "~~~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~"
dataset = pyethash.calc_dataset_bytes(full_size, cache)
# Check type of outputs
assert type(pyethash.mine(dataset,header,easy_difficulty)) == dict
assert type(pyethash.mine(dataset,header,easy_difficulty)["nonce"]) == long
assert type(pyethash.mine(dataset,header,easy_difficulty)["mix digest"]) == str
assert type(pyethash.mine(dataset,header,easy_difficulty)["result"]) == str
def test_mining_doesnt_always_return_the_same_value():
easy_difficulty1 = int_to_bytes(int(2**256 * 0.999))
# 1 in 1000 difficulty
easy_difficulty2 = int_to_bytes(int(2**256 * 0.001))
assert easy_difficulty1 != easy_difficulty2
cache = pyethash.mkcache_bytes(
1024,
"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
full_size = 1024 * 32
header = "~~~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~"
dataset = pyethash.calc_dataset_bytes(full_size, cache)
# Check type of outputs
assert pyethash.mine(dataset, header, easy_difficulty1)['nonce'] != pyethash.mine(dataset, header, easy_difficulty2)['nonce']
def test_get_seedhash():
assert pyethash.get_seedhash(0).encode('hex') == '0' * 64
import hashlib, sha3
expected = pyethash.get_seedhash(0)
#print "checking seed hashes:",
for i in range(0, 30000*2048, 30000):
#print i // 30000,
assert pyethash.get_seedhash(i) == expected
expected = hashlib.sha3_256(expected).digest()
......@@ -14,7 +14,12 @@ TEST_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
echo -e "\n################# Testing JS ##################"
# TODO: Use mocha and real testing tools instead of rolling our own
cd $TEST_DIR/../js
node test.js
if [ -x "$(which nodejs)" ] ; then
nodejs test.js
fi
if [ -x "$(which node)" ] ; then
node test.js
fi
echo -e "\n################# Testing C ##################"
$TEST_DIR/c/test.sh
......