Commit 9619a610, authored by ferhat elmas, committed by Felix Lange

all: gofmt -w -s (#15419)

Parent: bfdc0fa3
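The diff below is the output of `gofmt -w -s` ("simplify"), which applies two mechanical rewrites visible in nearly every hunk: it elides element types that can be inferred inside composite literals, and it drops the redundant blank identifier from `for ... range` clauses. A minimal self-contained before/after sketch of both rewrites (the variable names here are illustrative, not taken from this commit):

package main

import "fmt"

func main() {
	index := map[string]int{"a": 1, "b": 2}

	// Before `gofmt -s`: the inner element type is spelled out and the
	// blank identifier in the range clause is redundant.
	before := [][][]byte{
		[][]byte{[]byte("key"), nil},
	}
	for k, _ := range index {
		fmt.Println("before:", k)
	}

	// After `gofmt -s`: the inner type is elided ({...} instead of
	// [][]byte{...}) and the unused blank identifier is dropped.
	after := [][][]byte{
		{[]byte("key"), nil},
	}
	for k := range index {
		fmt.Println("after:", k)
	}

	fmt.Println(len(before), len(after))
}

Both forms compile to identical code; the change is purely cosmetic, which is why the hunks below touch tests, generated bindata, genesis allocations and struct literals without altering behavior.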
......@@ -472,7 +472,7 @@ func TestBindings(t *testing.T) {
t.Fatalf("failed to create temporary workspace: %v", err)
}
defer os.RemoveAll(ws)
pkg := filepath.Join(ws, "bindtest")
if err = os.MkdirAll(pkg, 0700); err != nil {
t.Fatalf("failed to create package: %v", err)
......
......@@ -365,7 +365,7 @@ func TestUnmarshal(t *testing.T) {
buff.Write(common.Hex2Bytes("0102000000000000000000000000000000000000000000000000000000000000"))
err = abi.Unpack(&mixedBytes, "mixedBytes", buff.Bytes())
-	if err !=nil {
+	if err != nil {
t.Error(err)
} else {
if bytes.Compare(p0, p0Exp) != 0 {
......
......@@ -182,8 +182,9 @@ type bintree struct {
Func func() (*asset, error)
Children map[string]*bintree
}
var _bintree = &bintree{nil, map[string]*bintree{
"faucet.html": &bintree{faucetHtml, map[string]*bintree{}},
"faucet.html": {faucetHtml, map[string]*bintree{}},
}}
// RestoreAsset restores an asset under the given directory
......@@ -232,4 +233,3 @@ func _filePath(dir, name string) string {
cannonicalName := strings.Replace(name, "\\", "/", -1)
return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
}
......@@ -31,14 +31,14 @@ const testSectionSize = 4096
// Tests that wildcard filter rules (nil) can be specified and are handled well.
func TestMatcherWildcards(t *testing.T) {
matcher := NewMatcher(testSectionSize, [][][]byte{
-	[][]byte{common.Address{}.Bytes(), common.Address{0x01}.Bytes()}, // Default address is not a wildcard
-	[][]byte{common.Hash{}.Bytes(), common.Hash{0x01}.Bytes()}, // Default hash is not a wildcard
-	[][]byte{common.Hash{0x01}.Bytes()}, // Plain rule, sanity check
-	[][]byte{common.Hash{0x01}.Bytes(), nil}, // Wildcard suffix, drop rule
-	[][]byte{nil, common.Hash{0x01}.Bytes()}, // Wildcard prefix, drop rule
-	[][]byte{nil, nil}, // Wildcard combo, drop rule
-	[][]byte{}, // Inited wildcard rule, drop rule
-	nil, // Proper wildcard rule, drop rule
+	{common.Address{}.Bytes(), common.Address{0x01}.Bytes()}, // Default address is not a wildcard
+	{common.Hash{}.Bytes(), common.Hash{0x01}.Bytes()}, // Default hash is not a wildcard
+	{common.Hash{0x01}.Bytes()}, // Plain rule, sanity check
+	{common.Hash{0x01}.Bytes(), nil}, // Wildcard suffix, drop rule
+	{nil, common.Hash{0x01}.Bytes()}, // Wildcard prefix, drop rule
+	{nil, nil}, // Wildcard combo, drop rule
+	{}, // Inited wildcard rule, drop rule
+	nil, // Proper wildcard rule, drop rule
})
if len(matcher.filters) != 3 {
t.Fatalf("filter system size mismatch: have %d, want %d", len(matcher.filters), 3)
......
......@@ -60,7 +60,7 @@ func testScheduler(t *testing.T, clients int, fetchers int, requests int) {
req.section, // Requested data
req.section, // Duplicated data (ensure it doesn't double close anything)
}, [][]byte{
-	[]byte{},
+	{},
new(big.Int).SetUint64(req.section).Bytes(),
new(big.Int).SetUint64(req.section).Bytes(),
})
......
......@@ -356,15 +356,15 @@ func DeveloperGenesisBlock(period uint64, faucet common.Address) *Genesis {
GasLimit: 6283185,
Difficulty: big.NewInt(1),
Alloc: map[common.Address]GenesisAccount{
-	common.BytesToAddress([]byte{1}): GenesisAccount{Balance: big.NewInt(1)}, // ECRecover
-	common.BytesToAddress([]byte{2}): GenesisAccount{Balance: big.NewInt(1)}, // SHA256
-	common.BytesToAddress([]byte{3}): GenesisAccount{Balance: big.NewInt(1)}, // RIPEMD
-	common.BytesToAddress([]byte{4}): GenesisAccount{Balance: big.NewInt(1)}, // Identity
-	common.BytesToAddress([]byte{5}): GenesisAccount{Balance: big.NewInt(1)}, // ModExp
-	common.BytesToAddress([]byte{6}): GenesisAccount{Balance: big.NewInt(1)}, // ECAdd
-	common.BytesToAddress([]byte{7}): GenesisAccount{Balance: big.NewInt(1)}, // ECScalarMul
-	common.BytesToAddress([]byte{8}): GenesisAccount{Balance: big.NewInt(1)}, // ECPairing
-	faucet: GenesisAccount{Balance: new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(9))},
+	common.BytesToAddress([]byte{1}): {Balance: big.NewInt(1)}, // ECRecover
+	common.BytesToAddress([]byte{2}): {Balance: big.NewInt(1)}, // SHA256
+	common.BytesToAddress([]byte{3}): {Balance: big.NewInt(1)}, // RIPEMD
+	common.BytesToAddress([]byte{4}): {Balance: big.NewInt(1)}, // Identity
+	common.BytesToAddress([]byte{5}): {Balance: big.NewInt(1)}, // ModExp
+	common.BytesToAddress([]byte{6}): {Balance: big.NewInt(1)}, // ECAdd
+	common.BytesToAddress([]byte{7}): {Balance: big.NewInt(1)}, // ECScalarMul
+	common.BytesToAddress([]byte{8}): {Balance: big.NewInt(1)}, // ECPairing
+	faucet: {Balance: new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(9))},
},
}
}
......
......@@ -820,7 +820,7 @@ func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) []error {
// Only reprocess the internal state if something was actually added
if len(dirty) > 0 {
addrs := make([]common.Address, 0, len(dirty))
-	for addr, _ := range dirty {
+	for addr := range dirty {
addrs = append(addrs, addr)
}
pool.promoteExecutables(addrs)
......@@ -907,7 +907,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
// Gather all the accounts potentially needing updates
if accounts == nil {
accounts = make([]common.Address, 0, len(pool.queue))
-	for addr, _ := range pool.queue {
+	for addr := range pool.queue {
accounts = append(accounts, addr)
}
}
......
......@@ -105,7 +105,7 @@ func validateTxPoolInternals(pool *TxPool) error {
for addr, txs := range pool.pending {
// Find the last transaction
var last uint64
-	for nonce, _ := range txs.txs.items {
+	for nonce := range txs.txs.items {
if last < nonce {
last = nonce
}
......
......@@ -192,7 +192,7 @@ func BenchmarkNoBloomBits(b *testing.B) {
start := time.Now()
mux := new(event.TypeMux)
backend := &testBackend{mux, db, 0, new(event.Feed), new(event.Feed), new(event.Feed), new(event.Feed)}
-	filter := New(backend, 0, int64(headNum), []common.Address{common.Address{}}, nil)
+	filter := New(backend, 0, int64(headNum), []common.Address{{}}, nil)
filter.Logs(context.Background())
d := time.Since(start)
fmt.Println("Finished running filter benchmarks")
......
......@@ -56,7 +56,7 @@ func (eth *LightEthereum) startBloomHandlers() {
task.Bitsets = make([][]byte, len(task.Sections))
compVectors, err := light.GetBloomBits(task.Context, eth.odr, task.Bit, task.Sections)
if err == nil {
-	for i, _ := range task.Sections {
+	for i := range task.Sections {
if blob, err := bitutil.DecompressBytes(compVectors[i], int(light.BloomTrieFrequency/8)); err == nil {
task.Bitsets[i] = blob
} else {
......
......@@ -191,7 +191,7 @@ func (d *requestDistributor) nextRequest() (distPeer, *distReq, time.Duration) {
for (len(d.peers) > 0 || elem == d.reqQueue.Front()) && elem != nil {
req := elem.Value.(*distReq)
canSend := false
-	for peer, _ := range d.peers {
+	for peer := range d.peers {
if _, ok := checkedPeers[peer]; !ok && peer.canQueue() && req.canSend(peer) {
canSend = true
cost := req.getCost(peer)
......
......@@ -124,7 +124,7 @@ func testRequestDistributor(t *testing.T, resend bool) {
dist := newRequestDistributor(nil, stop)
var peers [testDistPeerCount]*testDistPeer
-	for i, _ := range peers {
+	for i := range peers {
peers[i] = &testDistPeer{}
go peers[i].worker(t, !resend, stop)
dist.registerTestPeer(peers[i])
......
......@@ -27,20 +27,20 @@ import (
// LesOdr implements light.OdrBackend
type LesOdr struct {
-	db ethdb.Database
+	db                                         ethdb.Database
chtIndexer, bloomTrieIndexer, bloomIndexer *core.ChainIndexer
-	retriever *retrieveManager
-	stop chan struct{}
+	retriever                                  *retrieveManager
+	stop                                       chan struct{}
}
func NewLesOdr(db ethdb.Database, chtIndexer, bloomTrieIndexer, bloomIndexer *core.ChainIndexer, retriever *retrieveManager) *LesOdr {
return &LesOdr{
-		db: db,
-		chtIndexer: chtIndexer,
-		bloomTrieIndexer: bloomTrieIndexer,
-		bloomIndexer: bloomIndexer,
-		retriever: retriever,
-		stop: make(chan struct{}),
+		db:               db,
+		chtIndexer:       chtIndexer,
+		bloomTrieIndexer: bloomTrieIndexer,
+		bloomIndexer:     bloomIndexer,
+		retriever:        retriever,
+		stop:             make(chan struct{}),
}
}
......
......@@ -290,7 +290,7 @@ func TestSubscriptionMultipleNamespaces(t *testing.T) {
for {
done := true
-	for id, _ := range count {
+	for id := range count {
if count, found := count[id]; !found || count < (2*n) {
done = false
}
......
......@@ -244,25 +244,25 @@ func TestClientFileList(t *testing.T) {
}
tests := map[string][]string{
"": []string{"dir1/", "dir2/", "file1.txt", "file2.txt"},
"file": []string{"file1.txt", "file2.txt"},
"file1": []string{"file1.txt"},
"file2.txt": []string{"file2.txt"},
"file12": []string{},
"dir": []string{"dir1/", "dir2/"},
"dir1": []string{"dir1/"},
"dir1/": []string{"dir1/file3.txt", "dir1/file4.txt"},
"dir1/file": []string{"dir1/file3.txt", "dir1/file4.txt"},
"dir1/file3.txt": []string{"dir1/file3.txt"},
"dir1/file34": []string{},
"dir2/": []string{"dir2/dir3/", "dir2/dir4/", "dir2/file5.txt"},
"dir2/file": []string{"dir2/file5.txt"},
"dir2/dir": []string{"dir2/dir3/", "dir2/dir4/"},
"dir2/dir3/": []string{"dir2/dir3/file6.txt"},
"dir2/dir4/": []string{"dir2/dir4/file7.txt", "dir2/dir4/file8.txt"},
"dir2/dir4/file": []string{"dir2/dir4/file7.txt", "dir2/dir4/file8.txt"},
"dir2/dir4/file7.txt": []string{"dir2/dir4/file7.txt"},
"dir2/dir4/file78": []string{},
"": {"dir1/", "dir2/", "file1.txt", "file2.txt"},
"file": {"file1.txt", "file2.txt"},
"file1": {"file1.txt"},
"file2.txt": {"file2.txt"},
"file12": {},
"dir": {"dir1/", "dir2/"},
"dir1": {"dir1/"},
"dir1/": {"dir1/file3.txt", "dir1/file4.txt"},
"dir1/file": {"dir1/file3.txt", "dir1/file4.txt"},
"dir1/file3.txt": {"dir1/file3.txt"},
"dir1/file34": {},
"dir2/": {"dir2/dir3/", "dir2/dir4/", "dir2/file5.txt"},
"dir2/file": {"dir2/file5.txt"},
"dir2/dir": {"dir2/dir3/", "dir2/dir4/"},
"dir2/dir3/": {"dir2/dir3/file6.txt"},
"dir2/dir4/": {"dir2/dir4/file7.txt", "dir2/dir4/file8.txt"},
"dir2/dir4/file": {"dir2/dir4/file7.txt", "dir2/dir4/file8.txt"},
"dir2/dir4/file7.txt": {"dir2/dir4/file7.txt"},
"dir2/dir4/file78": {},
}
for prefix, expected := range tests {
actual := ls(prefix)
......
......@@ -50,7 +50,6 @@ data_{i} := size(subtree_{i}) || key_{j} || key_{j+1} .... || key_{j+n-1}
The underlying hash function is configurable
*/
/*
Tree chunker is a concrete implementation of data chunking.
This chunker works in a simple way, it builds a tree out of the document so that each node either represents a chunk of real data or a chunk of data representing an branching non-leaf node of the tree. In particular each such non-leaf chunk will represent is a concatenation of the hash of its respective children. This scheme simultaneously guarantees data integrity as well as self addressing. Abstract nodes are transparent since their represented size component is strictly greater than their maximum data size, since they encode a subtree.
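To make the scheme described above concrete, here is a rough illustrative sketch (this is not Swarm's actual TreeChunker; the chunk size, branching factor, hash function and helper names are all assumptions made for illustration): each leaf hash covers a fixed-size slice of the document, and each parent is the hash of its children's hashes concatenated, repeated until a single root remains.

package main

import (
	"crypto/sha256"
	"fmt"
)

const chunkSize = 4096 // illustrative only, not the chunker's real parameterization

// hashLevel builds one tree level: each parent hash covers the
// concatenation of up to `branches` child hashes, mirroring the
// "non-leaf chunk is a concatenation of its children's hashes" idea.
func hashLevel(children [][]byte, branches int) [][]byte {
	var parents [][]byte
	for i := 0; i < len(children); i += branches {
		end := i + branches
		if end > len(children) {
			end = len(children)
		}
		var concat []byte
		for _, h := range children[i:end] {
			concat = append(concat, h...)
		}
		sum := sha256.Sum256(concat)
		parents = append(parents, sum[:])
	}
	return parents
}

func main() {
	data := make([]byte, 3*chunkSize+100) // a document slightly over three chunks

	// Leaf level: hash every fixed-size slice of the raw data.
	var level [][]byte
	for i := 0; i < len(data); i += chunkSize {
		end := i + chunkSize
		if end > len(data) {
			end = len(data)
		}
		sum := sha256.Sum256(data[i:end])
		level = append(level, sum[:])
	}

	// Collapse levels until a single root hash remains; that root
	// addresses the whole document (self-addressing).
	for len(level) > 1 {
		level = hashLevel(level, 128)
	}
	fmt.Printf("root: %x\n", level[0])
}

The real chunker additionally prefixes each chunk with the size of the subtree it covers (as in the data_{i} layout quoted above) and performs the hashing concurrently, which is what the worker-count and worker-lock fields in the hunks below are for.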
......@@ -61,17 +60,17 @@ The hashing itself does use extra copies and allocation though, since it does ne
var (
errAppendOppNotSuported = errors.New("Append operation not supported")
errOperationTimedOut = errors.New("operation timed out")
errOperationTimedOut = errors.New("operation timed out")
)
type TreeChunker struct {
branches int64
hashFunc SwarmHasher
// calculated
-	hashSize int64 // self.hashFunc.New().Size()
-	chunkSize int64 // hashSize* branches
-	workerCount int64 // the number of worker routines used
-	workerLock sync.RWMutex // lock for the worker count
+	hashSize    int64        // self.hashFunc.New().Size()
+	chunkSize   int64        // hashSize* branches
+	workerCount int64        // the number of worker routines used
+	workerLock  sync.RWMutex // lock for the worker count
}
func NewTreeChunker(params *ChunkerParams) (self *TreeChunker) {
......@@ -124,7 +123,6 @@ func (self *TreeChunker) Split(data io.Reader, size int64, chunkC chan *Chunk, s
panic("chunker must be initialised")
}
jobC := make(chan *hashJob, 2*ChunkProcessors)
wg := &sync.WaitGroup{}
errC := make(chan error)
......@@ -164,7 +162,6 @@ func (self *TreeChunker) Split(data io.Reader, size int64, chunkC chan *Chunk, s
close(errC)
}()
defer close(quitC)
select {
case err := <-errC:
......@@ -172,7 +169,7 @@ func (self *TreeChunker) Split(data io.Reader, size int64, chunkC chan *Chunk, s
return nil, err
}
case <-time.NewTimer(splitTimeout).C:
-	return nil,errOperationTimedOut
+	return nil, errOperationTimedOut
}
return key, nil
......
......@@ -123,7 +123,7 @@ type PyramidChunker struct {
hashSize int64
branches int64
workerCount int64
-	workerLock sync.RWMutex
+	workerLock  sync.RWMutex
}
func NewPyramidChunker(params *ChunkerParams) (self *PyramidChunker) {
......@@ -634,4 +634,4 @@ func (self *PyramidChunker) enqueueDataChunk(chunkData []byte, size uint64, pare
return pkey
-}
\ No newline at end of file
+}
......@@ -25,26 +25,26 @@ import (
// This table defines supported forks and their chain config.
var Forks = map[string]*params.ChainConfig{
"Frontier": &params.ChainConfig{
"Frontier": {
ChainId: big.NewInt(1),
},
"Homestead": &params.ChainConfig{
"Homestead": {
ChainId: big.NewInt(1),
HomesteadBlock: big.NewInt(0),
},
"EIP150": &params.ChainConfig{
"EIP150": {
ChainId: big.NewInt(1),
HomesteadBlock: big.NewInt(0),
EIP150Block: big.NewInt(0),
},
"EIP158": &params.ChainConfig{
"EIP158": {
ChainId: big.NewInt(1),
HomesteadBlock: big.NewInt(0),
EIP150Block: big.NewInt(0),
EIP155Block: big.NewInt(0),
EIP158Block: big.NewInt(0),
},
"Byzantium": &params.ChainConfig{
"Byzantium": {
ChainId: big.NewInt(1),
HomesteadBlock: big.NewInt(0),
EIP150Block: big.NewInt(0),
......@@ -53,22 +53,22 @@ var Forks = map[string]*params.ChainConfig{
DAOForkBlock: big.NewInt(0),
ByzantiumBlock: big.NewInt(0),
},
"FrontierToHomesteadAt5": &params.ChainConfig{
"FrontierToHomesteadAt5": {
ChainId: big.NewInt(1),
HomesteadBlock: big.NewInt(5),
},
"HomesteadToEIP150At5": &params.ChainConfig{
"HomesteadToEIP150At5": {
ChainId: big.NewInt(1),
HomesteadBlock: big.NewInt(0),
EIP150Block: big.NewInt(5),
},
"HomesteadToDaoAt5": &params.ChainConfig{
"HomesteadToDaoAt5": {
ChainId: big.NewInt(1),
HomesteadBlock: big.NewInt(0),
DAOForkBlock: big.NewInt(5),
DAOForkSupport: true,
},
"EIP158ToByzantiumAt5": &params.ChainConfig{
"EIP158ToByzantiumAt5": {
ChainId: big.NewInt(1),
HomesteadBlock: big.NewInt(0),
EIP150Block: big.NewInt(0),
......
......@@ -112,7 +112,7 @@ type stTransactionMarshaling struct {
func (t *StateTest) Subtests() []StateSubtest {
var sub []StateSubtest
for fork, pss := range t.json.Post {
-	for i, _ := range pss {
+	for i := range pss {
sub = append(sub, StateSubtest{fork, i})
}
}
......