// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"errors"
	"fmt"
	"math/big"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/params"
)

var (
	testdb, _   = ethdb.NewMemDatabase()
	testKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
	testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
	genesis     = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))
)

// makeChain creates a chain of n blocks starting at and including parent.
// The returned hash chain is ordered head->parent. In addition, every 3rd block
// contains a transaction and every 5th an uncle to allow testing correct block
// reassembly.
func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block) {
	// Generate the block chain
	blocks := core.GenerateChain(parent, testdb, n, func(i int, block *core.BlockGen) {
		block.SetCoinbase(common.Address{seed})

		// If the block number is a multiple of 3, send a bonus transaction to the miner
		if parent == genesis && i%3 == 0 {
			tx, err := types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(testKey)
			if err != nil {
				panic(err)
			}
			block.AddTx(tx)
		}
		// If the block number is a multiple of 5, add a bonus uncle to the block
		if i%5 == 0 {
			block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 1).Hash(), Number: big.NewInt(int64(i - 1))})
		}
	})
	// Convert the block-chain into a hash-chain and header/block maps
	hashes := make([]common.Hash, n+1)
	hashes[len(hashes)-1] = parent.Hash()

	headerm := make(map[common.Hash]*types.Header, n+1)
	headerm[parent.Hash()] = parent.Header()

	blockm := make(map[common.Hash]*types.Block, n+1)
	blockm[parent.Hash()] = parent

	for i, b := range blocks {
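		// GenerateChain returns the blocks oldest first, so fill the hash chain
		// from the back to keep it ordered head->parent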
		hashes[len(hashes)-i-2] = b.Hash()
		headerm[b.Hash()] = b.Header()
		blockm[b.Hash()] = b
	}
	return hashes, headerm, blockm
}

// makeChainFork creates two chains of length n, such that h1[:f] and
// h2[:f] are different but have a common suffix of length n-f.
func makeChainFork(n, f int, parent *types.Block) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block) {
	// Create the common suffix
	hashes, headers, blocks := makeChain(n-f, 0, parent)

	// Create the forks
	hashes1, headers1, blocks1 := makeChain(f, 1, blocks[hashes[0]])
	hashes1 = append(hashes1, hashes[1:]...)

	hashes2, headers2, blocks2 := makeChain(f, 2, blocks[hashes[0]])
	hashes2 = append(hashes2, hashes[1:]...)

	for hash, header := range headers {
		headers1[hash] = header
		headers2[hash] = header
	}
	for hash, block := range blocks {
		blocks1[hash] = block
		blocks2[hash] = block
	}
	return hashes1, hashes2, headers1, headers2, blocks1, blocks2
}

// downloadTester is a test simulator for mocking out the local block chain.
type downloadTester struct {
	downloader *Downloader

	ownHashes    []common.Hash                            // Hash chain belonging to the tester
	ownHeaders   map[common.Hash]*types.Header            // Headers belonging to the tester
	ownBlocks    map[common.Hash]*types.Block             // Blocks belonging to the tester
	ownReceipts  map[common.Hash]types.Receipts           // Receipts belonging to the tester
	ownChainTd   map[common.Hash]*big.Int                 // Total difficulties of the blocks in the local chain
	peerHashes   map[string][]common.Hash                 // Hash chain belonging to different test peers
	peerHeaders  map[string]map[common.Hash]*types.Header // Headers belonging to different test peers
	peerBlocks   map[string]map[common.Hash]*types.Block  // Blocks belonging to different test peers
	peerChainTds map[string]map[common.Hash]*big.Int      // Total difficulties of the blocks in the peer chains

	lock sync.RWMutex
}

// newTester creates a new downloader test mocker.
func newTester(mode SyncMode) *downloadTester {
	tester := &downloadTester{
		ownHashes:    []common.Hash{genesis.Hash()},
		ownHeaders:   map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
		ownBlocks:    map[common.Hash]*types.Block{genesis.Hash(): genesis},
		ownReceipts:  map[common.Hash]types.Receipts{genesis.Hash(): genesis.Receipts()},
		ownChainTd:   map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()},
		peerHashes:   make(map[string][]common.Hash),
		peerHeaders:  make(map[string]map[common.Hash]*types.Header),
		peerBlocks:   make(map[string]map[common.Hash]*types.Block),
		peerChainTds: make(map[string]map[common.Hash]*big.Int),
	}
	tester.downloader = New(mode, new(event.TypeMux), tester.hasHeader, tester.hasBlock, tester.getHeader, tester.getBlock,
		tester.headHeader, tester.headBlock, tester.getTd, tester.insertHeaders, tester.insertBlocks, tester.insertConfirmedBlocks, tester.dropPeer)

	return tester
}

// sync starts synchronizing with a remote peer, blocking until it completes.
func (dl *downloadTester) sync(id string, td *big.Int) error {
	dl.lock.RLock()
	hash := dl.peerHashes[id][0]
	// If no particular TD was requested, load from the peer's blockchain
	if td == nil {
		td = big.NewInt(1)
		if diff, ok := dl.peerChainTds[id][hash]; ok {
			td = diff
		}
	}
	dl.lock.RUnlock()

	err := dl.downloader.synchronise(id, hash, td)
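	// synchronise may return before the last downloaded blocks are imported, so
	// wait until both the download queue and the processor have gone fully idle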
	for {
		// If the queue is empty and processing stopped, break
		if dl.downloader.queue.Idle() && atomic.LoadInt32(&dl.downloader.processing) == 0 {
			break
		}
		// Otherwise sleep a bit and retry
		time.Sleep(time.Millisecond)
	}
	return err
}

// hasHeader checks if a header is present in the tester's canonical chain.
func (dl *downloadTester) hasHeader(hash common.Hash) bool {
	return dl.getHeader(hash) != nil
}

// hasBlock checks if a block is present in the tester's canonical chain.
func (dl *downloadTester) hasBlock(hash common.Hash) bool {
	return dl.getBlock(hash) != nil
}

// getHeader retrieves a header from the tester's canonical chain.
func (dl *downloadTester) getHeader(hash common.Hash) *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownHeaders[hash]
}

// getBlock retrieves a block from the tester's canonical chain.
func (dl *downloadTester) getBlock(hash common.Hash) *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownBlocks[hash]
}

// headHeader retrieves the current head header from the canonical chain.
func (dl *downloadTester) headHeader() *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.getHeader(dl.ownHashes[len(dl.ownHashes)-1])
}

// headBlock retrieves the current head block from the canonical chain.
func (dl *downloadTester) headBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.getBlock(dl.ownHashes[i]); block != nil {
			return block
		}
	}
	return nil
}

// getTd retrieves the block's total difficulty from the canonical chain.
func (dl *downloadTester) getTd(hash common.Hash) *big.Int {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.ownChainTd[hash]
}

// insertHeaders injects a new batch of headers into the simulated chain.
func (dl *downloadTester) insertHeaders(headers []*types.Header, verify bool) (int, error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i, header := range headers {
		if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
			return i, errors.New("unknown parent")
		}
		dl.ownHashes = append(dl.ownHashes, header.Hash())
		dl.ownHeaders[header.Hash()] = header
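		// Only the parent's TD is carried over, presumably because exact
		// difficulty accounting is not needed by these tests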
		dl.ownChainTd[header.Hash()] = dl.ownChainTd[header.ParentHash]
	}
	return len(headers), nil
}

// insertBlocks injects a new batch of blocks into the simulated chain.
func (dl *downloadTester) insertBlocks(blocks types.Blocks) (int, error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i, block := range blocks {
		if _, ok := dl.ownBlocks[block.ParentHash()]; !ok {
			return i, errors.New("unknown parent")
		}
		dl.ownHashes = append(dl.ownHashes, block.Hash())
		dl.ownHeaders[block.Hash()] = block.Header()
		dl.ownBlocks[block.Hash()] = block
		dl.ownChainTd[block.Hash()] = dl.ownChainTd[block.ParentHash()]
	}
	return len(blocks), nil
}

// insertConfirmedBlocks injects a new batch of blocks, along with their matching
// receipts, into the simulated chain.
func (dl *downloadTester) insertConfirmedBlocks(blocks types.Blocks, receipts []types.Receipts) (int, error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := 0; i < len(blocks) && i < len(receipts); i++ {
		if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
			return i, errors.New("unknown parent")
		}
		dl.ownHashes = append(dl.ownHashes, blocks[i].Hash())
		dl.ownHeaders[blocks[i].Hash()] = blocks[i].Header()
		dl.ownBlocks[blocks[i].Hash()] = blocks[i]
		dl.ownReceipts[blocks[i].Hash()] = blocks[i].Receipts()
		dl.ownChainTd[blocks[i].Hash()] = dl.ownChainTd[blocks[i].ParentHash()]
	}
	return len(blocks), nil
}

// newPeer registers a new block download source into the downloader.
func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block) error {
	return dl.newSlowPeer(id, version, hashes, headers, blocks, 0)
}

// newSlowPeer registers a new block download source into the downloader, with a
// specific delay time on processing the network packets sent to it, simulating
// potentially slow network IO.
func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, delay time.Duration) error {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	var err error
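	// Register protocol-appropriate retrieval callbacks: eth/61 is hash based,
	// eth/62 switches to header/body retrieval, and eth/63/64 also serve receipts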
	switch version {
	case 61:
		err = dl.downloader.RegisterPeer(id, version, hashes[0], dl.peerGetRelHashesFn(id, delay), dl.peerGetAbsHashesFn(id, delay), dl.peerGetBlocksFn(id, delay), nil, nil, nil, nil)
	case 62:
		err = dl.downloader.RegisterPeer(id, version, hashes[0], nil, nil, nil, dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), nil)
	case 63:
		err = dl.downloader.RegisterPeer(id, version, hashes[0], nil, nil, nil, dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay))
	case 64:
		err = dl.downloader.RegisterPeer(id, version, hashes[0], nil, nil, nil, dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay))
	}
	if err == nil {
		// Assign the owned hashes, headers and blocks to the peer (deep copy)
		dl.peerHashes[id] = make([]common.Hash, len(hashes))
		copy(dl.peerHashes[id], hashes)

		dl.peerHeaders[id] = make(map[common.Hash]*types.Header)
		dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
		dl.peerChainTds[id] = make(map[common.Hash]*big.Int)

		for _, hash := range hashes {
			if header, ok := headers[hash]; ok {
				dl.peerHeaders[id][hash] = header
				if _, ok := dl.peerHeaders[id][header.ParentHash]; ok {
					dl.peerChainTds[id][hash] = new(big.Int).Add(header.Difficulty, dl.peerChainTds[id][header.ParentHash])
				}
			}
			if block, ok := blocks[hash]; ok {
				dl.peerBlocks[id][hash] = block
				if _, ok := dl.peerBlocks[id][block.ParentHash()]; ok {
					dl.peerChainTds[id][hash] = new(big.Int).Add(block.Difficulty(), dl.peerChainTds[id][block.ParentHash()])
				}
			}
		}
	}
	return err
}

// dropPeer simulates a hard peer removal from the connection pool.
func (dl *downloadTester) dropPeer(id string) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	delete(dl.peerHashes, id)
	delete(dl.peerHeaders, id)
	delete(dl.peerBlocks, id)
	delete(dl.peerChainTds, id)

	dl.downloader.UnregisterPeer(id)
}

// peerGetRelHashesFn constructs a GetHashes function associated with a specific
// peer in the download tester. The returned function can be used to retrieve
// batches of hashes from that particular peer.
func (dl *downloadTester) peerGetRelHashesFn(id string, delay time.Duration) func(head common.Hash) error {
	return func(head common.Hash) error {
		time.Sleep(delay)

		dl.lock.RLock()
		defer dl.lock.RUnlock()

		// Gather the next batch of hashes
		hashes := dl.peerHashes[id]
		result := make([]common.Hash, 0, MaxHashFetch)
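		// The hash chain is stored head first, so everything following the
		// requested hash walks backwards towards the genesis block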
		for i, hash := range hashes {
			if hash == head {
				i++
				for len(result) < cap(result) && i < len(hashes) {
					result = append(result, hashes[i])
					i++
				}
				break
			}
		}
		// Delay delivery a bit to allow attacks to unfold
		go func() {
			time.Sleep(time.Millisecond)
			dl.downloader.DeliverHashes61(id, result)
		}()
		return nil
	}
}

// peerGetAbsHashesFn constructs a GetHashesFromNumber function associated with
// a particular peer in the download tester. The returned function can be used to
// retrieve batches of hashes from that particular peer.
func (dl *downloadTester) peerGetAbsHashesFn(id string, delay time.Duration) func(uint64, int) error {
	return func(head uint64, count int) error {
		time.Sleep(delay)

		dl.lock.RLock()
		defer dl.lock.RUnlock()

		// Gather the next batch of hashes
		hashes := dl.peerHashes[id]
		result := make([]common.Hash, 0, count)
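		// The hash chain is ordered head first, so block number N lives at slice
		// index len(hashes)-N-1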
		for i := 0; i < count && len(hashes)-int(head)-1-i >= 0; i++ {
			result = append(result, hashes[len(hashes)-int(head)-1-i])
		}
		// Delay delivery a bit to allow attacks to unfold
		go func() {
			time.Sleep(time.Millisecond)
			dl.downloader.DeliverHashes61(id, result)
		}()
		return nil
	}
}

// peerGetBlocksFn constructs a getBlocks function associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of blocks from that particular peer.
func (dl *downloadTester) peerGetBlocksFn(id string, delay time.Duration) func([]common.Hash) error {
	return func(hashes []common.Hash) error {
		time.Sleep(delay)

		dl.lock.RLock()
		defer dl.lock.RUnlock()

		blocks := dl.peerBlocks[id]
		result := make([]*types.Block, 0, len(hashes))
		for _, hash := range hashes {
			if block, ok := blocks[hash]; ok {
				result = append(result, block)
			}
		}
		go dl.downloader.DeliverBlocks61(id, result)

		return nil
	}
}

// peerGetRelHeadersFn constructs a GetBlockHeaders function based on a hashed
// origin, associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (dl *downloadTester) peerGetRelHeadersFn(id string, delay time.Duration) func(common.Hash, int, int, bool) error {
	return func(origin common.Hash, amount int, skip int, reverse bool) error {
		// Find the canonical number of the hash
		dl.lock.RLock()
		number := uint64(0)
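		// peerHashes is ordered head first, so invert the slice position to
		// recover the block number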
		for num, hash := range dl.peerHashes[id] {
			if hash == origin {
				number = uint64(len(dl.peerHashes[id]) - num - 1)
				break
			}
		}
		dl.lock.RUnlock()

		// Use the absolute header fetcher to satisfy the query
		return dl.peerGetAbsHeadersFn(id, delay)(number, amount, skip, reverse)
	}
}

// peerGetAbsHeadersFn constructs a GetBlockHeaders function based on a numbered
// origin, associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (dl *downloadTester) peerGetAbsHeadersFn(id string, delay time.Duration) func(uint64, int, int, bool) error {
	return func(origin uint64, amount int, skip int, reverse bool) error {
		time.Sleep(delay)

		dl.lock.RLock()
		defer dl.lock.RUnlock()

		// Gather the next batch of headers
		hashes := dl.peerHashes[id]
		headers := dl.peerHeaders[id]
		result := make([]*types.Header, 0, amount)
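		// Map the numbered origin onto the head-first hash chain; note that this
		// mock ignores the skip and reverse arguments and always returns a
		// contiguous forward batch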
		for i := 0; i < amount && len(hashes)-int(origin)-1-i >= 0; i++ {
			if header, ok := headers[hashes[len(hashes)-int(origin)-1-i]]; ok {
				result = append(result, header)
			}
		}
		// Delay delivery a bit to allow attacks to unfold
		go func() {
			time.Sleep(time.Millisecond)
			dl.downloader.DeliverHeaders(id, result)
		}()
		return nil
	}
}

// peerGetBodiesFn constructs a getBlockBodies method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of block bodies from that particular peer.
func (dl *downloadTester) peerGetBodiesFn(id string, delay time.Duration) func([]common.Hash) error {
	return func(hashes []common.Hash) error {
		time.Sleep(delay)

		dl.lock.RLock()
		defer dl.lock.RUnlock()

		blocks := dl.peerBlocks[id]

		transactions := make([][]*types.Transaction, 0, len(hashes))
		uncles := make([][]*types.Header, 0, len(hashes))

		for _, hash := range hashes {
			if block, ok := blocks[hash]; ok {
				transactions = append(transactions, block.Transactions())
				uncles = append(uncles, block.Uncles())
			}
		}
		go dl.downloader.DeliverBodies(id, transactions, uncles)

		return nil
	}
}

// peerGetReceiptsFn constructs a getReceipts method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of block receipts from that particular peer.
func (dl *downloadTester) peerGetReceiptsFn(id string, delay time.Duration) func([]common.Hash) error {
	return func(hashes []common.Hash) error {
		time.Sleep(delay)

		dl.lock.RLock()
		defer dl.lock.RUnlock()

		blocks := dl.peerBlocks[id]

		receipts := make([][]*types.Receipt, 0, len(hashes))
		for _, hash := range hashes {
			if block, ok := blocks[hash]; ok {
				receipts = append(receipts, block.Receipts())
			}
		}
		go dl.downloader.DeliverReceipts(id, receipts)

		return nil
	}
}

// assertOwnChain checks if the local chain contains the correct number of items
// of the various chain components.
func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
	headers, blocks, receipts := length, length, length
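	// Adjust expectations for the sync mode: full sync stores no receipts beyond
	// the genesis entry, and light sync stores neither blocks nor receipts
	// beyond the genesis entries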
	switch tester.downloader.mode {
	case FullSync:
		receipts = 1
	case LightSync:
		blocks, receipts = 1, 1
	}

	if hs := len(tester.ownHeaders); hs != headers {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
	}
	if bs := len(tester.ownBlocks); bs != blocks {
		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
	}
	if rs := len(tester.ownReceipts); rs != receipts {
		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
	}
}

// Tests that simple synchronization against a canonical chain works correctly.
// In this test, common ancestor lookup should be short-circuited and not require
// binary searching.
func TestCanonicalSynchronisation61(t *testing.T)      { testCanonicalSynchronisation(t, 61, FullSync) }
func TestCanonicalSynchronisation62(t *testing.T)      { testCanonicalSynchronisation(t, 62, FullSync) }
func TestCanonicalSynchronisation63Full(t *testing.T)  { testCanonicalSynchronisation(t, 63, FullSync) }
func TestCanonicalSynchronisation63Fast(t *testing.T)  { testCanonicalSynchronisation(t, 63, FastSync) }
func TestCanonicalSynchronisation64Full(t *testing.T)  { testCanonicalSynchronisation(t, 64, FullSync) }
func TestCanonicalSynchronisation64Fast(t *testing.T)  { testCanonicalSynchronisation(t, 64, FastSync) }
func TestCanonicalSynchronisation64Light(t *testing.T) { testCanonicalSynchronisation(t, 64, LightSync) }

func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	// Create a small enough block chain to download
	targetBlocks := blockCacheLimit - 15
	hashes, headers, blocks := makeChain(targetBlocks, 0, genesis)

	tester := newTester(mode)
	tester.newPeer("peer", protocol, hashes, headers, blocks)

	// Synchronise with the peer and make sure all relevant data was retrieved
	if err := tester.sync("peer", nil); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}

// Tests that if a large batch of blocks is being downloaded, it is throttled
// until the cached blocks are retrieved.
func TestThrottling61(t *testing.T)     { testThrottling(t, 61, FullSync) }
func TestThrottling62(t *testing.T)     { testThrottling(t, 62, FullSync) }
func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }

func testThrottling(t *testing.T, protocol int, mode SyncMode) {
	// Create a long block chain to download and the tester
	targetBlocks := 8 * blockCacheLimit
	hashes, headers, blocks := makeChain(targetBlocks, 0, genesis)

	tester := newTester(mode)
	tester.newPeer("peer", protocol, hashes, headers, blocks)

	// Wrap the importer to allow stepping
	blocked, proceed := uint32(0), make(chan struct{})
	tester.downloader.chainInsertHook = func(results []*fetchResult) {
		atomic.StoreUint32(&blocked, uint32(len(results)))
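		// Stall the import until the test explicitly releases it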
		<-proceed
	}
	// Start a synchronisation concurrently
	errc := make(chan error)
	go func() {
		errc <- tester.sync("peer", nil)
	}()
	// Iteratively take some blocks, always checking the retrieval count
	for {
		// Check the retrieval count synchronously (the reason for this ugly block)
		tester.lock.RLock()
		retrieved := len(tester.ownBlocks)
		tester.lock.RUnlock()
		if retrieved >= targetBlocks+1 {
			break
		}
		// Wait a bit for sync to throttle itself
		var cached int
		for start := time.Now(); time.Since(start) < time.Second; {
			time.Sleep(25 * time.Millisecond)

			tester.downloader.queue.lock.RLock()
			cached = len(tester.downloader.queue.blockDonePool)
			if mode == FastSync {
				if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
					cached = receipts
				}
			}
			tester.downloader.queue.lock.RUnlock()

			if cached == blockCacheLimit || len(tester.ownBlocks)+cached+int(atomic.LoadUint32(&blocked)) == targetBlocks+1 {
				break
			}
		}
		// Make sure we filled up the cache, then exhaust it
		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
		if cached != blockCacheLimit && len(tester.ownBlocks)+cached+int(atomic.LoadUint32(&blocked)) != targetBlocks+1 {
			t.Fatalf("block count mismatch: have %v, want %v (owned %v, target %v)", cached, blockCacheLimit, len(tester.ownBlocks), targetBlocks+1)
		}
		// Permit the blocked blocks to import
		if atomic.LoadUint32(&blocked) > 0 {
			atomic.StoreUint32(&blocked, uint32(0))
			proceed <- struct{}{}
		}
	}
	// Check that we haven't pulled more blocks than available
	assertOwnChain(t, tester, targetBlocks+1)
	if err := <-errc; err != nil {
		t.Fatalf("block synchronization failed: %v", err)
	}
}

// Tests that simple synchronization against a forked chain works correctly. In
// this test, common ancestor lookup should *not* be short-circuited, and a full
// binary search should be executed.
func TestForkedSynchronisation61(t *testing.T)      { testForkedSynchronisation(t, 61, FullSync) }
func TestForkedSynchronisation62(t *testing.T)      { testForkedSynchronisation(t, 62, FullSync) }
func TestForkedSynchronisation63Full(t *testing.T)  { testForkedSynchronisation(t, 63, FullSync) }
func TestForkedSynchronisation63Fast(t *testing.T)  { testForkedSynchronisation(t, 63, FastSync) }
func TestForkedSynchronisation64Full(t *testing.T)  { testForkedSynchronisation(t, 64, FullSync) }
func TestForkedSynchronisation64Fast(t *testing.T)  { testForkedSynchronisation(t, 64, FastSync) }
func TestForkedSynchronisation64Light(t *testing.T) { testForkedSynchronisation(t, 64, LightSync) }

func testForkedSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	// Create a long enough forked chain
	common, fork := MaxHashFetch, 2*MaxHashFetch
	hashesA, hashesB, headersA, headersB, blocksA, blocksB := makeChainFork(common+fork, fork, genesis)

	tester := newTester(mode)
	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA)
	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("fork A", nil); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+fork+1)

	// Synchronise with the second peer and make sure the fork is pulled too
	if err := tester.sync("fork B", nil); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, common+2*fork+1)
}

// Tests that an inactive downloader will not accept incoming hashes and blocks.
func TestInactiveDownloader61(t *testing.T) {
	tester := newTester(FullSync)

	// Check that neither hashes nor blocks are accepted
	if err := tester.downloader.DeliverHashes61("bad peer", []common.Hash{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBlocks61("bad peer", []*types.Block{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that an inactive downloader will not accept incoming block headers and
// bodies.
func TestInactiveDownloader62(t *testing.T) {
	tester := newTester(FullSync)

	// Check that neither block headers nor bodies are accepted
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that an inactive downloader will not accept incoming block headers,
// bodies and receipts.
func TestInactiveDownloader63(t *testing.T) {
	tester := newTester(FullSync)

	// Check that block headers, bodies and receipts are all rejected
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that a canceled download wipes all previously accumulated state.
func TestCancel61(t *testing.T)      { testCancel(t, 61, FullSync) }
func TestCancel62(t *testing.T)      { testCancel(t, 62, FullSync) }
func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
func TestCancel63Fast(t *testing.T)  { testCancel(t, 63, FastSync) }
func TestCancel64Full(t *testing.T)  { testCancel(t, 64, FullSync) }
func TestCancel64Fast(t *testing.T)  { testCancel(t, 64, FastSync) }
func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }

func testCancel(t *testing.T, protocol int, mode SyncMode) {
	// Create a small enough block chain to download and the tester
	targetBlocks := blockCacheLimit - 15
	if targetBlocks >= MaxHashFetch {
		targetBlocks = MaxHashFetch - 15
	}
	if targetBlocks >= MaxHeaderFetch {
		targetBlocks = MaxHeaderFetch - 15
	}
	hashes, headers, blocks := makeChain(targetBlocks, 0, genesis)

	tester := newTester(mode)
	tester.newPeer("peer", protocol, hashes, headers, blocks)

	// Make sure canceling works with a pristine downloader
	tester.downloader.cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
	// Synchronise with the peer, but cancel afterwards
	if err := tester.sync("peer", nil); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	tester.downloader.cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
}

// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
func TestMultiSynchronisation61(t *testing.T)      { testMultiSynchronisation(t, 61, FullSync) }
func TestMultiSynchronisation62(t *testing.T)      { testMultiSynchronisation(t, 62, FullSync) }
func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
func TestMultiSynchronisation63Fast(t *testing.T)  { testMultiSynchronisation(t, 63, FastSync) }
func TestMultiSynchronisation64Full(t *testing.T)  { testMultiSynchronisation(t, 64, FullSync) }
func TestMultiSynchronisation64Fast(t *testing.T)  { testMultiSynchronisation(t, 64, FastSync) }
func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }

func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	// Create various peers with various parts of the chain
	targetPeers := 8
	targetBlocks := targetPeers*blockCacheLimit - 15
	hashes, headers, blocks := makeChain(targetBlocks, 0, genesis)

	tester := newTester(mode)
	for i := 0; i < targetPeers; i++ {
		id := fmt.Sprintf("peer #%d", i)
		tester.newPeer(id, protocol, hashes[i*blockCacheLimit:], headers, blocks)
	}
	// Synchronise with the middle peer and make sure half of the blocks were retrieved
	id := fmt.Sprintf("peer #%d", targetPeers/2)
	if err := tester.sync(id, nil); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(tester.peerHashes[id]))

	// Synchronise with the best peer and make sure everything is retrieved
	if err := tester.sync("peer #0", nil); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}

// Tests that synchronisations behave well in multi-version protocol environments
// and do not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation61(t *testing.T)      { testMultiProtoSync(t, 61, FullSync) }
func TestMultiProtoSynchronisation62(t *testing.T)      { testMultiProtoSync(t, 62, FullSync) }
func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
func TestMultiProtoSynchronisation63Fast(t *testing.T)  { testMultiProtoSync(t, 63, FastSync) }
func TestMultiProtoSynchronisation64Full(t *testing.T)  { testMultiProtoSync(t, 64, FullSync) }
func TestMultiProtoSynchronisation64Fast(t *testing.T)  { testMultiProtoSync(t, 64, FastSync) }
func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }

func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
	// Create a small enough block chain to download
	targetBlocks := blockCacheLimit - 15
	hashes, headers, blocks := makeChain(targetBlocks, 0, genesis)

	// Create peers of every type
	tester := newTester(mode)
	tester.newPeer("peer 61", 61, hashes, headers, blocks)
	tester.newPeer("peer 62", 62, hashes, headers, blocks)
	tester.newPeer("peer 63", 63, hashes, headers, blocks)
	tester.newPeer("peer 64", 64, hashes, headers, blocks)

	// Synchronise with the requested peer and make sure all blocks were retrieved
	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	if imported := len(tester.ownBlocks); imported != targetBlocks+1 {
		t.Fatalf("synchronised block mismatch: have %v, want %v", imported, targetBlocks+1)
	}
	// Check that no peers have been dropped off
	for _, version := range []int{61, 62, 63, 64} {
		peer := fmt.Sprintf("peer %d", version)
		if _, ok := tester.peerHashes[peer]; !ok {
			t.Errorf("%s dropped", peer)
		}
	}
}

// Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself.
func TestEmptyShortCircuit62(t *testing.T)      { testEmptyShortCircuit(t, 62, FullSync) }
func TestEmptyShortCircuit63Full(t *testing.T)  { testEmptyShortCircuit(t, 63, FullSync) }
func TestEmptyShortCircuit63Fast(t *testing.T)  { testEmptyShortCircuit(t, 63, FastSync) }
func TestEmptyShortCircuit64Full(t *testing.T)  { testEmptyShortCircuit(t, 64, FullSync) }
func TestEmptyShortCircuit64Fast(t *testing.T)  { testEmptyShortCircuit(t, 64, FastSync) }
func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }

func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
	// Create a small enough block chain to download
	targetBlocks := blockCacheLimit - 15
	hashes, headers, blocks := makeChain(targetBlocks, 0, genesis)

	tester := newTester(mode)
	tester.newPeer("peer", protocol, hashes, headers, blocks)

	// Instrument the downloader to signal body requests
	bodies, receipts := int32(0), int32(0)
	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&bodies, int32(len(headers)))
	}
	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&receipts, int32(len(headers)))
	}
	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("peer", nil); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)

	// Validate the number of block bodies that should have been requested
	bodiesNeeded, receiptsNeeded := 0, 0
	for _, block := range blocks {
		if mode != LightSync && block != genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
			bodiesNeeded++
		}
		if mode == FastSync && block != genesis && len(block.Receipts()) > 0 {
			receiptsNeeded++
		}
	}
	if int(bodies) != bodiesNeeded {
		t.Errorf("body retrieval count mismatch: have %v, want %v", bodies, bodiesNeeded)
	}
	if int(receipts) != receiptsNeeded {
		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receipts, receiptsNeeded)
	}
}

// Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains.
func TestMissingHeaderAttack62(t *testing.T)      { testMissingHeaderAttack(t, 62, FullSync) }
func TestMissingHeaderAttack63Full(t *testing.T)  { testMissingHeaderAttack(t, 63, FullSync) }
func TestMissingHeaderAttack63Fast(t *testing.T)  { testMissingHeaderAttack(t, 63, FastSync) }
func TestMissingHeaderAttack64Full(t *testing.T)  { testMissingHeaderAttack(t, 64, FullSync) }
func TestMissingHeaderAttack64Fast(t *testing.T)  { testMissingHeaderAttack(t, 64, FastSync) }
func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }

func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
	// Create a small enough block chain to download
	targetBlocks := blockCacheLimit - 15
	hashes, headers, blocks := makeChain(targetBlocks, 0, genesis)

	tester := newTester(mode)

	// Attempt a full sync with an attacker feeding gapped headers
	tester.newPeer("attack", protocol, hashes, headers, blocks)
	missing := targetBlocks / 2
	delete(tester.peerHeaders["attack"], hashes[missing])
	delete(tester.peerBlocks["attack"], hashes[missing])

	if err := tester.sync("attack", nil); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, hashes, headers, blocks)
	if err := tester.sync("valid", nil); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}

// Tests that if requested headers are shifted (i.e. first is missing), the queue
// detects the invalid numbering.
func TestShiftedHeaderAttack62(t *testing.T)      { testShiftedHeaderAttack(t, 62, FullSync) }
func TestShiftedHeaderAttack63Full(t *testing.T)  { testShiftedHeaderAttack(t, 63, FullSync) }
func TestShiftedHeaderAttack63Fast(t *testing.T)  { testShiftedHeaderAttack(t, 63, FastSync) }
func TestShiftedHeaderAttack64Full(t *testing.T)  { testShiftedHeaderAttack(t, 64, FullSync) }
func TestShiftedHeaderAttack64Fast(t *testing.T)  { testShiftedHeaderAttack(t, 64, FastSync) }
func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }

func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
	// Create a small enough block chain to download
	targetBlocks := blockCacheLimit - 15
	hashes, headers, blocks := makeChain(targetBlocks, 0, genesis)

	tester := newTester(mode)

	// Attempt a full sync with an attacker feeding shifted headers
	tester.newPeer("attack", protocol, hashes, headers, blocks)
	delete(tester.peerHeaders["attack"], hashes[len(hashes)-2])
	delete(tester.peerBlocks["attack"], hashes[len(hashes)-2])

	if err := tester.sync("attack", nil); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, hashes, headers, blocks)
	if err := tester.sync("valid", nil); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
}

// Tests that if a peer sends an invalid block piece (body or receipt) for a
// requested block, it gets dropped immediately by the downloader.
func TestInvalidContentAttack62(t *testing.T)      { testInvalidContentAttack(t, 62, FullSync) }
func TestInvalidContentAttack63Full(t *testing.T)  { testInvalidContentAttack(t, 63, FullSync) }
func TestInvalidContentAttack63Fast(t *testing.T)  { testInvalidContentAttack(t, 63, FastSync) }
func TestInvalidContentAttack64Full(t *testing.T)  { testInvalidContentAttack(t, 64, FullSync) }
func TestInvalidContentAttack64Fast(t *testing.T)  { testInvalidContentAttack(t, 64, FastSync) }
func TestInvalidContentAttack64Light(t *testing.T) { testInvalidContentAttack(t, 64, LightSync) }

func testInvalidContentAttack(t *testing.T, protocol int, mode SyncMode) {
	// Create two peers, one feeding invalid block bodies
	targetBlocks := 4*blockCacheLimit - 15
	hashes, headers, validBlocks := makeChain(targetBlocks, 0, genesis)

	invalidBlocks := make(map[common.Hash]*types.Block)
	for hash, block := range validBlocks {
		invalidBlocks[hash] = types.NewBlockWithHeader(block.Header())
	}
	invalidReceipts := make(map[common.Hash]*types.Block)
	for hash, block := range validBlocks {
		invalidReceipts[hash] = types.NewBlockWithHeader(block.Header()).WithBody(block.Transactions(), block.Uncles())
	}

	tester := newTester(mode)
	tester.newPeer("valid", protocol, hashes, headers, validBlocks)
	if mode != LightSync {
		tester.newPeer("body attack", protocol, hashes, headers, invalidBlocks)
	}
	if mode == FastSync {
		tester.newPeer("receipt attack", protocol, hashes, headers, invalidReceipts)
	}
	// Synchronise with the valid peer (will pull contents from the attacker too)
	if err := tester.sync("valid", nil); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)

	// Make sure the attacker was detected and dropped in the meantime
	if _, ok := tester.peerHashes["body attack"]; ok {
		t.Fatalf("block body attacker not detected/dropped")
	}
	if _, ok := tester.peerHashes["receipt attack"]; ok {
		t.Fatalf("receipt attacker not detected/dropped")
	}
}

// Tests that a peer advertising a high TD doesn't get to stall the downloader
// afterwards by not sending any useful hashes.
func TestHighTDStarvationAttack61(t *testing.T)      { testHighTDStarvationAttack(t, 61, FullSync) }
func TestHighTDStarvationAttack62(t *testing.T)      { testHighTDStarvationAttack(t, 62, FullSync) }
func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
func TestHighTDStarvationAttack63Fast(t *testing.T)  { testHighTDStarvationAttack(t, 63, FastSync) }
func TestHighTDStarvationAttack64Full(t *testing.T)  { testHighTDStarvationAttack(t, 64, FullSync) }
func TestHighTDStarvationAttack64Fast(t *testing.T)  { testHighTDStarvationAttack(t, 64, FastSync) }
func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }

func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
	tester := newTester(mode)
	hashes, headers, blocks := makeChain(0, 0, genesis)

	tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks)
	if err := tester.sync("attack", big.NewInt(1000000)); err != errStallingPeer {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
	}
}

// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
func TestBlockHeaderAttackerDropping61(t *testing.T) { testBlockHeaderAttackerDropping(t, 61) }
func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }

func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
	// Define the disconnection requirement for individual hash fetch errors
	tests := []struct {
		result error
		drop   bool
	}{
		{nil, false},                  // Sync succeeded, all is well
		{errBusy, false},              // Sync is already in progress, no problem
		{errUnknownPeer, false},       // Peer is unknown, was already dropped, don't double drop
		{errBadPeer, true},            // Peer was deemed bad for some reason, drop it
		{errStallingPeer, true},       // Peer was detected to be stalling, drop it
		{errNoPeers, false},           // No peers to download from, soft race, no issue
		{errPendingQueue, false},      // There are blocks still cached, wait to exhaust, no issue
		{errTimeout, true},            // No hashes received in due time, drop the peer
		{errEmptyHashSet, true},       // No hashes were returned as a response, drop as it's a dead end
		{errEmptyHeaderSet, true},     // No headers were returned as a response, drop as it's a dead end
		{errPeersUnavailable, true},   // Nobody had the advertised blocks, drop the advertiser
		{errInvalidChain, true},       // Hash chain was detected as invalid, definitely drop
		{errInvalidBlock, false},      // A bad peer was detected, but not the sync origin
		{errInvalidBody, false},       // A bad peer was detected, but not the sync origin
		{errInvalidReceipt, false},    // A bad peer was detected, but not the sync origin
		{errCancelHashFetch, false},   // Synchronisation was canceled, origin may be innocent, don't drop
		{errCancelBlockFetch, false},  // Synchronisation was canceled, origin may be innocent, don't drop
		{errCancelHeaderFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
		{errCancelBodyFetch, false},   // Synchronisation was canceled, origin may be innocent, don't drop
	}
	// Run the tests and check disconnection status
	tester := newTester(FullSync)
	for i, tt := range tests {
		// Register a new peer and ensure its presence
		id := fmt.Sprintf("test %d", i)
		if err := tester.newPeer(id, protocol, []common.Hash{genesis.Hash()}, nil, nil); err != nil {
			t.Fatalf("test %d: failed to register new peer: %v", i, err)
		}
		if _, ok := tester.peerHashes[id]; !ok {
			t.Fatalf("test %d: registered peer not found", i)
		}
		// Simulate a synchronisation and check the required result
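		// (the mock bypasses the real sync logic and simply returns the canned error)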
		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }

		tester.downloader.Synchronise(id, genesis.Hash(), big.NewInt(1000))
		if _, ok := tester.peerHashes[id]; !ok != tt.drop {
			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
		}
	}
}

// Tests that synchronisation boundaries (origin block number and highest block
// number) are tracked and updated correctly.
func TestSyncBoundaries61(t *testing.T)      { testSyncBoundaries(t, 61, FullSync) }
func TestSyncBoundaries62(t *testing.T)      { testSyncBoundaries(t, 62, FullSync) }
func TestSyncBoundaries63Full(t *testing.T)  { testSyncBoundaries(t, 63, FullSync) }
func TestSyncBoundaries63Fast(t *testing.T)  { testSyncBoundaries(t, 63, FastSync) }
func TestSyncBoundaries64Full(t *testing.T)  { testSyncBoundaries(t, 64, FullSync) }
func TestSyncBoundaries64Fast(t *testing.T)  { testSyncBoundaries(t, 64, FastSync) }
func TestSyncBoundaries64Light(t *testing.T) { testSyncBoundaries(t, 64, LightSync) }

func testSyncBoundaries(t *testing.T, protocol int, mode SyncMode) {
	// Create a small enough block chain to download
	targetBlocks := blockCacheLimit - 15
	hashes, headers, blocks := makeChain(targetBlocks, 0, genesis)

	// Set a sync init hook to catch boundary changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester := newTester(mode)
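	// The init hook blocks each sync right after it starts, so the boundaries
	// can be sampled at a deterministic point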
	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	// Retrieve the sync boundaries and ensure they are zero (pristine sync)
	if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != 0 {
		t.Fatalf("Pristine boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, 0)
	}
	// Synchronise half the blocks and check initial boundaries
	tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks)
	pending := new(sync.WaitGroup)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("peer-half", nil); err != nil {
			t.Fatalf("failed to synchronise blocks: %v", err)
		}
	}()
	<-starting
	if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != uint64(targetBlocks/2+1) {
		t.Fatalf("Initial boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, targetBlocks/2+1)
	}
	progress <- struct{}{}
	pending.Wait()

	// Synchronise all the blocks and check continuation boundaries
	tester.newPeer("peer-full", protocol, hashes, headers, blocks)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("peer-full", nil); err != nil {
			t.Fatalf("failed to synchronise blocks: %v", err)
		}
	}()
	<-starting
	if origin, latest := tester.downloader.Boundaries(); origin != uint64(targetBlocks/2+1) || latest != uint64(targetBlocks) {
		t.Fatalf("Completing boundary mismatch: have %v/%v, want %v/%v", origin, latest, targetBlocks/2+1, targetBlocks)
	}
	progress <- struct{}{}
	pending.Wait()
}

// Tests that synchronisation boundaries (origin block number and highest block
// number) are tracked and updated correctly in case of a fork (or manual head
// reversal).
func TestForkedSyncBoundaries61(t *testing.T)      { testForkedSyncBoundaries(t, 61, FullSync) }
func TestForkedSyncBoundaries62(t *testing.T)      { testForkedSyncBoundaries(t, 62, FullSync) }
func TestForkedSyncBoundaries63Full(t *testing.T)  { testForkedSyncBoundaries(t, 63, FullSync) }
func TestForkedSyncBoundaries63Fast(t *testing.T)  { testForkedSyncBoundaries(t, 63, FastSync) }
func TestForkedSyncBoundaries64Full(t *testing.T)  { testForkedSyncBoundaries(t, 64, FullSync) }
func TestForkedSyncBoundaries64Fast(t *testing.T)  { testForkedSyncBoundaries(t, 64, FastSync) }
func TestForkedSyncBoundaries64Light(t *testing.T) { testForkedSyncBoundaries(t, 64, LightSync) }

func testForkedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) {
	// Create a forked chain to simulate origin reversal
	common, fork := MaxHashFetch, 2*MaxHashFetch
	hashesA, hashesB, headersA, headersB, blocksA, blocksB := makeChainFork(common+fork, fork, genesis)

	// Set a sync init hook to catch boundary changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester := newTester(mode)
	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	// Retrieve the sync boundaries and ensure they are zero (pristine sync)
	if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != 0 {
		t.Fatalf("Pristine boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, 0)
	}
	// Synchronise with one of the forks and check boundaries
	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA)
	pending := new(sync.WaitGroup)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("fork A", nil); err != nil {
			t.Fatalf("failed to synchronise blocks: %v", err)
		}
	}()
	<-starting
	if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != uint64(len(hashesA)-1) {
		t.Fatalf("Initial boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, len(hashesA)-1)
	}
	progress <- struct{}{}
	pending.Wait()

	// Simulate a successful sync above the fork
	tester.downloader.syncStatsOrigin = tester.downloader.syncStatsHeight

	// Synchronise with the second fork and check boundary resets
	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("fork B", nil); err != nil {
			t.Fatalf("failed to synchronise blocks: %v", err)
		}
	}()
	<-starting
	if origin, latest := tester.downloader.Boundaries(); origin != uint64(common) || latest != uint64(len(hashesB)-1) {
		t.Fatalf("Forking boundary mismatch: have %v/%v, want %v/%v", origin, latest, common, len(hashesB)-1)
	}
	progress <- struct{}{}
	pending.Wait()
}

// Tests that if synchronisation is aborted due to some failure, then the boundary
// origin is not updated in the next sync cycle, as it should be considered the
// continuation of the previous sync and not a new instance.
func TestFailedSyncBoundaries61(t *testing.T)      { testFailedSyncBoundaries(t, 61, FullSync) }
func TestFailedSyncBoundaries62(t *testing.T)      { testFailedSyncBoundaries(t, 62, FullSync) }
func TestFailedSyncBoundaries63Full(t *testing.T)  { testFailedSyncBoundaries(t, 63, FullSync) }
func TestFailedSyncBoundaries63Fast(t *testing.T)  { testFailedSyncBoundaries(t, 63, FastSync) }
func TestFailedSyncBoundaries64Full(t *testing.T)  { testFailedSyncBoundaries(t, 64, FullSync) }
func TestFailedSyncBoundaries64Fast(t *testing.T)  { testFailedSyncBoundaries(t, 64, FastSync) }
func TestFailedSyncBoundaries64Light(t *testing.T) { testFailedSyncBoundaries(t, 64, LightSync) }

func testFailedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) {
	// Create a small enough block chain to download
	targetBlocks := blockCacheLimit - 15
	hashes, headers, blocks := makeChain(targetBlocks, 0, genesis)

	// Set a sync init hook to catch boundary changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester := newTester(mode)
	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	// Retrieve the sync boundaries and ensure they are zero (pristine sync)
	if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != 0 {
		t.Fatalf("Pristine boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, 0)
	}
	// Attempt a full sync with a faulty peer
	tester.newPeer("faulty", protocol, hashes, headers, blocks)
	missing := targetBlocks / 2
	delete(tester.peerHeaders["faulty"], hashes[missing])
	delete(tester.peerBlocks["faulty"], hashes[missing])

	pending := new(sync.WaitGroup)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("faulty", nil); err == nil {
			t.Fatalf("succeeded faulty synchronisation")
		}
	}()
	<-starting
	if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != uint64(targetBlocks) {
		t.Fatalf("Initial boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, targetBlocks)
	}
	progress <- struct{}{}
	pending.Wait()

	// Synchronise with a good peer and check that the boundary origin remains the same after a failure
	tester.newPeer("valid", protocol, hashes, headers, blocks)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("valid", nil); err != nil {
			t.Fatalf("failed to synchronise blocks: %v", err)
		}
	}()
	<-starting
	if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != uint64(targetBlocks) {
		t.Fatalf("Completing boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, targetBlocks)
	}
	progress <- struct{}{}
	pending.Wait()
}

// Tests that if an attacker fakes a chain height, after the attack is detected,
// the boundary height is successfully reduced at the next sync invocation.
func TestFakedSyncBoundaries61(t *testing.T)      { testFakedSyncBoundaries(t, 61, FullSync) }
func TestFakedSyncBoundaries62(t *testing.T)      { testFakedSyncBoundaries(t, 62, FullSync) }
func TestFakedSyncBoundaries63Full(t *testing.T)  { testFakedSyncBoundaries(t, 63, FullSync) }
func TestFakedSyncBoundaries63Fast(t *testing.T)  { testFakedSyncBoundaries(t, 63, FastSync) }
func TestFakedSyncBoundaries64Full(t *testing.T)  { testFakedSyncBoundaries(t, 64, FullSync) }
func TestFakedSyncBoundaries64Fast(t *testing.T)  { testFakedSyncBoundaries(t, 64, FastSync) }
func TestFakedSyncBoundaries64Light(t *testing.T) { testFakedSyncBoundaries(t, 64, LightSync) }

func testFakedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) {
	// Create a small block chain
	targetBlocks := blockCacheLimit - 15
	hashes, headers, blocks := makeChain(targetBlocks+3, 0, genesis)

	// Set a sync init hook to catch boundary changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester := newTester(mode)
	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	// Retrieve the sync boundaries and ensure they are zero (pristine sync)
	if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != 0 {
		t.Fatalf("Pristine boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, 0)
	}
	// Create and sync with an attacker that promises a higher chain than available
	tester.newPeer("attack", protocol, hashes, headers, blocks)
	for i := 1; i < 3; i++ {
		delete(tester.peerHeaders["attack"], hashes[i])
		delete(tester.peerBlocks["attack"], hashes[i])
	}

	pending := new(sync.WaitGroup)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("attack", nil); err == nil {
			t.Fatalf("succeeded attacker synchronisation")
		}
	}()
	<-starting
	if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != uint64(targetBlocks+3) {
		t.Fatalf("Initial boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, targetBlocks+3)
	}
	progress <- struct{}{}
	pending.Wait()

	// Synchronise with a good peer and check that the boundary height has been reduced to the true value
	tester.newPeer("valid", protocol, hashes[3:], headers, blocks)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("valid", nil); err != nil {
			t.Fatalf("failed to synchronise blocks: %v", err)
		}
	}()
	<-starting
	if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != uint64(targetBlocks) {
		t.Fatalf("Initial boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, targetBlocks)
	}
	progress <- struct{}{}
	pending.Wait()
}