package downloader

import (
	"encoding/binary"
	"errors"
	"fmt"
	"math/big"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/event"
)

var (
	knownHash   = common.Hash{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
	unknownHash = common.Hash{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}
	bannedHash  = common.Hash{3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3}

	genesis = createBlock(1, common.Hash{}, knownHash)
)

// idCounter is used by the createHashes method to generate deterministic but unique hashes
var idCounter = int64(2) // #1 is the genesis block

// createHashes generates a batch of hashes rooted at a specific point in the chain.
func createHashes(amount int, root common.Hash) (hashes []common.Hash) {
	hashes = make([]common.Hash, amount+1)
	hashes[len(hashes)-1] = root

	for i := 0; i < len(hashes)-1; i++ {
		binary.BigEndian.PutUint64(hashes[i][:8], uint64(idCounter))
		idCounter++
	}
	return
}

// createBlock assembles a new block at the given chain height.
func createBlock(i int, parent, hash common.Hash) *types.Block {
	header := &types.Header{Number: big.NewInt(int64(i))}
	block := types.NewBlockWithHeader(header)
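	// Assign the pre-generated test hashes directly instead of deriving them from the header contents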
	block.HeaderHash = hash
	block.ParentHeaderHash = parent
	return block
}

// copyBlock makes a deep copy of a block suitable for local modifications.
func copyBlock(block *types.Block) *types.Block {
	return createBlock(int(block.Number().Int64()), block.ParentHeaderHash, block.HeaderHash)
}

// createBlocksFromHashes assembles a collection of blocks, each having a correct
// place in the given hash chain.
func createBlocksFromHashes(hashes []common.Hash) map[common.Hash]*types.Block {
	blocks := make(map[common.Hash]*types.Block)
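	// The hashes are ordered head first and root last, so each block's parent is the following entry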
	for i := 0; i < len(hashes); i++ {
		parent := knownHash
		if i < len(hashes)-1 {
			parent = hashes[i+1]
		}
		blocks[hashes[i]] = createBlock(len(hashes)-i, parent, hashes[i])
	}
	return blocks
}

// downloadTester is a test simulator for mocking out the local block chain.
type downloadTester struct {
	downloader *Downloader

	ownHashes  []common.Hash                           // Hash chain belonging to the tester
	ownBlocks  map[common.Hash]*types.Block            // Blocks belonging to the tester
	peerHashes map[string][]common.Hash                // Hash chain belonging to different test peers
	peerBlocks map[string]map[common.Hash]*types.Block // Blocks belonging to different test peers

	maxHashFetch int // Overrides the maximum number of retrieved hashes
}

// newTester creates a new downloader test mocker.
func newTester() *downloadTester {
	tester := &downloadTester{
		ownHashes:  []common.Hash{knownHash},
		ownBlocks:  map[common.Hash]*types.Block{knownHash: genesis},
		peerHashes: make(map[string][]common.Hash),
		peerBlocks: make(map[string]map[common.Hash]*types.Block),
	}
	tester.downloader = New(new(event.TypeMux), tester.hasBlock, tester.getBlock, tester.insertChain, tester.dropPeer)

	return tester
}

// sync starts synchronizing with a remote peer, blocking until it completes.
func (dl *downloadTester) sync(id string) error {
	err := dl.downloader.synchronise(id, dl.peerHashes[id][0])
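	// Wait for the downloader's background block processor to finish before returning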
	for atomic.LoadInt32(&dl.downloader.processing) == 1 {
		time.Sleep(time.Millisecond)
	}
	return err
}

// hasBlock checks if a block is present in the tester's canonical chain.
func (dl *downloadTester) hasBlock(hash common.Hash) bool {
	return dl.getBlock(hash) != nil
}

// getBlock retrieves a block from the tester's canonical chain.
func (dl *downloadTester) getBlock(hash common.Hash) *types.Block {
	return dl.ownBlocks[hash]
}

// insertChain injects a new batch of blocks into the simulated chain.
func (dl *downloadTester) insertChain(blocks types.Blocks) (int, error) {
	for i, block := range blocks {
		if _, ok := dl.ownBlocks[block.ParentHash()]; !ok {
			return i, errors.New("unknown parent")
		}
		dl.ownHashes = append(dl.ownHashes, block.Hash())
		dl.ownBlocks[block.Hash()] = block
	}
	return len(blocks), nil
}

// newPeer registers a new block download source into the downloader.
func (dl *downloadTester) newPeer(id string, hashes []common.Hash, blocks map[common.Hash]*types.Block) error {
	return dl.newSlowPeer(id, hashes, blocks, 0)
}

// newSlowPeer registers a new block download source into the downloader, with a
// specific delay time on processing the network packets sent to it, simulating
// potentially slow network IO.
func (dl *downloadTester) newSlowPeer(id string, hashes []common.Hash, blocks map[common.Hash]*types.Block, delay time.Duration) error {
	err := dl.downloader.RegisterPeer(id, hashes[0], dl.peerGetHashesFn(id, delay), dl.peerGetBlocksFn(id, delay))
	if err == nil {
		// Assign the owned hashes and blocks to the peer (deep copy)
		dl.peerHashes[id] = make([]common.Hash, len(hashes))
		copy(dl.peerHashes[id], hashes)

		dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
		for hash, block := range blocks {
			dl.peerBlocks[id][hash] = copyBlock(block)
		}
147 148
	}
	return err
}

// dropPeer simulates a hard peer removal from the connection pool.
func (dl *downloadTester) dropPeer(id string) {
	delete(dl.peerHashes, id)
	delete(dl.peerBlocks, id)

	dl.downloader.UnregisterPeer(id)
}

// peerGetHashesFn constructs a getHashes function associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of hashes from the requested peer.
func (dl *downloadTester) peerGetHashesFn(id string, delay time.Duration) func(head common.Hash) error {
	return func(head common.Hash) error {
		time.Sleep(delay)

		limit := MaxHashFetch
		if dl.maxHashFetch > 0 {
			limit = dl.maxHashFetch
		}
		// Gather the next batch of hashes
		hashes := dl.peerHashes[id]
		result := make([]common.Hash, 0, limit)
		for i, hash := range hashes {
			if hash == head {
				i++
				for len(result) < cap(result) && i < len(hashes) {
					result = append(result, hashes[i])
					i++
				}
				break
			}
		}
		// Delay delivery a bit to allow attacks to unfold
		go func() {
			time.Sleep(time.Millisecond)
			dl.downloader.DeliverHashes(id, result)
		}()
		return nil
	}
}

// peerGetBlocksFn constructs a getBlocks function associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of blocks from the requested peer.
func (dl *downloadTester) peerGetBlocksFn(id string, delay time.Duration) func([]common.Hash) error {
	return func(hashes []common.Hash) error {
		time.Sleep(delay)

		blocks := dl.peerBlocks[id]
		result := make([]*types.Block, 0, len(hashes))
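		// Gather only the requested blocks that this peer actually knows about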
		for _, hash := range hashes {
			if block, ok := blocks[hash]; ok {
				result = append(result, block)
			}
		}
		go dl.downloader.DeliverBlocks(id, result)

		return nil
	}
}

// Tests that simple synchronisation from a good peer works without throttling.
func TestSynchronisation(t *testing.T) {
	// Create a small enough block chain to download and the tester
	targetBlocks := blockCacheLimit - 15
	hashes := createHashes(targetBlocks, knownHash)
	blocks := createBlocksFromHashes(hashes)

	tester := newTester()
	tester.newPeer("peer", hashes, blocks)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("peer"); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	if imported := len(tester.ownBlocks); imported != targetBlocks+1 {
		t.Fatalf("synchronised block mismatch: have %v, want %v", imported, targetBlocks+1)
	}
}

// Tests that an inactive downloader will not accept incoming hashes and blocks.
func TestInactiveDownloader(t *testing.T) {
	tester := newTester()

	// Check that neither hashes nor blocks are accepted
	if err := tester.downloader.DeliverHashes("bad peer", []common.Hash{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBlocks("bad peer", []*types.Block{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that a canceled download wipes all previously accumulated state.
func TestCancel(t *testing.T) {
	// Create a small enough block chain to download and the tester
	targetBlocks := blockCacheLimit - 15
	hashes := createHashes(targetBlocks, knownHash)
	blocks := createBlocksFromHashes(hashes)

	tester := newTester()
	tester.newPeer("peer", hashes, blocks)

	// Make sure canceling works with a pristine downloader
	tester.downloader.cancel()
	hashCount, blockCount := tester.downloader.queue.Size()
	if hashCount > 0 || blockCount > 0 {
		t.Errorf("block or hash count mismatch: %d hashes, %d blocks, want 0", hashCount, blockCount)
	}
	// Synchronise with the peer, but cancel afterwards
	if err := tester.sync("peer"); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	tester.downloader.cancel()
	hashCount, blockCount = tester.downloader.queue.Size()
	if hashCount > 0 || blockCount > 0 {
		t.Errorf("block or hash count mismatch: %d hashes, %d blocks, want 0", hashCount, blockCount)
	}
}

// Tests that if a large batch of blocks is being downloaded, it is throttled
// until the cached blocks are retrieved.
func TestThrottling(t *testing.T) {
	// Create a long block chain to download and the tester
	targetBlocks := 8 * blockCacheLimit
	hashes := createHashes(targetBlocks, knownHash)
	blocks := createBlocksFromHashes(hashes)

	tester := newTester()
	tester.newPeer("peer", hashes, blocks)

	// Wrap the importer to allow stepping
	done := make(chan int)
	tester.downloader.insertChain = func(blocks types.Blocks) (int, error) {
		n, err := tester.insertChain(blocks)
		done <- n
		return n, err
	}
	// Start a synchronisation concurrently
	errc := make(chan error)
	go func() {
		errc <- tester.sync("peer")
	}()
	// Iteratively take some blocks, always checking the retrieval count
	for len(tester.ownBlocks) < targetBlocks+1 {
		// Wait a bit for sync to throttle itself
		var cached int
		for start := time.Now(); time.Since(start) < 3*time.Second; {
			time.Sleep(25 * time.Millisecond)

			cached = len(tester.downloader.queue.blockPool)
			if cached == blockCacheLimit || len(tester.ownBlocks)+cached == targetBlocks+1 {
				break
			}
		}
		// Make sure we filled up the cache, then exhaust it
		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
		if cached != blockCacheLimit && len(tester.ownBlocks)+cached < targetBlocks+1 {
			t.Fatalf("block count mismatch: have %v, want %v", cached, blockCacheLimit)
		}
		<-done // finish previous blocking import
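		// Let imports proceed until the cached blocks fit within a single processing batch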
		for cached > maxBlockProcess {
			cached -= <-done
		}
		time.Sleep(25 * time.Millisecond) // yield to the insertion
	}
	<-done // finish the last blocking import

	// Check that we haven't pulled more blocks than available
	if len(tester.ownBlocks) > targetBlocks+1 {
		t.Fatalf("target block count mismatch: have %v, want %v", len(tester.ownBlocks), targetBlocks+1)
	}
	if err := <-errc; err != nil {
		t.Fatalf("block synchronization failed: %v", err)
	}
}

// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
func TestMultiSynchronisation(t *testing.T) {
	// Create various peers with various parts of the chain
	targetPeers := 16
	targetBlocks := targetPeers*blockCacheLimit - 15

	hashes := createHashes(targetBlocks, knownHash)
	blocks := createBlocksFromHashes(hashes)

	tester := newTester()
	for i := 0; i < targetPeers; i++ {
		id := fmt.Sprintf("peer #%d", i)
		tester.newPeer(id, hashes[i*blockCacheLimit:], blocks)
	}
	// Synchronise with the middle peer and make sure half of the blocks were retrieved
	id := fmt.Sprintf("peer #%d", targetPeers/2)
	if err := tester.sync(id); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	if imported := len(tester.ownBlocks); imported != len(tester.peerHashes[id]) {
		t.Fatalf("synchronised block mismatch: have %v, want %v", imported, len(tester.peerHashes[id]))
	}
	// Synchronise with the best peer and make sure everything is retrieved
	if err := tester.sync("peer #0"); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	if imported := len(tester.ownBlocks); imported != targetBlocks+1 {
		t.Fatalf("synchronised block mismatch: have %v, want %v", imported, targetBlocks+1)
	}
}

// Tests that synchronising with a peer that is very slow at network IO does not
// stall the other peers in the system.
func TestSlowSynchronisation(t *testing.T) {
	tester := newTester()

	// Create a batch of blocks, with a slow and a full speed peer
	targetCycles := 2
	targetBlocks := targetCycles*blockCacheLimit - 15
	targetIODelay := time.Second

	hashes := createHashes(targetBlocks, knownHash)
	blocks := createBlocksFromHashes(hashes)

	tester.newSlowPeer("fast", hashes, blocks, 0)
	tester.newSlowPeer("slow", hashes, blocks, targetIODelay)

	// Try to sync with the peers (pull hashes from fast)
	start := time.Now()
	if err := tester.sync("fast"); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	if imported := len(tester.ownBlocks); imported != targetBlocks+1 {
		t.Fatalf("synchronised block mismatch: have %v, want %v", imported, targetBlocks+1)
	}
	// Check that the slow peer got hit at most once per block-cache-size import
	limit := time.Duration(targetCycles+1) * targetIODelay
	if delay := time.Since(start); delay >= limit {
		t.Fatalf("synchronisation exceeded delay limit: have %v, want %v", delay, limit)
	}
}

// Tests that if a peer returns an invalid chain with a block pointing to a non-
// existing parent, it is correctly detected and handled.
func TestNonExistingParentAttack(t *testing.T) {
	tester := newTester()

	// Build a valid single-link chain, and a second one forged to point at an unknown parent
	hashes := createHashes(1, knownHash)
	blocks := createBlocksFromHashes(hashes)
	tester.newPeer("valid", hashes, blocks)

	hashes = createHashes(1, knownHash)
	blocks = createBlocksFromHashes(hashes)
	blocks[hashes[0]].ParentHeaderHash = unknownHash
	tester.newPeer("attack", hashes, blocks)

	// Try and sync with the malicious node and check that it fails
	if err := tester.sync("attack"); err == nil {
		t.Fatalf("block synchronization succeeded")
	}
	if tester.hasBlock(hashes[0]) {
		t.Fatalf("tester accepted unknown-parent block: %v", blocks[hashes[0]])
	}
	// Try to synchronize with the valid chain and make sure it succeeds
	if err := tester.sync("valid"); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	if !tester.hasBlock(tester.peerHashes["valid"][0]) {
		t.Fatalf("tester didn't accept known-parent block: %v", tester.peerBlocks["valid"][hashes[0]])
	}
}

// Tests that if a malicious peer keeps sending us repeating hashes, we don't
// loop indefinitely.
func TestRepeatingHashAttack(t *testing.T) { // TODO: Is this thing valid??
	tester := newTester()

	// Create a valid chain, but drop the last link
	hashes := createHashes(blockCacheLimit, knownHash)
	blocks := createBlocksFromHashes(hashes)
	tester.newPeer("valid", hashes, blocks)
	tester.newPeer("attack", hashes[:len(hashes)-1], blocks)

	// Try and sync with the malicious node
	errc := make(chan error)
	go func() {
		errc <- tester.sync("attack")
	}()
	// Make sure that syncing returns and does so with a failure
	select {
	case <-time.After(time.Second):
		t.Fatalf("synchronisation blocked")
	case err := <-errc:
		if err == nil {
			t.Fatalf("synchronisation succeeded")
		}
	}
	// Ensure that a valid chain can still pass sync
	if err := tester.sync("valid"); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
}

// Tests that if a malicious peer returns a non-existent block hash, it should
// eventually time out and the sync be reattempted.
func TestNonExistingBlockAttack(t *testing.T) {
	tester := newTester()

	// Create a valid chain, but forge a mid-chain hash to a non-existent block
	hashes := createHashes(blockCacheLimit, knownHash)
	blocks := createBlocksFromHashes(hashes)
	tester.newPeer("valid", hashes, blocks)

	hashes[len(hashes)/2] = unknownHash
	tester.newPeer("attack", hashes, blocks)

	// Try and sync with the malicious node and check that it fails
	if err := tester.sync("attack"); err != errPeersUnavailable {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errPeersUnavailable)
	}
	// Ensure that a valid chain can still pass sync
	if err := tester.sync("valid"); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
}

// Tests that if a malicious peer is returning hashes in a weird order, the sync
// throttler doesn't choke on them waiting for the valid blocks.
func TestInvalidHashOrderAttack(t *testing.T) {
	tester := newTester()

	// Create a valid long chain, but reverse some hashes within
	hashes := createHashes(4*blockCacheLimit, knownHash)
	blocks := createBlocksFromHashes(hashes)
	tester.newPeer("valid", hashes, blocks)

	chunk1 := make([]common.Hash, blockCacheLimit)
	chunk2 := make([]common.Hash, blockCacheLimit)
	copy(chunk1, hashes[blockCacheLimit:2*blockCacheLimit])
	copy(chunk2, hashes[2*blockCacheLimit:3*blockCacheLimit])

	copy(hashes[2*blockCacheLimit:], chunk1)
	copy(hashes[blockCacheLimit:], chunk2)
	tester.newPeer("attack", hashes, blocks)

	// Try and sync with the malicious node and check that it fails
	if err := tester.sync("attack"); err != errInvalidChain {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errInvalidChain)
	}
	// Ensure that a valid chain can still pass sync
	if err := tester.sync("valid"); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
}

// Tests that if a malicious peer makes up a random hash chain and tries to push
// it indefinitely, it actually gets caught with it.
func TestMadeupHashChainAttack(t *testing.T) {
	tester := newTester()
	blockSoftTTL = 100 * time.Millisecond
	crossCheckCycle = 25 * time.Millisecond

	// Create a short valid chain, and a much longer hash chain without backing blocks
	hashes := createHashes(4*blockCacheLimit, knownHash)
	blocks := createBlocksFromHashes(hashes)

	tester.newPeer("valid", hashes, blocks)
	tester.newPeer("attack", createHashes(1024*blockCacheLimit, knownHash), nil)

	// Try and sync with the malicious node and check that it fails
	if err := tester.sync("attack"); err != errCrossCheckFailed {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errCrossCheckFailed)
	}
	// Ensure that a valid chain can still pass sync
	if err := tester.sync("valid"); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
}

// Tests that if a malicious peer makes up a random hash chain, and tries to push
// indefinitely, one hash at a time, it actually gets caught with it. The reason
// this is separate from the classical made up chain attack is that sending hashes
// one by one prevents reliable block/parent verification.
func TestMadeupHashChainDrippingAttack(t *testing.T) {
	// Create a random chain of hashes to drip
	hashes := createHashes(16*blockCacheLimit, knownHash)
	tester := newTester()

	// Try and sync with the attacker, one hash at a time
	tester.maxHashFetch = 1
	tester.newPeer("attack", hashes, nil)
	if err := tester.sync("attack"); err != errStallingPeer {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
	}
}

// Tests that if a malicious peer makes up a random block chain, and tries to
// push it indefinitely, it actually gets caught with it.
func TestMadeupBlockChainAttack(t *testing.T) {
	defaultBlockTTL := blockSoftTTL
	defaultCrossCheckCycle := crossCheckCycle

	blockSoftTTL = 100 * time.Millisecond
	crossCheckCycle = 25 * time.Millisecond

	// Create a long chain of blocks and simulate an invalid chain by dropping every second hash
	hashes := createHashes(16*blockCacheLimit, knownHash)
	blocks := createBlocksFromHashes(hashes)

	gapped := make([]common.Hash, len(hashes)/2)
	for i := 0; i < len(gapped); i++ {
		gapped[i] = hashes[2*i]
	}
	// Try and sync with the malicious node and check that it fails
	tester := newTester()
	tester.newPeer("attack", gapped, blocks)
	if err := tester.sync("attack"); err != errCrossCheckFailed {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errCrossCheckFailed)
	}
	// Ensure that a valid chain can still pass sync
	blockSoftTTL = defaultBlockTTL
	crossCheckCycle = defaultCrossCheckCycle

	tester.newPeer("valid", hashes, blocks)
	if err := tester.sync("valid"); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
}

// Advanced form of the above forged blockchain attack, where not only does the
// attacker make up valid hashes for random blocks, but also forges the block
// parents to point to existing hashes.
func TestMadeupParentBlockChainAttack(t *testing.T) {
	tester := newTester()

	defaultBlockTTL := blockSoftTTL
	defaultCrossCheckCycle := crossCheckCycle

	blockSoftTTL = 100 * time.Millisecond
	crossCheckCycle = 25 * time.Millisecond

	// Create a long chain of blocks, then forge the parent links to point at the already known genesis hash
	hashes := createHashes(16*blockCacheLimit, knownHash)
	blocks := createBlocksFromHashes(hashes)
	tester.newPeer("valid", hashes, blocks)

	for _, block := range blocks {
		block.ParentHeaderHash = knownHash // Simulate pointing to already known hash
	}
	tester.newPeer("attack", hashes, blocks)

	// Try and sync with the malicious node and check that it fails
	if err := tester.sync("attack"); err != errCrossCheckFailed {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errCrossCheckFailed)
	}
	// Ensure that a valid chain can still pass sync
	blockSoftTTL = defaultBlockTTL
	crossCheckCycle = defaultCrossCheckCycle

	if err := tester.sync("valid"); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
}

// Tests that if one/multiple malicious peers try to feed a banned blockchain to
// the downloader, it will not keep refetching the same chain indefinitely, but
// gradually block pieces of it, until its head is also blocked.
func TestBannedChainStarvationAttack(t *testing.T) {
	// Create the tester and ban the selected hash
	tester := newTester()
	tester.downloader.banned.Add(bannedHash)

	// Construct a valid chain, and a fork of it rooted at the banned hash
	hashes := createHashes(8*blockCacheLimit, knownHash)
	blocks := createBlocksFromHashes(hashes)
	tester.newPeer("valid", hashes, blocks)

	fork := len(hashes)/2 - 23
	hashes = append(createHashes(4*blockCacheLimit, bannedHash), hashes[fork:]...)
	blocks = createBlocksFromHashes(hashes)
	tester.newPeer("attack", hashes, blocks)

	// Iteratively try to sync, and verify that the banned hash list grows until
	// the head of the invalid chain is blocked too.
	for banned := tester.downloader.banned.Size(); ; {
		// Try to sync with the attacker, check hash chain failure
		if err := tester.sync("attack"); err != errInvalidChain {
			if tester.downloader.banned.Has(hashes[0]) && err == errBannedHead {
				break
			}
			t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errInvalidChain)
		}
		// Check that the ban list grew by at least one new item, or that everything got banned
		bans := tester.downloader.banned.Size()
		if bans < banned+1 {
			t.Fatalf("ban count mismatch: have %v, want %v+", bans, banned+1)
		}
		banned = bans
	}
	// Check that after banning an entire chain, bad peers get dropped
	if err := tester.newPeer("new attacker", hashes, blocks); err != errBannedHead {
		t.Fatalf("peer registration mismatch: have %v, want %v", err, errBannedHead)
	}
	if peer := tester.downloader.peers.Peer("new attacker"); peer != nil {
		t.Fatalf("banned attacker registered: %v", peer)
	}
	// Ensure that a valid chain can still pass sync
	if err := tester.sync("valid"); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
}

// Tests that if a peer sends excessively many/large invalid chains that are
// gradually banned, there is an upper limit on the consumed memory, and that the
// original hard coded bad hashes are never evicted.
func TestBannedChainMemoryExhaustionAttack(t *testing.T) {
	// Create the tester and ban the selected hash
	tester := newTester()
	tester.downloader.banned.Add(bannedHash)

	// Reduce the test size a bit
	defaultMaxBlockFetch := MaxBlockFetch
	defaultMaxBannedHashes := maxBannedHashes

	MaxBlockFetch = 4
	maxBannedHashes = 256

	// Construct a banned chain with more chunks than the ban limit
	hashes := createHashes(8*blockCacheLimit, knownHash)
	blocks := createBlocksFromHashes(hashes)
	tester.newPeer("valid", hashes, blocks)

	fork := len(hashes)/2 - 23
	hashes = append(createHashes(maxBannedHashes*MaxBlockFetch, bannedHash), hashes[fork:]...)
	blocks = createBlocksFromHashes(hashes)
	tester.newPeer("attack", hashes, blocks)

	// Iteratively try to sync, and verify that the banned hash list grows until
	// the head of the invalid chain is blocked too.
	for {
		// Try to sync with the attacker, check hash chain failure
		if err := tester.sync("attack"); err != errInvalidChain {
			t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errInvalidChain)
		}
		// Short circuit if the entire chain was banned
		if tester.downloader.banned.Has(hashes[0]) {
			break
		}
		// Otherwise ensure we never exceed the memory allowance and the hard coded bans are untouched
		if bans := tester.downloader.banned.Size(); bans > maxBannedHashes {
			t.Fatalf("ban cap exceeded: have %v, want max %v", bans, maxBannedHashes)
		}
		for hash := range core.BadHashes {
			if !tester.downloader.banned.Has(hash) {
				t.Fatalf("hard coded ban evacuated: %x", hash)
			}
		}
	}
	// Ensure that a valid chain can still pass sync
	MaxBlockFetch = defaultMaxBlockFetch
	maxBannedHashes = defaultMaxBannedHashes

	if err := tester.sync("valid"); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
}

// Tests a corner case (potential attack) where a peer delivers both good as well
// as unrequested blocks to a block request. This may trigger a different code path
// than the fully correct or fully invalid delivery, potentially causing internal
// state problems.
//
// No, don't delete this test, it actually did happen!
func TestOverlappingDeliveryAttack(t *testing.T) {
	// Create an arbitrary batch of blocks (< cache size, so imports don't block)
	targetBlocks := blockCacheLimit - 23
	hashes := createHashes(targetBlocks, knownHash)
	blocks := createBlocksFromHashes(hashes)

	// Register an attacker that always returns non-requested blocks too
	tester := newTester()
	tester.newPeer("attack", hashes, blocks)

	rawGetBlocks := tester.downloader.peers.Peer("attack").getBlocks
	tester.downloader.peers.Peer("attack").getBlocks = func(request []common.Hash) error {
		// Add a non-requested hash to screw up the delivery (genesis should be fine)
		return rawGetBlocks(append(request, hashes[0]))
	}
	// Test that synchronisation can complete, check for import success
	if err := tester.sync("attack"); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
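	// Wait (bounded to a second) for the asynchronous block imports to catch up with the delivered hashes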
	start := time.Now()
	for len(tester.ownHashes) != len(hashes) && time.Since(start) < time.Second {
		time.Sleep(50 * time.Millisecond)
	}
	if len(tester.ownHashes) != len(hashes) {
		t.Fatalf("chain length mismatch: have %v, want %v", len(tester.ownHashes), len(hashes))
	}
}

// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
func TestHashAttackerDropping(t *testing.T) {
	// Define the disconnection requirement for individual hash fetch errors
	tests := []struct {
		result error
		drop   bool
	}{
		{nil, false},                 // Sync succeeded, all is well
		{errBusy, false},             // Sync is already in progress, no problem
		{errUnknownPeer, false},      // Peer is unknown, was already dropped, don't double drop
		{errBadPeer, true},           // Peer was deemed bad for some reason, drop it
		{errStallingPeer, true},      // Peer was detected to be stalling, drop it
		{errBannedHead, true},        // Peer's head hash is a known bad hash, drop it
		{errNoPeers, false},          // No peers to download from, soft race, no issue
		{errPendingQueue, false},     // There are blocks still cached, wait to exhaust, no issue
		{errTimeout, true},           // No hashes received in due time, drop the peer
		{errEmptyHashSet, true},      // No hashes were returned as a response, drop as it's a dead end
		{errPeersUnavailable, true},  // Nobody had the advertised blocks, drop the advertiser
		{errInvalidChain, true},      // Hash chain was detected as invalid, definitely drop
		{errCrossCheckFailed, true},  // Hash-origin failed to pass a block cross check, drop
		{errCancelHashFetch, false},  // Synchronisation was canceled, origin may be innocent, don't drop
		{errCancelBlockFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
	}
	// Run the tests and check disconnection status
	tester := newTester()
	for i, tt := range tests {
		// Register a new peer and ensure its presence
		id := fmt.Sprintf("test %d", i)
		if err := tester.newPeer(id, []common.Hash{knownHash}, nil); err != nil {
			t.Fatalf("test %d: failed to register new peer: %v", i, err)
		}
		if _, ok := tester.peerHashes[id]; !ok {
			t.Fatalf("test %d: registered peer not found", i)
		}
		// Simulate a synchronisation and check the required result
		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }

		tester.downloader.Synchronise(id, knownHash)
		if _, ok := tester.peerHashes[id]; !ok != tt.drop {
			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
		}
	}
}

// Tests that feeding bad blocks will result in a peer drop.
func TestBlockAttackerDropping(t *testing.T) {
	// Define the disconnection requirement for individual block import errors
	tests := []struct {
		failure bool
		drop    bool
	}{{true, true}, {false, false}}

	// Run the tests and check disconnection status
	tester := newTester()
	for i, tt := range tests {
		// Register a new peer and ensure its presence
		id := fmt.Sprintf("test %d", i)
		if err := tester.newPeer(id, []common.Hash{common.Hash{}}, nil); err != nil {
			t.Fatalf("test %d: failed to register new peer: %v", i, err)
		}
		if _, ok := tester.peerHashes[id]; !ok {
			t.Fatalf("test %d: registered peer not found", i)
		}
		// Assemble a good or bad block, depending on the test
		raw := createBlock(1, knownHash, common.Hash{})
		if tt.failure {
			raw = createBlock(1, unknownHash, common.Hash{})
		}
		block := &Block{OriginPeer: id, RawBlock: raw}

		// Simulate block processing and check the result
		tester.downloader.queue.blockCache[0] = block
		tester.downloader.process()
		if _, ok := tester.peerHashes[id]; !ok != tt.drop {
			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.failure, !ok, tt.drop)
		}
	}
}