package downloader

import (
	"encoding/binary"
	"math/big"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/event"
)

var (
	knownHash   = common.Hash{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
	unknownHash = common.Hash{9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}
	bannedHash  = common.Hash{5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}
)

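// createHashes generates a batch of hashes, rooted at the known hash, to act
// as the remote chain for the downloader to retrieve.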
func createHashes(start, amount int) (hashes []common.Hash) {
	hashes = make([]common.Hash, amount+1)
	hashes[len(hashes)-1] = knownHash

	for i := range hashes[:len(hashes)-1] {
		binary.BigEndian.PutUint64(hashes[i][:8], uint64(start+i+2))
	}
	return
}

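// createBlock assembles a single test block with the given number, parent and
// own hash.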
func createBlock(i int, parent, hash common.Hash) *types.Block {
	header := &types.Header{Number: big.NewInt(int64(i))}
	block := types.NewBlockWithHeader(header)
	block.HeaderHash = hash
	block.ParentHeaderHash = parent
	return block
}

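// createBlocksFromHashes assembles a chain of blocks from the given hashes,
// linking each block to the next (older) hash as its parent.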
func createBlocksFromHashes(hashes []common.Hash) map[common.Hash]*types.Block {
	blocks := make(map[common.Hash]*types.Block)
	for i := 0; i < len(hashes); i++ {
		parent := knownHash
		if i < len(hashes)-1 {
			parent = hashes[i+1]
		}
		blocks[hashes[i]] = createBlock(len(hashes)-i, parent, hashes[i])
	}
	return blocks
}

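// downloadTester is a test simulator for mocking out a remote hash/block chain
// and feeding it to the downloader under test.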
type downloadTester struct {
	downloader *Downloader

	hashes []common.Hash                // Chain of hashes simulating a remote block chain
	blocks map[common.Hash]*types.Block // Blocks associated with the hashes
	chain  []common.Hash                // Block-chain being constructed

	maxHashFetch int // Overrides the maximum number of retrieved hashes

	t            *testing.T
	done         chan bool
	activePeerId string
}

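// newTester creates a download tester backed by the given hash chain and block
// map, wiring up a fresh downloader against its callbacks.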
func newTester(t *testing.T, hashes []common.Hash, blocks map[common.Hash]*types.Block) *downloadTester {
	tester := &downloadTester{
		t: t,

		hashes: hashes,
		blocks: blocks,
		chain:  []common.Hash{knownHash},

		done: make(chan bool),
	}
	var mux event.TypeMux
	downloader := New(&mux, tester.hasBlock, tester.getBlock)
	tester.downloader = downloader

	return tester
}

// sync is a simple wrapper around the downloader to start synchronisation and
// block until it returns
func (dl *downloadTester) sync(peerId string, head common.Hash) error {
	dl.activePeerId = peerId
	return dl.downloader.Synchronise(peerId, head)
}

// syncTake starts synchronising with a remote peer, but concurrently it also
// keeps fetching the blocks that the downloader retrieves. It blocks until
// both goroutines terminate.
func (dl *downloadTester) syncTake(peerId string, head common.Hash) ([]*Block, error) {
	// Start a block collector to take blocks as they become available
	done := make(chan struct{})
	took := []*Block{}
	go func() {
		for running := true; running; {
			select {
			case <-done:
				running = false
			default:
				time.Sleep(time.Millisecond)
			}
			// Take a batch of blocks and accumulate
			took = append(took, dl.downloader.TakeBlocks()...)
		}
		done <- struct{}{}
	}()
	// Start the downloading, sync the taker and return
	err := dl.sync(peerId, head)

	done <- struct{}{}
	<-done

	return took, err
}

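// hasBlock checks whether a hash is part of the locally constructed chain.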
func (dl *downloadTester) hasBlock(hash common.Hash) bool {
	for _, h := range dl.chain {
		if h == hash {
			return true
		}
	}
	return false
}

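// getBlock retrieves a block from the local chain; for these tests it always
// returns the genesis block, irrespective of the requested hash.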
func (dl *downloadTester) getBlock(hash common.Hash) *types.Block {
	return dl.blocks[knownHash]
}

// getHashes retrieves a batch of hashes for reconstructing the chain.
func (dl *downloadTester) getHashes(head common.Hash) error {
	limit := MaxHashFetch
	if dl.maxHashFetch > 0 {
		limit = dl.maxHashFetch
	}
	// Gather the next batch of hashes
	hashes := make([]common.Hash, 0, limit)
	for i, hash := range dl.hashes {
		if hash == head {
			i++
			for len(hashes) < cap(hashes) && i < len(dl.hashes) {
				hashes = append(hashes, dl.hashes[i])
				i++
			}
			break
		}
	}
	// Delay delivery a bit to allow attacks to unfold
	id := dl.activePeerId
	go func() {
		time.Sleep(time.Millisecond)
		dl.downloader.DeliverHashes(id, hashes)
	}()
	return nil
}

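// getBlocks returns a block fetcher callback bound to the given peer id,
// delivering whatever requested blocks exist in the tester's block map.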
func (dl *downloadTester) getBlocks(id string) func([]common.Hash) error {
	return func(hashes []common.Hash) error {
		blocks := make([]*types.Block, 0, len(hashes))
		for _, hash := range hashes {
			if block, ok := dl.blocks[hash]; ok {
				blocks = append(blocks, block)
			}
		}
		go dl.downloader.DeliverBlocks(id, blocks)

		return nil
	}
}

// newPeer registers a new block download source into the syncer.
func (dl *downloadTester) newPeer(id string, td *big.Int, hash common.Hash) error {
	return dl.downloader.RegisterPeer(id, hash, dl.getHashes, dl.getBlocks(id))
}

// Tests that simple synchronisation from a good peer works, without throttling.
func TestSynchronisation(t *testing.T) {
	// Create a small enough block chain to download and the tester
	targetBlocks := blockCacheLimit - 15
	hashes := createHashes(0, targetBlocks)
	blocks := createBlocksFromHashes(hashes)

	tester := newTester(t, hashes, blocks)
	tester.newPeer("peer", big.NewInt(10000), hashes[0])

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("peer", hashes[0]); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	if queued := len(tester.downloader.queue.blockPool); queued != targetBlocks {
		t.Fatalf("synchronised block mismatch: have %v, want %v", queued, targetBlocks)
	}
}

// Tests that the synchronized blocks can be correctly retrieved.
func TestBlockTaking(t *testing.T) {
	// Create a small enough block chain to download and the tester
	targetBlocks := blockCacheLimit - 15
	hashes := createHashes(0, targetBlocks)
	blocks := createBlocksFromHashes(hashes)

	tester := newTester(t, hashes, blocks)
	tester.newPeer("peer", big.NewInt(10000), hashes[0])

	// Synchronise with the peer and test block retrieval
	if err := tester.sync("peer", hashes[0]); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	if took := tester.downloader.TakeBlocks(); len(took) != targetBlocks {
		t.Fatalf("took block mismatch: have %v, want %v", len(took), targetBlocks)
	}
}

// Tests that an inactive downloader will not accept incoming hashes and blocks.
func TestInactiveDownloader(t *testing.T) {
	// Create a small enough block chain to download and the tester
	targetBlocks := blockCacheLimit - 15
	hashes := createHashes(0, targetBlocks)
	blockMap := createBlocksFromHashes(hashes)
	blocks := make([]*types.Block, 0, len(blockMap)) // flatten the block map into a slice for delivery
	for _, block := range blockMap {
		blocks = append(blocks, block)
	}

	tester := newTester(t, nil, nil)

	// Check that neither hashes nor blocks are accepted
	if err := tester.downloader.DeliverHashes("bad peer", hashes); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBlocks("bad peer", blocks); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that a canceled download wipes all previously accumulated state.
func TestCancel(t *testing.T) {
	// Create a small enough block chain to download and the tester
	targetBlocks := blockCacheLimit - 15
	hashes := createHashes(0, targetBlocks)
	blocks := createBlocksFromHashes(hashes)

	tester := newTester(t, hashes, blocks)
	tester.newPeer("peer", big.NewInt(10000), hashes[0])

	// Synchronise with the peer, but cancel afterwards
	if err := tester.sync("peer", hashes[0]); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	if !tester.downloader.Cancel() {
		t.Fatalf("cancel operation failed")
	}
	// Make sure the queue reports empty and no blocks can be taken
	hashCount, blockCount := tester.downloader.queue.Size()
	if hashCount > 0 || blockCount > 0 {
		t.Errorf("block or hash count mismatch: %d hashes, %d blocks, want 0", hashCount, blockCount)
	}
	if took := tester.downloader.TakeBlocks(); len(took) != 0 {
		t.Errorf("taken blocks mismatch: have %d, want %d", len(took), 0)
	}
}

// Tests that if a large batch of blocks is being downloaded, it gets throttled
// until the cached blocks are retrieved.
func TestThrottling(t *testing.T) {
	// Create a long block chain to download and the tester
	targetBlocks := 8 * blockCacheLimit
	hashes := createHashes(0, targetBlocks)
	blocks := createBlocksFromHashes(hashes)

	tester := newTester(t, hashes, blocks)
	tester.newPeer("peer", big.NewInt(10000), hashes[0])

	// Start a synchronisation concurrently
	errc := make(chan error)
	go func() {
		errc <- tester.sync("peer", hashes[0])
	}()
	// Iteratively take some blocks, always checking the retrieval count
	for total := 0; total < targetBlocks; {
		// Wait a bit for the block cache to fill up
		for start := time.Now(); time.Since(start) < 3*time.Second; {
			time.Sleep(25 * time.Millisecond)
			if len(tester.downloader.queue.blockPool) == blockCacheLimit {
				break
			}
		}
		// Fetch the next batch of blocks
		took := tester.downloader.TakeBlocks()
		if len(took) != blockCacheLimit {
			t.Fatalf("block count mismatch: have %v, want %v", len(took), blockCacheLimit)
		}
		total += len(took)
		if total > targetBlocks {
			t.Fatalf("target block count mismatch: have %v, want %v", total, targetBlocks)
		}
	}
	if err := <-errc; err != nil {
		t.Fatalf("block synchronization failed: %v", err)
	}
}

// Tests that if a peer returns an invalid chain with a block pointing to a non-
// existing parent, it is correctly detected and handled.
func TestNonExistingParentAttack(t *testing.T) {
	// Forge a single-link chain with a forged header
	hashes := createHashes(0, 1)
	blocks := createBlocksFromHashes(hashes)

	forged := blocks[hashes[0]]
	forged.ParentHeaderHash = unknownHash

	// Try and sync with the malicious node and check that it fails
	tester := newTester(t, hashes, blocks)
	tester.newPeer("attack", big.NewInt(10000), hashes[0])
	if err := tester.sync("attack", hashes[0]); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	bs := tester.downloader.TakeBlocks()
	if len(bs) != 1 {
		t.Fatalf("retrieved block mismatch: have %v, want %v", len(bs), 1)
	}
	if tester.hasBlock(bs[0].RawBlock.ParentHash()) {
		t.Fatalf("tester knows about the unknown hash")
	}
	tester.downloader.Cancel()

	// Reconstruct a valid chain, and try to synchronize with it
	forged.ParentHeaderHash = knownHash
	tester.newPeer("valid", big.NewInt(20000), hashes[0])
	if err := tester.sync("valid", hashes[0]); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	bs = tester.downloader.TakeBlocks()
	if len(bs) != 1 {
		t.Fatalf("retrieved block mismatch: have %v, want %v", len(bs), 1)
	}
	if !tester.hasBlock(bs[0].RawBlock.ParentHash()) {
		t.Fatalf("tester doesn't know about the origin hash")
	}
}

// Tests that if a malicious peer keeps sending us repeating hashes, we don't
// loop indefinitely.
func TestRepeatingHashAttack(t *testing.T) {
	// Create a valid chain, but drop the last link
	hashes := createHashes(0, blockCacheLimit)
	blocks := createBlocksFromHashes(hashes)
	forged := hashes[:len(hashes)-1]

	// Try and sync with the malicious node
	tester := newTester(t, forged, blocks)
	tester.newPeer("attack", big.NewInt(10000), forged[0])

	errc := make(chan error)
	go func() {
		errc <- tester.sync("attack", hashes[0])
	}()

	// Make sure that syncing returns and does so with a failure
	select {
	case <-time.After(time.Second):
		t.Fatalf("synchronisation blocked")
	case err := <-errc:
		if err == nil {
			t.Fatalf("synchronisation succeeded")
		}
	}
	// Ensure that a valid chain can still pass sync
	tester.hashes = hashes
	tester.newPeer("valid", big.NewInt(20000), hashes[0])
	if err := tester.sync("valid", hashes[0]); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
}

// Tests that if a malicious peer returns a non-existent block hash, the sync
// eventually times out and is reattempted.
func TestNonExistingBlockAttack(t *testing.T) {
	// Create a valid chain, but forge one of the mid-chain links
	hashes := createHashes(0, blockCacheLimit)
	blocks := createBlocksFromHashes(hashes)
	origin := hashes[len(hashes)/2]

	hashes[len(hashes)/2] = unknownHash

	// Try and sync with the malicious node and check that it fails
	tester := newTester(t, hashes, blocks)
	tester.newPeer("attack", big.NewInt(10000), hashes[0])
	if err := tester.sync("attack", hashes[0]); err != errPeersUnavailable {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errPeersUnavailable)
	}
	// Ensure that a valid chain can still pass sync
	hashes[len(hashes)/2] = origin
	tester.newPeer("valid", big.NewInt(20000), hashes[0])
	if err := tester.sync("valid", hashes[0]); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
}

// Tests that if a malicious peer returns hashes in a weird order, the sync
// throttler doesn't choke on them while waiting for the valid blocks.
func TestInvalidHashOrderAttack(t *testing.T) {
	// Create a valid long chain, but reverse some hashes within
	hashes := createHashes(0, 4*blockCacheLimit)
	blocks := createBlocksFromHashes(hashes)

	chunk1 := make([]common.Hash, blockCacheLimit)
	chunk2 := make([]common.Hash, blockCacheLimit)
	copy(chunk1, hashes[blockCacheLimit:2*blockCacheLimit])
	copy(chunk2, hashes[2*blockCacheLimit:3*blockCacheLimit])

	reverse := make([]common.Hash, len(hashes))
	copy(reverse, hashes)
	copy(reverse[2*blockCacheLimit:], chunk1)
	copy(reverse[blockCacheLimit:], chunk2)

	// Try and sync with the malicious node and check that it fails
	tester := newTester(t, reverse, blocks)
	tester.newPeer("attack", big.NewInt(10000), reverse[0])
	if _, err := tester.syncTake("attack", reverse[0]); err != ErrInvalidChain {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrInvalidChain)
	}
	// Ensure that a valid chain can still pass sync
	tester.hashes = hashes
	tester.newPeer("valid", big.NewInt(20000), hashes[0])
	if _, err := tester.syncTake("valid", hashes[0]); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
}

// Tests that if a malicious peer makes up a random hash chain and tries to push
// it indefinitely, it actually gets caught with it.
func TestMadeupHashChainAttack(t *testing.T) {
	blockSoftTTL = 100 * time.Millisecond
	crossCheckCycle = 25 * time.Millisecond

	// Create a long chain of hashes without backing blocks
	hashes := createHashes(0, 1024*blockCacheLimit)

	// Try and sync with the malicious node and check that it fails
	tester := newTester(t, hashes, nil)
	tester.newPeer("attack", big.NewInt(10000), hashes[0])
	if _, err := tester.syncTake("attack", hashes[0]); err != ErrCrossCheckFailed {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrCrossCheckFailed)
	}
}

// Tests that if a malicious peer makes up a random hash chain, and tries to push
// it indefinitely, one hash at a time, it actually gets caught with it. The reason
// this is separate from the classical made up chain attack is that sending hashes
// one by one prevents reliable block/parent verification.
func TestMadeupHashChainDrippingAttack(t *testing.T) {
	// Create a random chain of hashes to drip
	hashes := createHashes(0, 16*blockCacheLimit)
	tester := newTester(t, hashes, nil)

	// Try and sync with the attacker, one hash at a time
	tester.maxHashFetch = 1
	tester.newPeer("attack", big.NewInt(10000), hashes[0])
	if _, err := tester.syncTake("attack", hashes[0]); err != ErrStallingPeer {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrStallingPeer)
	}
}

// Tests that if a malicious peer makes up a random block chain, and tries to
// push it indefinitely, it actually gets caught with it.
func TestMadeupBlockChainAttack(t *testing.T) {
	defaultBlockTTL := blockSoftTTL
	defaultCrossCheckCycle := crossCheckCycle

	blockSoftTTL = 100 * time.Millisecond
	crossCheckCycle = 25 * time.Millisecond

	// Create a long chain of blocks and simulate an invalid chain by dropping every second block
	hashes := createHashes(0, 16*blockCacheLimit)
	blocks := createBlocksFromHashes(hashes)

	gapped := make([]common.Hash, len(hashes)/2)
	for i := 0; i < len(gapped); i++ {
		gapped[i] = hashes[2*i]
	}
	// Try and sync with the malicious node and check that it fails
	tester := newTester(t, gapped, blocks)
	tester.newPeer("attack", big.NewInt(10000), gapped[0])
	if _, err := tester.syncTake("attack", gapped[0]); err != ErrCrossCheckFailed {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrCrossCheckFailed)
	}
	// Ensure that a valid chain can still pass sync
	blockSoftTTL = defaultBlockTTL
	crossCheckCycle = defaultCrossCheckCycle

	tester.hashes = hashes
	tester.newPeer("valid", big.NewInt(20000), hashes[0])
	if _, err := tester.syncTake("valid", hashes[0]); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
}

// Advanced form of the above forged blockchain attack, where not only does the
// attacker make up valid hashes for random blocks, but also forges the block
// parents to point to existing hashes.
func TestMadeupParentBlockChainAttack(t *testing.T) {
	defaultBlockTTL := blockSoftTTL
	defaultCrossCheckCycle := crossCheckCycle

	blockSoftTTL = 100 * time.Millisecond
	crossCheckCycle = 25 * time.Millisecond

	// Create a long chain of blocks and forge each block's parent to point to an already known hash
	hashes := createHashes(0, 16*blockCacheLimit)
	blocks := createBlocksFromHashes(hashes)
	forges := createBlocksFromHashes(hashes)
	for hash, block := range forges {
		block.ParentHeaderHash = hash // Simulate pointing to already known hash
	}
	// Try and sync with the malicious node and check that it fails
	tester := newTester(t, hashes, forges)
	tester.newPeer("attack", big.NewInt(10000), hashes[0])
	if _, err := tester.syncTake("attack", hashes[0]); err != ErrCrossCheckFailed {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrCrossCheckFailed)
	}
	// Ensure that a valid chain can still pass sync
	blockSoftTTL = defaultBlockTTL
	crossCheckCycle = defaultCrossCheckCycle

	tester.blocks = blocks
	tester.newPeer("valid", big.NewInt(20000), hashes[0])
	if _, err := tester.syncTake("valid", hashes[0]); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
}

// Tests that if one/multiple malicious peers try to feed a banned blockchain to
// the downloader, it will not keep refetching the same chain indefinitely, but
// gradually block pieces of it, until its head is also blocked.
func TestBannedChainStarvationAttack(t *testing.T) {
	// Construct a valid chain, but ban one of the hashes in it
	hashes := createHashes(0, 8*blockCacheLimit)
	hashes[len(hashes)/2+23] = bannedHash // weird index to avoid a multiple of the ban chunk size

	blocks := createBlocksFromHashes(hashes)

	// Create the tester and ban the selected hash
	tester := newTester(t, hashes, blocks)
	tester.downloader.banned.Add(bannedHash)

	// Iteratively try to sync, and verify that the banned hash list grows until
	// the head of the invalid chain is blocked too.
	tester.newPeer("attack", big.NewInt(10000), hashes[0])
	for banned := tester.downloader.banned.Size(); ; {
		// Try to sync with the attacker, check hash chain failure
		if _, err := tester.syncTake("attack", hashes[0]); err != ErrInvalidChain {
			t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrInvalidChain)
		}
		// Check that the ban list grew with at least 1 new item, or all banned
		bans := tester.downloader.banned.Size()
		if bans < banned+1 {
			if tester.downloader.banned.Has(hashes[0]) {
				break
			}
			t.Fatalf("ban count mismatch: have %v, want %v+", bans, banned+1)
		}
		banned = bans
	}
	// Check that after banning an entire chain, bad peers get dropped
	if err := tester.newPeer("new attacker", big.NewInt(10000), hashes[0]); err != errBannedHead {
		t.Fatalf("peer registration mismatch: have %v, want %v", err, errBannedHead)
	}
	if peer := tester.downloader.peers.Peer("new attacker"); peer != nil {
		t.Fatalf("banned attacker registered: %v", peer)
	}
}

// Tests that if a peer sends excessively many/large invalid chains that are
// gradually banned, it will have an upper limit on the consumed memory and also
// the original hard coded bad hashes will not be evicted.
func TestBannedChainMemoryExhaustionAttack(t *testing.T) {
	// Reduce the test size a bit
	MaxBlockFetch = 4
	maxBannedHashes = 256

	// Construct a banned chain with more chunks than the ban limit
	hashes := createHashes(0, maxBannedHashes*MaxBlockFetch)
	hashes[len(hashes)-1] = bannedHash // ban the chain origin so the entire chain gets gradually banned

	blocks := createBlocksFromHashes(hashes)

	// Create the tester and ban the selected hash
	tester := newTester(t, hashes, blocks)
	tester.downloader.banned.Add(bannedHash)

	// Iteratively try to sync, and verify that the banned hash list grows until
	// the head of the invalid chain is blocked too.
	tester.newPeer("attack", big.NewInt(10000), hashes[0])
	for {
		// Try to sync with the attacker, check hash chain failure
		if _, err := tester.syncTake("attack", hashes[0]); err != ErrInvalidChain {
			t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrInvalidChain)
		}
		// Short circuit if the entire chain was banned
		if tester.downloader.banned.Has(hashes[0]) {
			break
		}
		// Otherwise ensure we never exceed the memory allowance and the hard coded bans are untouched
		if bans := tester.downloader.banned.Size(); bans > maxBannedHashes {
			t.Fatalf("ban cap exceeded: have %v, want max %v", bans, maxBannedHashes)
		}
		for hash := range core.BadHashes {
			if !tester.downloader.banned.Has(hash) {
				t.Fatalf("hard coded ban evicted: %x", hash)
			}
		}
	}
}