// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package stream

import (
	"context"
	crand "crypto/rand"
	"fmt"
	"io"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/swarm/network"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
	"github.com/ethereum/go-ethereum/swarm/pot"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
	mockdb "github.com/ethereum/go-ethereum/swarm/storage/mock/db"
)

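//testMinProxBinSize is the minimum number of nearest nodes
//that are expected to store each chunk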
const testMinProxBinSize = 2
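//MaxTimeout is an upper bound, in seconds, on how long a sync test may run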
const MaxTimeout = 600

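//synctestConfig collects the state shared by the syncing tests:
//the overlay addresses and chunk hashes generated during a run, and
//the mappings between chunk indexes, overlay addresses and node IDs.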
type synctestConfig struct {
	addrs            [][]byte
	hashes           []storage.Address
	idToChunksMap    map[discover.NodeID][]int
	chunksToNodesMap map[string][]int
	addrToIDMap      map[string]discover.NodeID
}

//This test is a syncing test for nodes.
//One node is randomly selected to be the pivot node.
//A configurable number of chunks and nodes can be
//provided to the test; the chunks are uploaded
//to the pivot node, and we check that the nodes get the chunks
//they are expected to store based on the syncing protocol.
//Number of chunks and nodes can be provided via commandline too.
func TestSyncingViaGlobalSync(t *testing.T) {
	//if nodes/chunks have been provided via commandline,
	//run the tests with these values
	if *nodes != 0 && *chunks != 0 {
		log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
		testSyncingViaGlobalSync(t, *chunks, *nodes)
	} else {
		var nodeCnt []int
		var chnkCnt []int
		//if the `longrunning` flag has been provided
		//run more test combinations
		if *longrunning {
			chnkCnt = []int{1, 8, 32, 256, 1024}
			nodeCnt = []int{16, 32, 64, 128, 256}
		} else {
			//default test
			chnkCnt = []int{4, 32}
			nodeCnt = []int{32, 16}
		}
		for _, chnk := range chnkCnt {
			for _, n := range nodeCnt {
				log.Info(fmt.Sprintf("Long running test with %d chunks and %d nodes...", chnk, n))
				testSyncingViaGlobalSync(t, chnk, n)
			}
		}
	}
}

func TestSyncingViaDirectSubscribe(t *testing.T) {
	//if nodes/chunks have been provided via commandline,
	//run the tests with these values
	if *nodes != 0 && *chunks != 0 {
		log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
		err := testSyncingViaDirectSubscribe(*chunks, *nodes)
		if err != nil {
			t.Fatal(err)
		}
	} else {
		var nodeCnt []int
		var chnkCnt []int
		//if the `longrunning` flag has been provided
		//run more test combinations
		if *longrunning {
			chnkCnt = []int{1, 8, 32, 256, 1024}
			nodeCnt = []int{32, 16}
		} else {
			//default test
			chnkCnt = []int{4, 32}
			nodeCnt = []int{32, 16}
		}
		for _, chnk := range chnkCnt {
			for _, n := range nodeCnt {
				log.Info(fmt.Sprintf("Long running test with %d chunks and %d nodes...", chnk, n))
				err := testSyncingViaDirectSubscribe(chnk, n)
				if err != nil {
					t.Fatal(err)
				}
			}
		}
	}
}

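//testSyncingViaGlobalSync sets up a simulation in which every node runs the
//streamer service with automatic syncing enabled, uploads chunkCount chunks
//via a randomly selected node and verifies that all nodes end up storing the
//chunks they are responsible for.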
func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {

			id := ctx.Config.ID
			addr := network.NewAddrFromNodeID(id)
			store, datadir, err := createTestLocalStorageForID(id, addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				os.RemoveAll(datadir)
				store.Close()
			}
			localStore := store.(*storage.LocalStore)
			db := storage.NewDBAPI(localStore)
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, db)

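			//start the registry with syncing enabled, so that nodes
			//subscribe to their peers' SYNC streams automatically;
			//SyncUpdateDelay controls how quickly subscriptions react
			//to newly discovered peers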
			r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
				DoSync:          true,
				SyncUpdateDelay: 3 * time.Second,
			})
			bucket.Store(bucketKeyRegistry, r)

			return r, cleanup, nil

		},
	})
	defer sim.Close()

	log.Info("Initializing test config")

	conf := &synctestConfig{}
	//map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[discover.NodeID][]int)
	//map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]discover.NodeID)
	//array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		t.Fatal(err)
	}

	ctx, cancelSimRun := context.WithTimeout(context.Background(), 1*time.Minute)
	defer cancelSimRun()

	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
			a := network.ToOverlayAddr(n.Bytes())
			//append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on overlay addr,
			//the p2p/simulations check func triggers on discover.NodeID,
			//so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		//select a random up node;
		//this is the node selected for upload
		node := sim.RandomUpNode()
		item, ok := sim.NodeItem(node.ID, bucketKeyStore)
		if !ok {
			return fmt.Errorf("No localstore")
		}
		lstore := item.(*storage.LocalStore)
		hashes, err := uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
		if err != nil {
			return err
		}
		conf.hashes = append(conf.hashes, hashes...)
		mapKeysToNodes(conf)

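		//wait until the kademlia of every node reports healthy
		//(2 is the minimum proximity bin size for the health check)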
		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

		// Chunk presence check: repeated until all expected chunks are found
		// in the local store of every node, or until the context times out.
		allSuccess := false
		var gDir string
		var globalStore *mockdb.GlobalStore
		if *useMockStore {
			gDir, globalStore, err = createGlobalStore()
			if err != nil {
				return fmt.Errorf("Something went wrong; using mockStore enabled but globalStore is nil")
			}
			defer func() {
				os.RemoveAll(gDir)
				err := globalStore.Close()
				if err != nil {
					log.Error("Error closing global store! %v", "err", err)
				}
			}()
		}
		for !allSuccess {
			//assume success for this round; a chunk missing on any node resets it
			allSuccess = true
			for _, id := range nodeIDs {
				//for each expected chunk, check if it is in the local store
				localChunks := conf.idToChunksMap[id]
				localSuccess := true
				for _, ch := range localChunks {
					//get the real chunk by its index in the hashes array
					chunk := conf.hashes[ch]
					log.Trace(fmt.Sprintf("checking node for chunk: %s", chunk))
					//check if the expected chunk is indeed in the localstore
					var err error
					if *useMockStore {
						//use the globalStore if the mockStore should be used; in that case,
						//the complete localStore stack is bypassed for getting the chunk
						_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
					} else {
						//use the actual localstore
						item, ok := sim.NodeItem(id, bucketKeyStore)
						if !ok {
							return fmt.Errorf("Error accessing localstore")
						}
						lstore := item.(*storage.LocalStore)
						_, err = lstore.Get(ctx, chunk)
					}
					if err != nil {
						log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
						localSuccess = false
					} else {
						log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
					}
				}
				allSuccess = allSuccess && localSuccess
			}
		}
		if !allSuccess {
			return fmt.Errorf("Not all chunks succeeded!")
		}
		return nil
	})

	if result.Error != nil {
		t.Fatal(result.Error)
	}
}

/*
The test generates the given number of chunks

For every chunk generated, the nearest node addresses
are identified, and we verify that the nodes closest to the
chunk addresses actually do have the chunks in their local stores.

The test loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy
kademlia network. The snapshot should have 'streamer' in its service list.
*/
func testSyncingViaDirectSubscribe(chunkCount int, nodeCount int) error {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {

			id := ctx.Config.ID
			addr := network.NewAddrFromNodeID(id)
			store, datadir, err := createTestLocalStorageForID(id, addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				os.RemoveAll(datadir)
				store.Close()
			}
			localStore := store.(*storage.LocalStore)
			db := storage.NewDBAPI(localStore)
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, db)

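			//create the registry with no RegistryOptions, i.e. without
			//automatic syncing; subscriptions are set up manually below
			//via startSyncing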
			r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), nil)
			bucket.Store(bucketKeyRegistry, r)

			fileStore := storage.NewFileStore(storage.NewNetStore(localStore, nil), storage.NewFileStoreParams())
			bucket.Store(bucketKeyFileStore, fileStore)

			return r, cleanup, nil

		},
	})
	defer sim.Close()

	ctx, cancelSimRun := context.WithTimeout(context.Background(), 1*time.Minute)
	defer cancelSimRun()

	conf := &synctestConfig{}
	//map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[discover.NodeID][]int)
	//map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]discover.NodeID)
	//array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		return err
	}

	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
			a := network.ToOverlayAddr(n.Bytes())
			//append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on overlay addr,
			//the p2p/simulations check func triggers on discover.NodeID,
			//so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		var subscriptionCount int

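		//before uploading, wait for all expected SYNC subscriptions to be
		//established: each received "stream" protocol message with msg code 4
		//(the subscribe message) is counted as one subscription coming in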
		filter := simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeMsgRecv).Protocol("stream").MsgCode(4)
		eventC := sim.PeerEvents(ctx, nodeIDs, filter)

		for j, node := range nodeIDs {
			log.Trace(fmt.Sprintf("Start syncing subscriptions: %d", j))
			//start syncing!
			item, ok := sim.NodeItem(node, bucketKeyRegistry)
			if !ok {
				return fmt.Errorf("No registry")
			}
			registry := item.(*Registry)

			var cnt int
			cnt, err = startSyncing(registry, conf)
			if err != nil {
				return err
			}
			//increment the number of subscriptions we need to wait for
			//by the count returned from startSyncing (SYNC subscriptions)
			subscriptionCount += cnt
		}

		for e := range eventC {
			if e.Error != nil {
				return e.Error
			}
			subscriptionCount--
			if subscriptionCount == 0 {
				break
			}
		}
		//select a random node for upload
		node := sim.RandomUpNode()
		item, ok := sim.NodeItem(node.ID, bucketKeyStore)
		if !ok {
			return fmt.Errorf("No localstore")
		}
		lstore := item.(*storage.LocalStore)
		hashes, err := uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
		if err != nil {
			return err
		}
		conf.hashes = append(conf.hashes, hashes...)
		mapKeysToNodes(conf)

		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

		var gDir string
		var globalStore *mockdb.GlobalStore
		if *useMockStore {
			gDir, globalStore, err = createGlobalStore()
			if err != nil {
				return fmt.Errorf("Something went wrong; using mockStore enabled but globalStore is nil")
			}
			defer os.RemoveAll(gDir)
		}
		// Chunk presence check: repeated until all expected chunks are found
		// in the local store of every node, or until the context times out.
		allSuccess := false
		for !allSuccess {
			//assume success for this round; a chunk missing on any node resets it
			allSuccess = true
			for _, id := range nodeIDs {
				//for each expected chunk, check if it is in the local store
				localChunks := conf.idToChunksMap[id]
				localSuccess := true
				for _, ch := range localChunks {
					//get the real chunk by its index in the hashes array
					chunk := conf.hashes[ch]
					log.Trace(fmt.Sprintf("checking node for chunk: %s", chunk))
					//check if the expected chunk is indeed in the localstore
					var err error
					if *useMockStore {
						//use the globalStore if the mockStore should be used; in that case,
						//the complete localStore stack is bypassed for getting the chunk
						_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
					} else {
						//use the actual localstore
						item, ok := sim.NodeItem(id, bucketKeyStore)
						if !ok {
							return fmt.Errorf("Error accessing localstore")
						}
						lstore := item.(*storage.LocalStore)
						_, err = lstore.Get(ctx, chunk)
					}
					if err != nil {
						log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
						localSuccess = false
					} else {
						log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
					}
				}
				allSuccess = allSuccess && localSuccess
			}
		}
		if !allSuccess {
			return fmt.Errorf("Not all chunks succeeded!")
		}
		return nil
	})

	if result.Error != nil {
		return result.Error
	}

	log.Info("Simulation terminated")
	return nil
}

//startSyncing is the server func to start syncing;
//it issues `RequestSubscriptionMsg` to peers, based on po, by iterating
//over the kademlia's bins with `EachBin`.
//It returns the number of subscriptions requested.
func startSyncing(r *Registry, conf *synctestConfig) (int, error) {
	var err error

	kad, ok := r.delivery.overlay.(*network.Kademlia)
	if !ok {
		return 0, fmt.Errorf("Not a Kademlia!")
	}

	subCnt := 0
	//iterate over each bin and solicit needed subscription to bins
	kad.EachBin(r.addr.Over(), pof, 0, func(conn network.OverlayConn, po int) bool {
		//request the full history for the bin(s) we subscribe to
		//(an empty Range sets no bounds)
		histRange := &Range{}

		subCnt++
		err = r.RequestSubscription(conf.addrToIDMap[string(conn.Address())], NewStream("SYNC", FormatSyncBinKey(uint8(po)), true), histRange, Top)
		if err != nil {
			log.Error(fmt.Sprintf("Error in RequestSubsciption! %v", err))
			return false
		}
		return true

	})
	return subCnt, nil
}

//map chunk keys to the addresses of the nodes expected to store them
func mapKeysToNodes(conf *synctestConfig) {
	kmap := make(map[string][]int)
	nodemap := make(map[string][]int)
	//build a pot for chunk hashes
	np := pot.NewPot(nil, 0)
	indexmap := make(map[string]int)
	for i, a := range conf.addrs {
		indexmap[string(a)] = i
		np, _, _ = pot.Add(np, a, pof)
	}
	//for each chunk hash, run EachNeighbour on the pot of node addresses to identify the closest nodes
	log.Trace(fmt.Sprintf("Generated chunk hash(es): %v", conf.hashes))
	for i := 0; i < len(conf.hashes); i++ {
		pl := 256 //highest possible proximity
		var nns []int
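		//EachNeighbour visits node addresses in order of descending proximity
		//to the chunk hash; keep collecting until the closest bin(s) hold at
		//least testMinProxBinSize nodes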
		np.EachNeighbour([]byte(conf.hashes[i]), pof, func(val pot.Val, po int) bool {
			a := val.([]byte)
			if pl < 256 && pl != po {
				return false
			}
			if pl == 256 || pl == po {
				log.Trace(fmt.Sprintf("appending %s", conf.addrToIDMap[string(a)]))
				nns = append(nns, indexmap[string(a)])
				nodemap[string(a)] = append(nodemap[string(a)], i)
			}
			if pl == 256 && len(nns) >= testMinProxBinSize {
				//testMinProxBinSize has been reached at this po, so save it;
				//we will still add all other nodes at the same po
				pl = po
			}
			return true
		})
		kmap[string(conf.hashes[i])] = nns
	}
	for addr, chunks := range nodemap {
		//this selects which chunks are expected to be found with the given node
		conf.idToChunksMap[conf.addrToIDMap[addr]] = chunks
	}
	log.Debug(fmt.Sprintf("Map of expected chunks by ID: %v", conf.idToChunksMap))
	conf.chunksToNodesMap = kmap
}

//upload chunkCount files of chunkSize bytes each to a single local node store
func uploadFileToSingleNodeStore(id discover.NodeID, chunkCount int, lstore *storage.LocalStore) ([]storage.Address, error) {
	log.Debug(fmt.Sprintf("Uploading to node id: %s", id))
	fileStore := storage.NewFileStore(lstore, storage.NewFileStoreParams())
	size := chunkSize
	var rootAddrs []storage.Address
	for i := 0; i < chunkCount; i++ {
		rk, wait, err := fileStore.Store(context.TODO(), io.LimitReader(crand.Reader, int64(size)), int64(size), false)
		if err != nil {
			return nil, err
		}
		err = wait(context.TODO())
		if err != nil {
			return nil, err
		}
		rootAddrs = append(rootAddrs, rk)
	}

	return rootAddrs, nil
}