// Package recovery
package recovery

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io/ioutil"
	"math/rand"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/klauspost/reedsolomon"
	"github.com/yottachain/YTFS"
	ytfsCommon "github.com/yottachain/YTFS/common"
	ytfsOpt "github.com/yottachain/YTFS/opt"
)

// TestNewDataRecovery verifies a codec can be constructed with the default
// recovery options and nil storage/network dependencies.
func TestNewDataRecovery(t *testing.T) {
	if _, err := NewDataCodec(nil, nil, DefaultRecoveryOption()); err != nil {
		t.Fail()
	}
}

// randomFill returns a buffer of the given size whose first 16 bytes are
// pseudo-random; the rest stays zero. Only the head is randomized so shard
// generation is cheap while each shard still hashes to a distinct value.
func randomFill(size uint32) []byte {
	buf := make([]byte, size)
	head := make([]byte, 16)
	rand.Seed(int64(time.Now().Nanosecond()))
	rand.Read(head)
	// If size < 16, copy truncates to len(buf).
	copy(buf, head)
	return buf
}

func createShards(dataShards, parityShards int) ([]common.Hash, [][]byte) {
D
DoMyJob 已提交
37 38
	shards := make([][]byte, dataShards+parityShards)
	hashes := make([]common.Hash, dataShards+parityShards)
39
	dataBlkSize := ytfsOpt.DefaultOptions().DataBlockSize
D
DoMyJob 已提交
40
	for i := 0; i < dataShards; i++ {
41 42 43 44 45
		shards[i] = randomFill(dataBlkSize)
		sum256 := sha256.Sum256(shards[i])
		hashes[i] = common.BytesToHash(sum256[:])
	}

D
DoMyJob 已提交
46
	for i := dataShards; i < dataShards+parityShards; i++ {
47 48 49
		shards[i] = make([]byte, dataBlkSize)
	}

D
DoMyJob 已提交
50
	return hashes, shards
51 52
}

53
func createData(dataShards, parityShards int) ([]common.Hash, [][]byte) {
54 55 56 57
	hashes, shards := createShards(dataShards, parityShards)
	enc, _ := reedsolomon.New(dataShards, parityShards)
	enc.Encode(shards)
	//update parity hash
D
DoMyJob 已提交
58
	for i := dataShards; i < dataShards+parityShards; i++ {
59 60 61 62
		sum256 := sha256.Sum256(shards[i])
		hashes[i] = common.BytesToHash(sum256[:])
	}

63 64 65 66 67 68
	return hashes, shards
}

func initailP2PMockWithShards(hashes []common.Hash, shards [][]byte, delays ...time.Duration) (P2PNetwork, []P2PLocation) {
	locations := make([]P2PLocation, len(hashes))
	for i := 0; i < len(hashes); i++ {
69 70 71
		locations[i] = P2PLocation(common.BytesToAddress(hashes[i][:]))
	}

72 73
	p2p, _ := InititalP2PMock(locations, shards, delays...)
	return p2p, locations
74 75 76 77 78 79 80 81 82 83
}

func TestDataRecovery(t *testing.T) {
	rootDir, err := ioutil.TempDir("/tmp", "ytfsTest")
	config := ytfsOpt.DefaultOptions()
	// defer os.Remove(config.StorageName)

	yd, err := ytfs.Open(rootDir, config)

	recConfig := DefaultRecoveryOption()
84 85
	hashes, shards := createData(recConfig.DataShards, recConfig.ParityShards)
	p2pNet, p2pNodes := initailP2PMockWithShards(hashes, shards)
86

D
DoMyJob 已提交
87
	for i := 0; i < len(shards); i++ {
D
DoMyJob 已提交
88
		fmt.Printf("Data[%d] = %x:%x\n", i, hashes[i], shards[i][:20])
89
	}
90

91
	codec, err := NewDataCodec(yd, p2pNet, recConfig)
92 93 94 95
	if err != nil {
		t.Fail()
	}

96
	tdList := []*TaskDescription{}
D
DoMyJob 已提交
97
	for i := 0; i < len(shards); i++ {
98
		td := &TaskDescription{
99
			uint64(i),
100
			hashes,
101
			p2pNodes,
102
			[]uint32{uint32(i)},
103 104 105 106 107
		}
		codec.RecoverData(td)
		tdList = append(tdList, td)
	}

D
DoMyJob 已提交
108 109
	time.Sleep(2 * time.Second)
	for _, td := range tdList {
110 111 112 113
		tdStatus := codec.RecoverStatus(td)
		if tdStatus.Status != SuccessTask {
			t.Fatalf("ERROR: td status(%d): %s", tdStatus.Status, tdStatus.Desc)
		} else {
114 115 116
			data, err := yd.Get(ytfsCommon.IndexTableKey(td.Hashes[td.RecoverIDs[0]]))
			if err != nil || bytes.Compare(data, shards[td.RecoverIDs[0]]) != 0 {
				t.Fatalf("Error: err(%v), dataCompare (%d). hash(%v) data(%v) shards(%v)",
D
DoMyJob 已提交
117 118 119
					err, bytes.Compare(data, shards[td.RecoverIDs[0]]),
					td.Hashes[td.RecoverIDs[0]],
					data[:20], shards[td.RecoverIDs[0]][:20])
120 121 122
			}
		}
	}
123
}
124 125 126 127 128 129 130 131 132

func TestMultiplyDataRecovery(t *testing.T) {
	rootDir, err := ioutil.TempDir("/tmp", "ytfsTest")
	config := ytfsOpt.DefaultOptions()
	// defer os.Remove(config.StorageName)

	yd, err := ytfs.Open(rootDir, config)

	recConfig := DefaultRecoveryOption()
133 134
	hashes, shards := createData(recConfig.DataShards, recConfig.ParityShards)
	p2pNet, p2pNodes := initailP2PMockWithShards(hashes, shards)
135

D
DoMyJob 已提交
136
	for i := 0; i < len(shards); i++ {
137 138 139
		fmt.Printf("Data[%d] = %x:%x\n", i, hashes[i], shards[i][:20])
	}

140
	codec, err := NewDataCodec(yd, p2pNet, recConfig)
141 142 143 144
	if err != nil {
		t.Fail()
	}

D
DoMyJob 已提交
145 146 147
	td := &TaskDescription{
		uint64(2),
		hashes,
148
		p2pNodes,
D
DoMyJob 已提交
149 150
		[]uint32{0, 1, 2},
	}
151 152
	codec.RecoverData(td)

D
DoMyJob 已提交
153
	time.Sleep(2 * time.Second)
154 155 156 157
	tdStatus := codec.RecoverStatus(td)
	if tdStatus.Status != SuccessTask {
		t.Fatalf("ERROR: td status(%d): %s", tdStatus.Status, tdStatus.Desc)
	} else {
D
DoMyJob 已提交
158
		for i := 0; i < len(td.RecoverIDs); i++ {
159 160 161
			data, err := yd.Get(ytfsCommon.IndexTableKey(td.Hashes[td.RecoverIDs[i]]))
			if err != nil || bytes.Compare(data, shards[td.RecoverIDs[i]]) != 0 {
				t.Fatalf("Error: err(%v), dataCompare (%d). hash(%v) data(%v) shards(%v)",
D
DoMyJob 已提交
162 163 164
					err, bytes.Compare(data, shards[td.RecoverIDs[i]]),
					td.Hashes[td.RecoverIDs[i]],
					data[:20], shards[td.RecoverIDs[i]][:20])
165 166 167 168 169 170 171 172 173 174 175 176 177 178
			}
		}
	}
}

func TestDataRecoveryError(t *testing.T) {
	rootDir, err := ioutil.TempDir("/tmp", "ytfsTest")
	config := ytfsOpt.DefaultOptions()
	// defer os.Remove(config.StorageName)

	yd, err := ytfs.Open(rootDir, config)

	recConfig := DefaultRecoveryOption()
	recConfig.TimeoutInMS = 10
179 180
	hashes, shards := createData(recConfig.DataShards, recConfig.ParityShards)
	p2pNet, p2pNodes := initailP2PMockWithShards(hashes, shards)
181

D
DoMyJob 已提交
182
	for i := 0; i < len(shards); i++ {
183 184 185
		fmt.Printf("Data[%d] = %x:%x\n", i, hashes[i], shards[i][:20])
	}

186
	codec, err := NewDataCodec(yd, p2pNet, recConfig)
187 188 189 190 191
	if err != nil {
		t.Fail()
	}

	recIds := make([]uint32, recConfig.ParityShards+1)
D
DoMyJob 已提交
192 193
	for i := 0; i < len(recIds); i++ {
		recIds[i] = uint32(i)
194 195
	}

D
DoMyJob 已提交
196
	td := &TaskDescription{
197 198
		uint64(0),
		hashes,
199
		p2pNodes,
200 201 202 203 204 205 206 207 208 209 210 211 212 213
		recIds,
	}
	codec.RecoverData(td)

	tdStatus := codec.RecoverStatus(td)
	if tdStatus.Status != ErrorTask {
		t.Fatalf("ERROR: td status(%d): %s", tdStatus.Status, tdStatus.Desc)
	} else {
		t.Log("Expected error:", tdStatus)
	}

	td = &TaskDescription{
		uint64(1),
		hashes,
214
		p2pNodes,
215 216 217
		[]uint32{0},
	}
	codec.RecoverData(td)
D
DoMyJob 已提交
218
	time.Sleep(2 * time.Second)
219 220 221 222 223 224 225
	tdStatus = codec.RecoverStatus(td)
	if tdStatus.Status != ErrorTask {
		t.Fatalf("ERROR: td status(%d): %s", tdStatus.Status, tdStatus.Desc)
	} else {
		t.Log("Expected error:", tdStatus)
	}
}
D
DoMyJob 已提交
226

227 228 229 230 231 232 233 234 235 236 237
func setupBenchmarkEnv(recConfig *DataCodecOptions, p2pDelays...time.Duration) (*DataRecoverEngine, []common.Hash, []P2PLocation) {
	hashes, shards := createData(recConfig.DataShards, recConfig.ParityShards)
	p2pNet, p2pNodes := initailP2PMockWithShards(hashes, shards, p2pDelays...)

	codec, _ := NewDataCodec(nil, p2pNet, recConfig)
	return codec, hashes, p2pNodes
}

func BenchmarkPureDataRecovery(b *testing.B) {
	dataShards, parityShards := 5, 3
	_, shards := createData(dataShards, parityShards)
D
DoMyJob 已提交
238 239 240 241 242 243 244 245 246 247 248
	rsEnc, err := reedsolomon.New(dataShards, parityShards)
	if err != nil {
		b.Fatal(err)
	}
	missID := rand.Int() % len(shards)
	shards[missID] = nil

	for n := 0; n < b.N; n++ {
		rsEnc.Reconstruct(shards)
	}
}
249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299

func BenchmarkFastP2PDataRecovery(b *testing.B) {
	recConfig := DefaultRecoveryOption()
	codec, hashes, p2pNodes := setupBenchmarkEnv(recConfig, []time.Duration{250,250,250,250,250,250,250}...)

	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		done := make(chan interface{}, 1)
		td := &TaskDescription{
			uint64(rand.Int63()),
			hashes,
			p2pNodes,
			[]uint32{uint32(rand.Intn(len(hashes)))},
		}
		codec.doRecoverData(td, done)
	}
}

// BenchmarkSlowP2PDataRecovery benchmarks single-shard recovery over a mock
// network with a uniform per-node delay of 25.
// NOTE(review): "slow" uses a smaller delay value (25) than "fast" (250) —
// the naming may be inverted, or the mock may interpret the value inversely;
// confirm against InititalP2PMock.
func BenchmarkSlowP2PDataRecovery(b *testing.B) {
	recConfig := DefaultRecoveryOption()
	codec, hashes, p2pNodes := setupBenchmarkEnv(recConfig, 25, 25, 25, 25, 25, 25, 25)

	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		done := make(chan interface{}, 1)
		td := &TaskDescription{
			uint64(rand.Int63()),
			hashes,
			p2pNodes,
			[]uint32{uint32(rand.Intn(len(hashes)))},
		}
		codec.doRecoverData(td, done)
	}
}

// BenchmarkUnevenP2PDataRecovery benchmarks single-shard recovery over a
// mock network where each node has a different delay, simulating a
// heterogeneous peer set.
func BenchmarkUnevenP2PDataRecovery(b *testing.B) {
	recConfig := DefaultRecoveryOption()
	codec, hashes, p2pNodes := setupBenchmarkEnv(recConfig, 250, 211, 173, 136, 99, 62, 25)

	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		done := make(chan interface{}, 1)
		td := &TaskDescription{
			uint64(rand.Int63()),
			hashes,
			p2pNodes,
			[]uint32{uint32(rand.Intn(len(hashes)))},
		}
		codec.doRecoverData(td, done)
	}
}