// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

// http://www.apache.org/licenses/LICENSE-2.0

// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package master

import (
	"context"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
	log "github.com/sirupsen/logrus"
)

const (
	// DefaultLockPath is the default etcd master lock path.
	DefaultLockPath = "/master/lock"
	// DefaultStatePath is the default etcd key for master state.
	DefaultStatePath = "/master/state"
	// DefaultAddrPath is the default etcd key for master address.
	DefaultAddrPath = "/master/addr"
)

// EtcdClient is the etcd client that the master uses for fault
// tolerance and service registration.
type EtcdClient struct {
	lockPath  string
	statePath string
	client    *clientv3.Client
	lock      *concurrency.Mutex
	sess      *concurrency.Session
}

// NewEtcdClient creates a new EtcdClient.
func NewEtcdClient(endpoints []string, addr string, lockPath, addrPath, statePath string, ttlSec int) (*EtcdClient, error) {
	log.Debugf("Connecting to etcd at %v", endpoints)
	// TODO(helin): gracefully shut down the etcd store. Because the
	// etcd store holds an etcd lock, even though the lock will expire
	// when the lease times out, we need to implement graceful
	// shutdown to release the lock.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		return nil, err
	}

	sess, err := concurrency.NewSession(cli, concurrency.WithTTL(ttlSec))
	if err != nil {
		return nil, err
	}

	lock := concurrency.NewMutex(sess, lockPath)
	// It's fine for the lock to get stuck: in that case we have
	// multiple master servers running (we are configured to have
	// only one master running, but a split-brain problem may cause
	// multiple master servers to run), and the cluster management
	// software will kill one of them.
	log.Debugf("Trying to acquire lock at %s.", lockPath)
	err = lock.Lock(context.TODO())
	if err != nil {
		return nil, err
	}
	log.Debugf("Successfully acquired lock at %s.", lockPath)

	put := clientv3.OpPut(addrPath, addr)
	resp, err := cli.Txn(context.Background()).If(lock.IsOwner()).Then(put).Commit()
	if err != nil {
		return nil, err
	}

	if !resp.Succeeded {
		log.Fatal("No longer owns the master lock. Exiting.")
	}

	e := &EtcdClient{
		lockPath:  lockPath,
		statePath: statePath,
		client:    cli,
		lock:      lock,
		sess:      sess,
	}

	return e, nil
}
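
// exampleStartMaster is a minimal usage sketch (not part of the original
// file): it shows how a master process might construct an EtcdClient with
// the default key paths. The etcd endpoint, the advertised master address,
// and the 30-second session TTL are hypothetical values.
func exampleStartMaster() (*EtcdClient, error) {
	endpoints := []string{"127.0.0.1:2379"} // hypothetical etcd endpoint
	addr := "127.0.0.1:8080"                // hypothetical address registered under DefaultAddrPath
	return NewEtcdClient(endpoints, addr, DefaultLockPath, DefaultAddrPath, DefaultStatePath, 30)
}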

// Save saves the state into etcd.
func (e *EtcdClient) Save(state []byte) error {
	ctx := context.TODO()
	put := clientv3.OpPut(e.statePath, string(state))
	resp, err := e.client.Txn(ctx).If(e.lock.IsOwner()).Then(put).Commit()
	if err != nil {
		return err
	}

	if !resp.Succeeded {
		log.Errorln("No longer owns the lock, trying to lock again")
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		err := e.lock.Lock(ctx)
		cancel()
		if err != nil {
			// We lost the master lock and cannot acquire
			// it back; it means some other master is
			// already started. We don't want the cluster
			// management system to kill the master server
			// that is holding the lock and running
			// correctly. So the most feasible solution is
			// to kill the current master server. The current
			// state is not saved, but the trainer's RPC
			// call will fail, so the trainer will retry.
			log.Fatalf("Could not acquire the lock at %s: %v. Exiting.", e.lockPath, err)
		}
		log.Infof("Successfully acquired lock at %s.", e.lockPath)
		return e.Save(state)
	}

	return nil
}

// Load loads the state from etcd.
func (e *EtcdClient) Load() ([]byte, error) {
	ctx := context.TODO()
	get := clientv3.OpGet(e.statePath)

	resp, err := e.client.Txn(ctx).If(e.lock.IsOwner()).Then(get).Commit()
	if err != nil {
		return nil, err
	}

	if !resp.Succeeded {
		log.Errorln("No longer owns the lock, trying to lock and load again.")
		err = e.lock.Lock(context.Background())
		if err != nil {
			return nil, err
		}

		return e.Load()
	}

	kvs := resp.Responses[0].GetResponseRange().Kvs
	if len(kvs) == 0 {
		// No state exists
		return nil, nil
	}

	state := kvs[0].Value
	return state, nil
}
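
// exampleSaveLoad is a minimal sketch (not part of the original file) of the
// Save/Load round trip used for fault tolerance: the master periodically
// saves its serialized state, and a restarted master loads it back. The
// state bytes here are a hypothetical placeholder.
func exampleSaveLoad(e *EtcdClient) error {
	state := []byte("serialized-master-state") // hypothetical serialized state
	if err := e.Save(state); err != nil {
		return err
	}

	loaded, err := e.Load()
	if err != nil {
		return err
	}

	log.Infof("loaded %d bytes of master state", len(loaded))
	return nil
}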

// Shutdown shuts down the etcd client gracefully.
func (e *EtcdClient) Shutdown() error {
	err := e.sess.Close()
	newErr := e.client.Close()
	if newErr != nil {
		if err == nil {
			err = newErr
		} else {
			log.Errorln(newErr)
		}
	}

	return err
}

// GetKey gets the value of the specified key.
func GetKey(c *clientv3.Client, key string, timeout time.Duration) (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
180 181 182 183 184 185 186 187 188 189 190 191 192
	resp, err := c.Get(ctx, key)
	cancel()
	if err != nil {
		return "", err
	}
	kvs := resp.Kvs
	if len(kvs) == 0 {
		return "", nil
	}
	v := kvs[0].Value
	return string(v), nil
}
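
// exampleGetAddr is a minimal sketch (not part of the original file) showing
// how a client process might read the registered master address with GetKey.
// The 3-second timeout is an arbitrary, hypothetical choice.
func exampleGetAddr(c *clientv3.Client) (string, error) {
	return GetKey(c, DefaultAddrPath, 3*time.Second)
}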

// watchKey watches the specified key and sends its value to valChan when there is an event.
func watchKey(c *clientv3.Client, key string, valChan chan<- string) {
	rch := c.Watch(context.Background(), key)
	for wresp := range rch {
		for _, ev := range wresp.Events {
			// If the received event is DELETE, the value will be an empty string.
			log.Infof("received event %s, %q : %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
			valChan <- string(ev.Kv.Value)
		}
	}
}
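
// exampleWatchAddr is a minimal sketch (not part of the original file)
// showing how watchKey is typically driven: run it in a goroutine and
// consume address updates from the channel.
func exampleWatchAddr(c *clientv3.Client) {
	ch := make(chan string)
	go watchKey(c, DefaultAddrPath, ch)
	for v := range ch {
		log.Infof("master address changed to %q", v)
	}
}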