Unverified commit 1ebb8a2b, authored by satoru, committed by GitHub

Fix some errors reported by golangci-lint (#39)

Parent 5da313a6
......@@ -89,7 +89,10 @@ func doTesting(store object.ObjectStorage, key string, data []byte) error {
return fmt.Errorf("Failed to get: %s", err)
}
data2, err := ioutil.ReadAll(p)
- p.Close()
+ _ = p.Close()
+ if err != nil {
+ return err
+ }
if !bytes.Equal(data, data2) {
return fmt.Errorf("Read wrong data")
}
......
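The `_ = p.Close()` change above is typical of golangci-lint's errcheck findings: a returned error must either be handled or explicitly discarded. Below is a minimal sketch of the same pattern; the helper name and package are hypothetical and not part of this PR.

package example

import (
	"io/ioutil"
	"os"
)

// readFile illustrates the two errcheck-friendly forms: the Open error is
// handled, and the Close error is discarded with an explicit `_ =`, which
// documents that ignoring it is intentional.
func readFile(path string) ([]byte, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer func() { _ = f.Close() }()
	return ioutil.ReadAll(f)
}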
......@@ -41,8 +41,9 @@ import (
"github.com/urfave/cli/v2"
)
- func MakeDaemon() {
- godaemon.MakeDaemon(&godaemon.DaemonAttr{})
+ func MakeDaemon() error {
+ _, _, err := godaemon.MakeDaemon(&godaemon.DaemonAttr{})
+ return err
}
func installHandler(mp string) {
......@@ -55,9 +56,9 @@ func installHandler(mp string) {
<-signalChan
go func() {
if runtime.GOOS == "linux" {
exec.Command("umount", mp, "-l").Run()
_ = exec.Command("umount", mp, "-l").Run()
} else if runtime.GOOS == "darwin" {
exec.Command("diskutil", "umount", "force", mp).Run()
_ = exec.Command("diskutil", "umount", "force", mp).Run()
}
}()
go func() {
......@@ -71,12 +72,12 @@ func installHandler(mp string) {
func mount(c *cli.Context) error {
go func() {
for port := 6060; port < 6100; port++ {
- http.ListenAndServe(fmt.Sprintf("127.0.0.1:%d", port), nil)
+ _ = http.ListenAndServe(fmt.Sprintf("127.0.0.1:%d", port), nil)
}
}()
go func() {
for port := 6070; port < 6100; port++ {
- agent.Listen(agent.Options{Addr: fmt.Sprintf("127.0.0.1:%d", port)})
+ _ = agent.Listen(agent.Options{Addr: fmt.Sprintf("127.0.0.1:%d", port)})
}
}()
if c.Bool("trace") {
......@@ -141,7 +142,9 @@ func mount(c *cli.Context) error {
logger.Infof("mount volume %s at %s", format.Name, mp)
if c.Bool("d") {
- MakeDaemon()
+ if err := MakeDaemon(); err != nil {
+ logger.Fatalf("Failed to make daemon: %s", err)
+ }
}
store := chunk.NewCachedStore(blob, chunkConf)
......
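Both goroutines in the mount hunk above deliberately discard the return value: ListenAndServe (and agent.Listen) is tried on successive ports, a failed bind simply advances the loop, and a successful listen blocks inside the call. A small sketch of that port-probing pattern follows; the function name is hypothetical, and it assumes the debug handlers on the default mux are what the real code serves.

package example

import (
	"fmt"
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof/* on http.DefaultServeMux
)

// servePprof probes ports until one can be bound: if a port is already taken,
// ListenAndServe returns immediately and the next port is tried; on success it
// blocks serving requests, so the loop effectively stops there.
func servePprof() {
	for port := 6060; port < 6100; port++ {
		_ = http.ListenAndServe(fmt.Sprintf("127.0.0.1:%d", port), nil)
	}
}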
......@@ -230,21 +230,6 @@ func (cache *cacheStore) stagePath(key string) string {
return filepath.Join(cache.dir, stagingDir, key)
}
- // flush cached block into disk
- func (cache *cacheStore) flush() {
- for {
- w := <-cache.pending
- path := cache.cachePath(w.key)
- if cache.capacity > 0 && cache.flushPage(path, w.page.Data) == nil {
- cache.add(w.key, int32(len(w.page.Data)), uint32(time.Now().Unix()))
- }
- cache.Lock()
- delete(cache.pages, w.key)
- cache.Unlock()
- w.page.Release()
- }
- }
func (cache *cacheStore) add(key string, size int32, atime uint32) {
cache.Lock()
defer cache.Unlock()
......
......@@ -62,16 +62,6 @@ func (p *Page) Slice(off, len int) *Page {
return np
}
- func (p *Page) isOffHeap() bool {
- if p.offheap {
- return true
- }
- if p.dep != nil {
- return p.dep.isOffHeap()
- }
- return false
- }
// Acquire increase the refcount
func (p *Page) Acquire() {
atomic.AddInt32(&p.refs, 1)
......
......@@ -24,14 +24,11 @@ import (
"time"
"github.com/juicedata/juicefs/pkg/meta"
"github.com/juicedata/juicefs/pkg/utils"
"github.com/juicedata/juicefs/pkg/vfs"
vfs "github.com/juicedata/juicefs/pkg/vfs"
"github.com/hanwen/go-fuse/v2/fuse"
)
- var logger = utils.GetLogger("juicefs")
type JFS struct {
fuse.RawFileSystem
cacheMode int
......@@ -259,7 +256,7 @@ func (fs *JFS) Read(cancel <-chan struct{}, in *fuse.ReadIn, buf []byte) (fuse.R
func (fs *JFS) Release(cancel <-chan struct{}, in *fuse.ReleaseIn) {
ctx := newContext(cancel, &in.InHeader)
defer releaseContext(ctx)
- vfs.Release(ctx, Ino(in.NodeId), in.Fh)
+ _ = vfs.Release(ctx, Ino(in.NodeId), in.Fh)
}
func (fs *JFS) Write(cancel <-chan struct{}, in *fuse.WriteIn, data []byte) (written uint32, code fuse.Status) {
......@@ -405,7 +402,9 @@ func (fs *JFS) StatFs(cancel <-chan struct{}, in *fuse.InHeader, out *fuse.Statf
}
func Main(conf *vfs.Config, options string, attrcacheto_, entrycacheto_, direntrycacheto_ float64) error {
- syscall.Setpriority(syscall.PRIO_PROCESS, os.Getpid(), -19)
+ if err := syscall.Setpriority(syscall.PRIO_PROCESS, os.Getpid(), -19); err != nil {
+ return err
+ }
imp := NewJFS()
imp.attrTimeout = time.Millisecond * time.Duration(attrcacheto_*1000)
......
......@@ -382,7 +382,7 @@ func (r *redisMeta) Truncate(ctx Context, inode Ino, flags uint8, length uint64,
_, err = tx.TxPipelined(c, func(pipe redis.Pipeliner) error {
pipe.Set(c, r.inodeKey(inode), r.marshal(&t), 0)
if old > length {
- pipe.ZAdd(c, delchunks, &redis.Z{float64(now.Unix()), r.delChunks(inode, length, old, maxchunk)})
+ pipe.ZAdd(c, delchunks, &redis.Z{Score: float64(now.Unix()), Member: r.delChunks(inode, length, old, maxchunk)})
} else if length > (old/ChunkSize+1)*ChunkSize {
// zero out last chunks
w := utils.NewBuffer(24)
......@@ -408,9 +408,9 @@ func (r *redisMeta) Truncate(ctx Context, inode Ino, flags uint8, length uint64,
const (
// fallocate
- fallocKeepSize = 0x01
- fallocPunchHole = 0x02
- fallocNoHideStale = 0x04 // reserved
+ fallocKeepSize = 0x01
+ fallocPunchHole = 0x02
+ // RESERVED: fallocNoHideStale = 0x04
fallocCollapesRange = 0x08
fallocZeroRange = 0x10
fallocInsertRange = 0x20
......@@ -718,7 +718,7 @@ func (r *redisMeta) Unlink(ctx Context, parent Ino, name string) syscall.Errno {
pipe.Set(c, r.inodeKey(inode), r.marshal(&attr), 0)
pipe.SAdd(c, r.sessionKey(r.sid), strconv.Itoa(int(inode)))
} else {
- pipe.ZAdd(c, delchunks, &redis.Z{float64(now.Unix()), r.delChunks(inode, 0, attr.Length, maxchunk)})
+ pipe.ZAdd(c, delchunks, &redis.Z{Score: float64(now.Unix()), Member: r.delChunks(inode, 0, attr.Length, maxchunk)})
pipe.Del(c, r.inodeKey(inode))
pipe.IncrBy(c, usedSpace, -align4K(attr.Length))
}
......@@ -939,7 +939,7 @@ func (r *redisMeta) Rename(ctx Context, parentSrc Ino, nameSrc string, parentDst
pipe.Set(c, r.inodeKey(dino), r.marshal(&tattr), 0)
pipe.SAdd(c, r.sessionKey(r.sid), strconv.Itoa(int(dino)))
} else {
- pipe.ZAdd(c, delchunks, &redis.Z{float64(now.Unix()), r.delChunks(dino, 0, tattr.Length, maxchunk)})
+ pipe.ZAdd(c, delchunks, &redis.Z{Score: float64(now.Unix()), Member: r.delChunks(dino, 0, tattr.Length, maxchunk)})
pipe.Del(c, r.inodeKey(dino))
pipe.IncrBy(c, usedSpace, -align4K(tattr.Length))
}
......@@ -1077,14 +1077,14 @@ func (r *redisMeta) cleanStaleSession(sid int64) {
func (r *redisMeta) cleanStaleSessions() {
now := time.Now()
- rng := &redis.ZRangeBy{"", strconv.Itoa(int(now.Add(time.Minute * -10).Unix())), 0, 100}
+ rng := &redis.ZRangeBy{Max: strconv.Itoa(int(now.Add(time.Minute * -10).Unix())), Count: 100}
staleSessions, _ := r.rdb.ZRangeByScore(c, allSessions, rng).Result()
for _, ssid := range staleSessions {
sid, _ := strconv.Atoi(ssid)
r.cleanStaleSession(int64(sid))
}
- rng = &redis.ZRangeBy{"", strconv.Itoa(int(now.Add(time.Minute * -3).Unix())), 0, 100}
+ rng = &redis.ZRangeBy{Max: strconv.Itoa(int(now.Add(time.Minute * -3).Unix())), Count: 100}
staleSessions, err := r.rdb.ZRangeByScore(c, allSessions, rng).Result()
if err != nil || len(staleSessions) == 0 {
return
......@@ -1116,7 +1116,7 @@ func (r *redisMeta) cleanStaleSessions() {
func (r *redisMeta) refreshSession() {
for {
now := time.Now()
- r.rdb.ZAdd(c, allSessions, &redis.Z{float64(now.Unix()), strconv.Itoa(int(r.sid))})
+ r.rdb.ZAdd(c, allSessions, &redis.Z{Score: float64(now.Unix()), Member: strconv.Itoa(int(r.sid))})
go r.cleanStaleSessions()
time.Sleep(time.Minute)
}
......@@ -1137,7 +1137,7 @@ func (r *redisMeta) deleteInode(inode Ino) error {
}
r.parseAttr(a, &attr)
_, err = r.rdb.TxPipelined(c, func(pipe redis.Pipeliner) error {
- pipe.ZAdd(c, delchunks, &redis.Z{float64(time.Now().Unix()), r.delChunks(inode, 0, attr.Length, maxchunk)})
+ pipe.ZAdd(c, delchunks, &redis.Z{Score: float64(time.Now().Unix()), Member: r.delChunks(inode, 0, attr.Length, maxchunk)})
pipe.Del(c, r.inodeKey(inode))
pipe.IncrBy(c, usedSpace, -align4K(attr.Length))
return nil
......@@ -1195,10 +1195,10 @@ func (r *redisMeta) buildSlice(ss []*slice) []Slice {
var chunks []Slice
root.visit(func(s *slice) {
if s.pos > pos {
- chunks = append(chunks, Slice{0, s.pos - pos, 0, s.pos - pos})
+ chunks = append(chunks, Slice{Size: s.pos - pos, Len: s.pos - pos})
pos = s.pos
}
- chunks = append(chunks, Slice{s.chunkid, s.size, s.off, s.len})
+ chunks = append(chunks, Slice{Chunkid: s.chunkid, Size: s.size, Off: s.off, Len: s.len})
pos += s.len
})
return chunks
......@@ -1280,7 +1280,7 @@ func (r *redisMeta) delChunks(inode Ino, start, end, maxchunkid uint64) string {
func (r *redisMeta) cleanupChunks() {
for {
now := time.Now()
- members, _ := r.rdb.ZRangeByScore(c, delchunks, &redis.ZRangeBy{strconv.Itoa(0), strconv.Itoa(int(now.Add(time.Hour).Unix())), 0, 1000}).Result()
+ members, _ := r.rdb.ZRangeByScore(c, delchunks, &redis.ZRangeBy{Min: strconv.Itoa(0), Max: strconv.Itoa(int(now.Add(time.Hour).Unix())), Count: 1000}).Result()
for _, member := range members {
ps := strings.Split(member, ":")
if len(ps) != 4 {
......@@ -1339,7 +1339,7 @@ func (r *redisMeta) deleteChunks(inode Ino, start, end, maxchunk uint64) {
logger.Warnf("delete chunk %d fail: %s, retry later", inode, err)
now := time.Now()
key := r.delChunks(inode, uint64((indx+uint32(j)))*ChunkSize, uint64((indx+uint32(j)+1))*ChunkSize, maxchunk)
- r.rdb.ZAdd(c, delchunks, &redis.Z{float64(now.Unix()), key})
+ r.rdb.ZAdd(c, delchunks, &redis.Z{Score: float64(now.Unix()), Member: key})
return
}
r.txn(func(tx *redis.Tx) error {
......
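The redis.Z, redis.ZRangeBy, and Slice literals in this file are all converted to keyed fields, which is the style govet's composites check asks for when initializing structs defined in another package: named fields keep working if the upstream struct gains or reorders fields. A short sketch of the keyed style against go-redis v8 follows; the function and key names are hypothetical, not taken from the PR.

package example

import (
	"context"
	"strconv"
	"time"

	"github.com/go-redis/redis/v8"
)

// markDeleted records a member in a sorted set, scored by the current time.
// Score and Member are named rather than relying on field order.
func markDeleted(rdb *redis.Client, key, member string) error {
	z := &redis.Z{Score: float64(time.Now().Unix()), Member: member}
	return rdb.ZAdd(context.Background(), key, z).Err()
}

// staleBefore lists up to 100 members whose score is older than t.
func staleBefore(rdb *redis.Client, key string, t time.Time) ([]string, error) {
	rng := &redis.ZRangeBy{Min: "-inf", Max: strconv.FormatInt(t.Unix(), 10), Count: 100}
	return rdb.ZRangeByScore(context.Background(), key, rng).Result()
}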
......@@ -85,7 +85,7 @@ func TestRedisClient(t *testing.T) {
if st := m.NewChunk(ctx, inode, 0, 0, &chunkid); st != 0 {
t.Fatalf("write chunk: %s", st)
}
- var s = meta.Slice{chunkid, 100, 0, 100}
+ var s = meta.Slice{Chunkid: chunkid, Size: 100, Len: 100}
if st := m.Write(ctx, inode, 0, 100, s); st != 0 {
t.Fatalf("write end: %s", st)
}
......@@ -239,7 +239,7 @@ func TestCompaction(t *testing.T) {
}
defer m.Unlink(ctx, 1, "f")
for i := 0; i < 50; i++ {
- if st := m.Write(ctx, inode, 0, uint32(i*100), meta.Slice{uint64(i) + 1, 100, 0, 100}); st != 0 {
+ if st := m.Write(ctx, inode, 0, uint32(i*100), meta.Slice{Chunkid: uint64(i) + 1, Size: 100, Len: 100}); st != 0 {
t.Fatalf("write %d: %s", i, st)
}
time.Sleep(time.Millisecond)
......@@ -273,7 +273,10 @@ func TestConcurrentWrite(t *testing.T) {
t.Logf("redis is not available: %s", err)
t.Skip()
}
- m.Init(meta.Format{Name: "test"})
+ err = m.Init(meta.Format{Name: "test"})
+ if err != nil {
+ t.Fatalf("Failed to initialize meta: %s", err)
+ }
ctx := meta.Background
var inode meta.Ino
var attr = &meta.Attr{}
......@@ -290,7 +293,7 @@ func TestConcurrentWrite(t *testing.T) {
go func(indx uint32) {
defer g.Done()
for j := 0; j < 1000; j++ {
- var slice = meta.Slice{1, 100, 0, 100}
+ var slice = meta.Slice{Chunkid: 1, Size: 100, Len: 100}
st := m.Write(ctx, inode, indx, 0, slice)
if st != 0 {
errno = st
......
......@@ -230,23 +230,6 @@ func (s *sliceReader) run() {
}
}
- func (s *sliceReader) invalidate() {
- switch s.state {
- case NEW:
- case BUSY:
- s.state = REFRESH
- // TODO: interrupt reader
- case READY:
- if s.refs > 0 {
- s.state = NEW
- go s.run()
- } else {
- s.state = INVALID
- s.delete() // nobody wants it anymore, so delete it
- }
- }
- }
func (s *sliceReader) drop() {
if s.state <= BREAK {
if s.refs == 0 {
......
......@@ -78,7 +78,6 @@ func Lookup(ctx Context, parent Ino, name string) (entry *meta.Entry, err syscal
var attr = &Attr{}
if parent == rootID {
if nleng == 2 && name[0] == '.' && name[1] == '.' {
- nleng = 1
name = name[:1]
}
n := getInternalNodeByName(name)
......
......@@ -190,7 +190,7 @@ func (c *chunkWriter) commitThread() {
f.Unlock()
if err == 0 {
- var ss = meta.Slice{s.id, s.length, s.soff, s.slen}
+ var ss = meta.Slice{Chunkid: s.id, Size: s.length, Off: s.soff, Len: s.slen}
err = f.w.m.Write(meta.Background, f.inode, c.indx, s.off, ss)
}
......@@ -368,9 +368,7 @@ func (f *fileWriter) GetLength() uint64 {
func (f *fileWriter) Truncate(length uint64) {
f.Lock()
defer f.Unlock()
- if length < f.length {
- // TODO: truncate write buffer
- }
+ // TODO: truncate write buffer if length < f.length
f.length = length
}
......