diff --git a/TODO b/TODO index d45b35047becca1f9445bfb73692a67e72fdf582..aeb58229b21acaa424a532a23a363a3132ae3ac0 100644 --- a/TODO +++ b/TODO @@ -52,42 +52,18 @@ OPTIMIZATIONS * Redis big lists as linked lists of small ziplists? Possibly a simple heuristic that join near nodes when some node gets smaller than the low_level, and split it into two if gets bigger than high_level. -REPORTING ========= - -* Better INFO output with sections. - RANDOM ====== +* Server should abort when getcwd() fails if there is some kind of persistence configured. Check this in the cron loop. * Clients should be closed as far as the output buffer list is bigger than a given number of elements (configurable in redis.conf) * Should the redis default configuration, and the default redis.conf, just bind 127.0.0.1? KNOWN BUGS ========== -* What happens in the following scenario: - 1) We are reading an AOF file. - 2) SETEX FOO 5 BAR - 3) APPEND FOO ZAP - What happens if between 1 and 2 for some reason (system under huge load - or alike) too many time passes? We should prevent expires while the - AOF is loading. * #519: Slave may have expired keys that were never read in the master (so a DEL is not sent in the replication channel) but are already expired since a lot of time. Maybe after a given delay that is undoubtedly greater than the replication link latency we should expire this key on the slave on access? - -DISKSTORE TODO -============== - -* Fix FLUSHALL/FLUSHDB: the queue of pending reads/writes should be handled. -* Check that 00/00 and ff/ff exist at startup, otherwise exit with error. -* Implement sync flush option, where data is written synchronously on disk when a command is executed. -* Implement MULTI/EXEC as transaction abstract API to diskstore.c, with transaction_start, transaction_end, and a journal to recover. -* Stop BGSAVE thread on shutdown and any other condition where the child is killed during normal bgsave. 
-* Fix RANDOMKEY to really do something interesting -* Fix DBSIZE to really do something interesting -* Add a DEBUG command to check if an entry is or not in memory currently -* dscache.c near 236, kobj = createStringObject... we could use static obj. diff --git a/src/Makefile b/src/Makefile index ee4bfc5001986fa6dee246fcb33b63a43a246273..a45002de2973cea11325fa00ae0eaba9fd669f2c 100644 --- a/src/Makefile +++ b/src/Makefile @@ -169,7 +169,7 @@ dependencies: ../deps/jemalloc/lib/libjemalloc.a: cd ../deps/jemalloc && ./configure $(JEMALLOC_CFLAGS) --with-jemalloc-prefix=je_ --enable-cc-silence && $(MAKE) lib/libjemalloc.a -redis-server: $(OBJ) +redis-server: dependencies $(OBJ) $(QUIET_LINK)$(CC) -o $(PRGNAME) $(CCOPT) $(DEBUG) $(OBJ) $(CCLINK) $(ALLOC_LINK) ../deps/lua/src/liblua.a redis-benchmark: dependencies $(BENCHOBJ) diff --git a/src/config.c b/src/config.c index 6f9657dde8ffde20b0b794f96dc81a1fdc5a171b..d470dab1a1d80614be08ab53557a26c1c7b91eb8 100644 --- a/src/config.c +++ b/src/config.c @@ -508,12 +508,11 @@ void configGetCommand(redisClient *c) { if (stringmatch(pattern,"dir",0)) { char buf[1024]; - addReplyBulkCString(c,"dir"); - if (getcwd(buf,sizeof(buf)) == NULL) { + if (getcwd(buf,sizeof(buf)) == NULL) buf[0] = '\0'; - } else { - addReplyBulkCString(c,buf); - } + + addReplyBulkCString(c,"dir"); + addReplyBulkCString(c,buf); matches++; } if (stringmatch(pattern,"dbfilename",0)) { diff --git a/src/t_hash.c b/src/t_hash.c index 4b9b37d69fed10b3c3586583dc0b38997647e887..83ca5b2754c3fce9d9e9bede7e1a1d95e90ba7f2 100644 --- a/src/t_hash.c +++ b/src/t_hash.c @@ -403,8 +403,11 @@ void hdelCommand(redisClient *c) { for (j = 2; j < c->argc; j++) { if (hashTypeDelete(o,c->argv[j])) { - if (hashTypeLength(o) == 0) dbDelete(c->db,c->argv[1]); deleted++; + if (hashTypeLength(o) == 0) { + dbDelete(c->db,c->argv[1]); + break; + } } } if (deleted) { diff --git a/tests/unit/type/hash.tcl b/tests/unit/type/hash.tcl index 
9b043d3f3c3614f8f8154922a1c36dc4bd7dbacc..718bc04ad3a98988cc09c4e91c59965f7baba362 100644 --- a/tests/unit/type/hash.tcl +++ b/tests/unit/type/hash.tcl @@ -235,6 +235,13 @@ start_server {tags {"hash"}} { r hgetall myhash } {b 2} + test {HDEL - hash becomes empty before deleting all specified fields} { + r del myhash + r hmset myhash a 1 b 2 c 3 + assert_equal 3 [r hdel myhash a b c d e] + assert_equal 0 [r exists myhash] + } + test {HEXISTS} { set rv {} set k [lindex [array names smallhash *] 0]