From ff9d66c4a9a6fc91233034bfbcaf5c65379a3bed Mon Sep 17 00:00:00 2001
From: antirez
Date: Tue, 27 Aug 2013 11:54:38 +0200
Subject: [PATCH] Don't over-allocate the sds string for large bulk requests.

The call to sdsMakeRoomFor() did not account for the amount of data
already present in the query buffer, resulting in over-allocation.
---
 src/networking.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/networking.c b/src/networking.c
index be78a19f2..d0d0430c0 100644
--- a/src/networking.c
+++ b/src/networking.c
@@ -989,13 +989,13 @@ int processMultibulkBuffer(redisClient *c) {
             if (ll >= REDIS_MBULK_BIG_ARG) {
                 /* If we are going to read a large object from network
                  * try to make it likely that it will start at c->querybuf
-                 * boundary so that we can optimized object creation
+                 * boundary so that we can optimize object creation
                  * avoiding a large copy of data. */
                 sdsrange(c->querybuf,pos,-1);
                 pos = 0;
                 /* Hint the sds library about the amount of bytes this string is
                  * going to contain. */
-                c->querybuf = sdsMakeRoomFor(c->querybuf,ll+2);
+                c->querybuf = sdsMakeRoomFor(c->querybuf,ll+2-sdslen(c->querybuf));
             }
             c->bulklen = ll;
         }
--
GitLab
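
A note on why the arithmetic changed (not part of the patch itself):
sdsMakeRoomFor(s, addlen) reserves room for addlen bytes *in addition*
to the bytes s already holds. When part of a large bulk argument has
already been read into c->querybuf, hinting ll+2 therefore reserves
sdslen(c->querybuf) bytes too many; the fixed hint asks only for the
bytes still missing. The standalone sketch below, using made-up numbers,
shows the difference:

    /* Hypothetical, self-contained sketch of the size hint before and after
     * the fix. sdsMakeRoomFor(s, addlen) grows s so it can hold addlen more
     * bytes beyond its current length, hence the subtraction in the patch. */
    #include <stdio.h>

    int main(void) {
        long long ll = 1024*1024;          /* bulk length announced by the client ("$1048576")  */
        size_t already_buffered = 32*1024; /* bytes of that bulk already sitting in c->querybuf */
        long long needed_total = ll + 2;   /* payload plus the trailing CRLF                    */

        long long old_hint = ll + 2;                               /* pre-patch argument  */
        long long new_hint = ll + 2 - (long long)already_buffered; /* post-patch argument */

        printf("space the bulk still needs : %lld\n",
               needed_total - (long long)already_buffered);
        printf("old hint reserves          : %lld (over-allocates by %zu)\n",
               old_hint, already_buffered);
        printf("new hint reserves          : %lld (exact)\n", new_hint);
        return 0;
    }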