Merge branch 'unstable' into conduct

Itamar Haber 2020-07-10 16:22:58 +03:00 committed by GitHub
commit a6504a16f7
GPG Key ID: 4AEE18F83AFDEB23
28 changed files with 488 additions and 196 deletions

View File

@ -31,7 +31,7 @@ jobs:
- name: make
run: make
biuld-32bit:
build-32bit:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1

BUGS
View File

@ -1 +1 @@
Please check https://github.com/antirez/redis/issues
Please check https://github.com/redis/redis/issues

TLS.md
View File

@ -68,8 +68,6 @@ but there are probably other good reasons to improve that part anyway.
To-Do List
----------
- [ ] Add session caching support. Check if/how it's handled by clients to
assess how useful/important it is.
- [ ] redis-benchmark support. The current implementation is a mix of using
hiredis for parsing and basic networking (establishing connections), but
directly manipulating sockets for most actions. This will need to be cleaned

deps/README.md vendored
View File

@ -47,7 +47,7 @@ Hiredis
Hiredis uses the SDS string library, which must be the same version used inside Redis itself. Hiredis is also critical for Sentinel. Historically Redis has often used forked versions of hiredis in one way or another. In order to upgrade, it is advised to take a lot of care:
1. Check with diff if hiredis API changed and what impact it could have in Redis.
2. Make sure thet the SDS library inside Hiredis and inside Redis are compatible.
2. Make sure that the SDS library inside Hiredis and inside Redis are compatible.
3. After the upgrade, run the Redis Sentinel test.
4. Check manually that redis-cli and redis-benchmark behave as expected, since we have no tests for CLI utilities currently.
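A minimal sketch of the step-1 API check, assuming an upstream hiredis checkout sits next to the Redis tree (both paths below are illustrative):

    # Compare the vendored copy against the upstream version being considered.
    # Start with the public header: any changed signature needs a matching
    # change on the Redis side before upgrading.
    diff -u deps/hiredis/hiredis.h ../hiredis/hiredis.h
    diff -ru deps/hiredis ../hiredis | less

The same kind of diff between deps/hiredis/sds.h and src/sds.h is a quick way to confirm step 2 (SDS compatibility).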

View File

@ -199,6 +199,22 @@ tcp-keepalive 300
#
# tls-prefer-server-ciphers yes
# By default, TLS session caching is enabled to allow faster and less expensive
# reconnections by clients that support it. Use the following directive to disable
# caching.
#
# tls-session-caching no
# Change the default number of TLS sessions cached. A zero value sets the cache
# to unlimited size. The default size is 20480.
#
# tls-session-cache-size 5000
# Change the default timeout of cached TLS sessions. The default timeout is 300
# seconds.
#
# tls-session-cache-timeout 60
################################# GENERAL #####################################
# By default Redis does not run as a daemon. Use 'yes' if you need it.
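The three directives documented above are also registered as MODIFIABLE_CONFIG in config.c (see the config.c hunk further down), so they can be inspected and changed at runtime. A hedged example using redis-cli, with connection and TLS options omitted for brevity:

    # Assumes a server built with BUILD_TLS=yes and reachable from this shell.
    redis-cli CONFIG GET tls-session-cache-size
    redis-cli CONFIG SET tls-session-cache-size 5000
    redis-cli CONFIG SET tls-session-cache-timeout 60
    redis-cli CONFIG SET tls-session-caching no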

View File

@ -192,9 +192,21 @@ ifeq ($(MALLOC),jemalloc)
endif
ifeq ($(BUILD_TLS),yes)
FINAL_CFLAGS+=-DUSE_OPENSSL $(OPENSSL_CFLAGS)
FINAL_LDFLAGS+=$(OPENSSL_LDFLAGS)
FINAL_LIBS += ../deps/hiredis/libhiredis_ssl.a -lssl -lcrypto
FINAL_CFLAGS+=-DUSE_OPENSSL $(OPENSSL_CFLAGS)
FINAL_LDFLAGS+=$(OPENSSL_LDFLAGS)
LIBSSL_PKGCONFIG := $(shell $(PKG_CONFIG) --exists libssl && echo $$?)
ifeq ($(LIBSSL_PKGCONFIG),0)
LIBSSL_LIBS=$(shell $(PKG_CONFIG) --libs libssl)
else
LIBSSL_LIBS=-lssl
endif
LIBCRYPTO_PKGCONFIG := $(shell $(PKG_CONFIG) --exists libcrypto && echo $$?)
ifeq ($(LIBCRYPTO_PKGCONFIG),0)
LIBCRYPTO_LIBS=$(shell $(PKG_CONFIG) --libs libcrypto)
else
LIBCRYPTO_LIBS=-lcrypto
endif
FINAL_LIBS += ../deps/hiredis/libhiredis_ssl.a $(LIBSSL_LIBS) $(LIBCRYPTO_LIBS)
endif
REDIS_CC=$(QUIET_CC)$(CC) $(FINAL_CFLAGS)
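The Makefile change above probes pkg-config for libssl and libcrypto and falls back to the hard-coded -lssl/-lcrypto when no .pc files are installed. Roughly what the probe amounts to, sketched as shell commands (output varies by system):

    # Exit status 0 means pkg-config knows the package.
    pkg-config --exists libssl && echo "libssl found"
    # When found, linker flags come from pkg-config instead of being hard-coded.
    pkg-config --libs libssl      # typically prints: -lssl
    pkg-config --libs libcrypto   # typically prints: -lcrypto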

View File

@ -759,11 +759,12 @@ void bitopCommand(client *c) {
setKey(c,c->db,targetkey,o);
notifyKeyspaceEvent(NOTIFY_STRING,"set",targetkey,c->db->id);
decrRefCount(o);
server.dirty++;
} else if (dbDelete(c->db,targetkey)) {
signalModifiedKey(c,c->db,targetkey);
notifyKeyspaceEvent(NOTIFY_GENERIC,"del",targetkey,c->db->id);
server.dirty++;
}
server.dirty++;
addReplyLongLong(c,maxlen); /* Return the output string length in bytes. */
}

View File

@ -4988,7 +4988,8 @@ void restoreCommand(client *c) {
}
/* Make sure this key does not already exist here... */
if (!replace && lookupKeyWrite(c->db,c->argv[1]) != NULL) {
robj *key = c->argv[1];
if (!replace && lookupKeyWrite(c->db,key) != NULL) {
addReply(c,shared.busykeyerr);
return;
}
@ -5010,24 +5011,37 @@ void restoreCommand(client *c) {
rioInitWithBuffer(&payload,c->argv[3]->ptr);
if (((type = rdbLoadObjectType(&payload)) == -1) ||
((obj = rdbLoadObject(type,&payload,c->argv[1]->ptr)) == NULL))
((obj = rdbLoadObject(type,&payload,key->ptr)) == NULL))
{
addReplyError(c,"Bad data format");
return;
}
/* Remove the old key if needed. */
if (replace) dbDelete(c->db,c->argv[1]);
int deleted = 0;
if (replace)
deleted = dbDelete(c->db,key);
if (ttl && !absttl) ttl+=mstime();
if (ttl && checkAlreadyExpired(ttl)) {
if (deleted) {
rewriteClientCommandVector(c,2,shared.del,key);
signalModifiedKey(c,c->db,key);
notifyKeyspaceEvent(NOTIFY_GENERIC,"del",key,c->db->id);
server.dirty++;
}
addReply(c, shared.ok);
return;
}
/* Create the key and set the TTL if any */
dbAdd(c->db,c->argv[1],obj);
dbAdd(c->db,key,obj);
if (ttl) {
if (!absttl) ttl+=mstime();
setExpire(c,c->db,c->argv[1],ttl);
setExpire(c,c->db,key,ttl);
}
objectSetLRUOrLFU(obj,lfu_freq,lru_idle,lru_clock,1000);
signalModifiedKey(c,c->db,c->argv[1]);
notifyKeyspaceEvent(NOTIFY_GENERIC,"restore",c->argv[1],c->db->id);
signalModifiedKey(c,c->db,key);
notifyKeyspaceEvent(NOTIFY_GENERIC,"restore",key,c->db->id);
addReply(c,shared.ok);
server.dirty++;
}

View File

@ -2071,7 +2071,7 @@ static int updateTlsCfg(char *val, char *prev, char **err) {
UNUSED(prev);
UNUSED(err);
if (tlsConfigure(&server.tls_ctx_config) == C_ERR) {
*err = "Unable to configure tls-cert-file. Check server logs.";
*err = "Unable to update TLS configuration. Check server logs.";
return 0;
}
return 1;
@ -2081,6 +2081,12 @@ static int updateTlsCfgBool(int val, int prev, char **err) {
UNUSED(prev);
return updateTlsCfg(NULL, NULL, err);
}
static int updateTlsCfgInt(long long val, long long prev, char **err) {
UNUSED(val);
UNUSED(prev);
return updateTlsCfg(NULL, NULL, err);
}
#endif /* USE_OPENSSL */
standardConfig configs[] = {
@ -2216,10 +2222,13 @@ standardConfig configs[] = {
#ifdef USE_OPENSSL
createIntConfig("tls-port", NULL, IMMUTABLE_CONFIG, 0, 65535, server.tls_port, 0, INTEGER_CONFIG, NULL, NULL), /* TCP port. */
createIntConfig("tls-session-cache-size", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.tls_ctx_config.session_cache_size, 20*1024, INTEGER_CONFIG, NULL, updateTlsCfgInt),
createIntConfig("tls-session-cache-timeout", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.tls_ctx_config.session_cache_timeout, 300, INTEGER_CONFIG, NULL, updateTlsCfgInt),
createBoolConfig("tls-cluster", NULL, MODIFIABLE_CONFIG, server.tls_cluster, 0, NULL, NULL),
createBoolConfig("tls-replication", NULL, MODIFIABLE_CONFIG, server.tls_replication, 0, NULL, NULL),
createBoolConfig("tls-auth-clients", NULL, MODIFIABLE_CONFIG, server.tls_auth_clients, 1, NULL, NULL),
createBoolConfig("tls-prefer-server-ciphers", NULL, MODIFIABLE_CONFIG, server.tls_ctx_config.prefer_server_ciphers, 0, NULL, updateTlsCfgBool),
createBoolConfig("tls-session-caching", NULL, MODIFIABLE_CONFIG, server.tls_ctx_config.session_caching, 1, NULL, updateTlsCfgBool),
createStringConfig("tls-cert-file", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.cert_file, NULL, NULL, updateTlsCfg),
createStringConfig("tls-key-file", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.key_file, NULL, NULL, updateTlsCfg),
createStringConfig("tls-dh-params-file", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.dh_params_file, NULL, NULL, updateTlsCfg),

View File

@ -378,6 +378,7 @@ void debugCommand(client *c) {
"DEBUG PROTOCOL [string|integer|double|bignum|null|array|set|map|attrib|push|verbatim|true|false]",
"ERROR <string> -- Return a Redis protocol error with <string> as message. Useful for clients unit tests to simulate Redis errors.",
"LOG <message> -- write message to the server log.",
"LEAK <string> -- Create a memory leak of the input string.",
"HTSTATS <dbid> -- Return hash table statistics of the specified Redis database.",
"HTSTATS-KEY <key> -- Like htstats but for the hash table stored as key's value.",
"LOADAOF -- Flush the AOF buffers on disk and reload the AOF in memory.",
@ -430,6 +431,9 @@ NULL
} else if (!strcasecmp(c->argv[1]->ptr,"log") && c->argc == 3) {
serverLog(LL_WARNING, "DEBUG LOG: %s", (char*)c->argv[2]->ptr);
addReply(c,shared.ok);
} else if (!strcasecmp(c->argv[1]->ptr,"leak") && c->argc == 3) {
sdsdup(c->argv[2]->ptr);
addReply(c,shared.ok);
} else if (!strcasecmp(c->argv[1]->ptr,"reload")) {
int flush = 1, save = 1;
int flags = RDBFLAGS_NONE;
@ -1569,7 +1573,7 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) {
serverLogRaw(LL_WARNING|LL_RAW,
"\n=== REDIS BUG REPORT END. Make sure to include from START to END. ===\n\n"
" Please report the crash by opening an issue on github:\n\n"
" http://github.com/antirez/redis/issues\n\n"
" http://github.com/redis/redis/issues\n\n"
" Suspect RAM error? Use redis-server --test-memory to verify it.\n\n"
);

View File

@ -348,7 +348,7 @@ long activeDefragSdsListAndDict(list *l, dict *d, int dict_val_type) {
sdsele = ln->value;
if ((newsds = activeDefragSds(sdsele))) {
/* When defragging an sds value, we need to update the dict key */
uint64_t hash = dictGetHash(d, sdsele);
uint64_t hash = dictGetHash(d, newsds);
replaceSateliteDictKeyPtrAndOrDefragDictEntry(d, sdsele, newsds, hash, &defragged);
ln->value = newsds;
defragged++;

View File

@ -475,6 +475,16 @@ void flushSlaveKeysWithExpireList(void) {
}
}
int checkAlreadyExpired(long long when) {
/* EXPIRE with negative TTL, or EXPIREAT with a timestamp into the past
* should never be executed as a DEL when loading the AOF or in the context
* of a slave instance.
*
* Instead we add the already expired key to the database with expire time
* (possibly in the past) and wait for an explicit DEL from the master. */
return (when <= mstime() && !server.loading && !server.masterhost);
}
/*-----------------------------------------------------------------------------
* Expires Commands
*----------------------------------------------------------------------------*/
@ -502,13 +512,7 @@ void expireGenericCommand(client *c, long long basetime, int unit) {
return;
}
/* EXPIRE with negative TTL, or EXPIREAT with a timestamp into the past
* should never be executed as a DEL when load the AOF or in the context
* of a slave instance.
*
* Instead we take the other branch of the IF statement setting an expire
* (possibly in the past) and wait for an explicit DEL from the master. */
if (when <= mstime() && !server.loading && !server.masterhost) {
if (checkAlreadyExpired(when)) {
robj *aux;
int deleted = server.lazyfree_lazy_expire ? dbAsyncDelete(c->db,key) :
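The rule the new checkAlreadyExpired() helper encodes is the existing master-side behavior: a TTL already in the past becomes a delete rather than a stored expire, while replicas and AOF loading keep the key and wait for an explicit DEL from the master. An illustrative redis-cli session, assuming a standalone server that is not loading data:

    redis-cli SET foo bar
    redis-cli EXPIRE foo -1    # TTL in the past: the key is deleted (propagated as DEL/UNLINK)
    redis-cli EXISTS foo       # (integer) 0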

View File

@ -1989,6 +1989,7 @@ static void repl(void) {
if (argv == NULL) {
printf("Invalid argument(s)\n");
fflush(stdout);
linenoiseFree(line);
continue;
} else if (argc > 0) {
@ -6784,10 +6785,53 @@ void sendCapa() {
sendReplconf("capa", "eof");
}
/* Wrapper around hiredis to allow arbitrary reads and writes.
*
* We piggyback on top of hiredis to achieve transparent TLS support,
* and use its internal buffers so it can co-exist with commands
* previously/later issued on the connection.
*
* Interface is close enough to read()/write() so things should mostly
* work transparently.
*/
/* Write a raw buffer through a redisContext. If we already have something
* in the buffer (leftovers from hiredis operations) it will be written
* as well.
*/
static ssize_t writeConn(redisContext *c, const char *buf, size_t buf_len)
{
int done = 0;
c->obuf = sdscatlen(c->obuf, buf, buf_len);
if (redisBufferWrite(c, &done) == REDIS_ERR) {
sdsrange(c->obuf, 0, -(buf_len+1));
if (!(c->flags & REDIS_BLOCK))
errno = EAGAIN;
return -1;
}
size_t left = sdslen(c->obuf);
sdsclear(c->obuf);
if (!done) {
return buf_len - left;
}
return buf_len;
}
/* Read raw bytes through a redisContext. The read operation is not greedy
* and may not fill the buffer entirely.
*/
static ssize_t readConn(redisContext *c, char *buf, size_t len)
{
return c->funcs->read(c, buf, len);
}
/* Sends SYNC and reads the number of bytes in the payload. Used both by
* slaveMode() and getRDB().
* returns 0 in case an EOF marker is used. */
unsigned long long sendSync(int fd, char *out_eof) {
unsigned long long sendSync(redisContext *c, char *out_eof) {
/* To start we need to send the SYNC command and return the payload.
* The hiredis client lib does not understand this part of the protocol
* and we don't want to mess with its buffers, so everything is performed
@ -6796,7 +6840,7 @@ unsigned long long sendSync(int fd, char *out_eof) {
ssize_t nread;
/* Send the SYNC command. */
if (write(fd,"SYNC\r\n",6) != 6) {
if (writeConn(c, "SYNC\r\n", 6) != 6) {
fprintf(stderr,"Error writing to master\n");
exit(1);
}
@ -6804,7 +6848,7 @@ unsigned long long sendSync(int fd, char *out_eof) {
/* Read $<payload>\r\n, making sure to read just up to "\n" */
p = buf;
while(1) {
nread = read(fd,p,1);
nread = readConn(c,p,1);
if (nread <= 0) {
fprintf(stderr,"Error reading bulk length while SYNCing\n");
exit(1);
@ -6825,11 +6869,10 @@ unsigned long long sendSync(int fd, char *out_eof) {
}
static void slaveMode(void) {
int fd = context->fd;
static char eofmark[RDB_EOF_MARK_SIZE];
static char lastbytes[RDB_EOF_MARK_SIZE];
static int usemark = 0;
unsigned long long payload = sendSync(fd, eofmark);
unsigned long long payload = sendSync(context,eofmark);
char buf[1024];
int original_output = config.output;
@ -6849,7 +6892,7 @@ static void slaveMode(void) {
while(payload) {
ssize_t nread;
nread = read(fd,buf,(payload > sizeof(buf)) ? sizeof(buf) : payload);
nread = readConn(context,buf,(payload > sizeof(buf)) ? sizeof(buf) : payload);
if (nread <= 0) {
fprintf(stderr,"Error reading RDB payload while SYNCing\n");
exit(1);
@ -6892,14 +6935,15 @@ static void slaveMode(void) {
/* This function implements --rdb, so it uses the replication protocol in order
* to fetch the RDB file from a remote server. */
static void getRDB(clusterManagerNode *node) {
int s, fd;
int fd;
redisContext *s;
char *filename;
if (node != NULL) {
assert(node->context);
s = node->context->fd;
s = node->context;
filename = clusterManagerGetNodeRDBFilename(node);
} else {
s = context->fd;
s = context;
filename = config.rdb_filename;
}
static char eofmark[RDB_EOF_MARK_SIZE];
@ -6934,7 +6978,7 @@ static void getRDB(clusterManagerNode *node) {
while(payload) {
ssize_t nread, nwritten;
nread = read(s,buf,(payload > sizeof(buf)) ? sizeof(buf) : payload);
nread = readConn(s,buf,(payload > sizeof(buf)) ? sizeof(buf) : payload);
if (nread <= 0) {
fprintf(stderr,"I/O Error reading RDB payload from socket\n");
exit(1);
@ -6968,7 +7012,7 @@ static void getRDB(clusterManagerNode *node) {
} else {
fprintf(stderr,"Transfer finished with success.\n");
}
close(s); /* Close the file descriptor ASAP as fsync() may take time. */
redisFree(s); /* Close the file descriptor ASAP as fsync() may take time. */
fsync(fd);
close(fd);
fprintf(stderr,"Transfer finished with success.\n");
@ -6985,11 +7029,9 @@ static void getRDB(clusterManagerNode *node) {
#define PIPEMODE_WRITE_LOOP_MAX_BYTES (128*1024)
static void pipeMode(void) {
int fd = context->fd;
long long errors = 0, replies = 0, obuf_len = 0, obuf_pos = 0;
char ibuf[1024*16], obuf[1024*16]; /* Input and output buffers */
char obuf[1024*16]; /* Output buffer */
char aneterr[ANET_ERR_LEN];
redisReader *reader = redisReaderCreate();
redisReply *reply;
int eof = 0; /* True once we consumed all the standard input. */
int done = 0;
@ -6999,47 +7041,38 @@ static void pipeMode(void) {
srand(time(NULL));
/* Use non blocking I/O. */
if (anetNonBlock(aneterr,fd) == ANET_ERR) {
if (anetNonBlock(aneterr,context->fd) == ANET_ERR) {
fprintf(stderr, "Can't set the socket in non blocking mode: %s\n",
aneterr);
exit(1);
}
context->flags &= ~REDIS_BLOCK;
/* Transfer raw protocol and read replies from the server at the same
* time. */
while(!done) {
int mask = AE_READABLE;
if (!eof || obuf_len != 0) mask |= AE_WRITABLE;
mask = aeWait(fd,mask,1000);
mask = aeWait(context->fd,mask,1000);
/* Handle the readable state: we can read replies from the server. */
if (mask & AE_READABLE) {
ssize_t nread;
int read_error = 0;
/* Read from socket and feed the hiredis reader. */
do {
nread = read(fd,ibuf,sizeof(ibuf));
if (nread == -1 && errno != EAGAIN && errno != EINTR) {
fprintf(stderr, "Error reading from the server: %s\n",
strerror(errno));
if (!read_error && redisBufferRead(context) == REDIS_ERR) {
read_error = 1;
break;
}
if (nread > 0) {
redisReaderFeed(reader,ibuf,nread);
last_read_time = time(NULL);
}
} while(nread > 0);
/* Consume replies. */
do {
if (redisReaderGetReply(reader,(void**)&reply) == REDIS_ERR) {
reply = NULL;
if (redisGetReply(context, (void **) &reply) == REDIS_ERR) {
fprintf(stderr, "Error reading replies from server\n");
exit(1);
}
if (reply) {
last_read_time = time(NULL);
if (reply->type == REDIS_REPLY_ERROR) {
fprintf(stderr,"%s\n", reply->str);
errors++;
@ -7072,7 +7105,7 @@ static void pipeMode(void) {
while(1) {
/* Transfer current buffer to server. */
if (obuf_len != 0) {
ssize_t nwritten = write(fd,obuf+obuf_pos,obuf_len);
ssize_t nwritten = writeConn(context,obuf+obuf_pos,obuf_len);
if (nwritten == -1) {
if (errno != EAGAIN && errno != EINTR) {
@ -7088,6 +7121,10 @@ static void pipeMode(void) {
loop_nwritten += nwritten;
if (obuf_len != 0) break; /* Can't accept more data. */
}
if (context->err) {
fprintf(stderr, "Server I/O Error: %s\n", context->errstr);
exit(1);
}
/* If buffer is empty, load from stdin. */
if (obuf_len == 0 && !eof) {
ssize_t nread = read(STDIN_FILENO,obuf,sizeof(obuf));
@ -7138,7 +7175,6 @@ static void pipeMode(void) {
break;
}
}
redisReaderFree(reader);
printf("errors: %lld, replies: %lld\n", errors, replies);
if (errors)
exit(1);
@ -7246,7 +7282,9 @@ static void getKeyTypes(dict *types_dict, redisReply *keys, typeinfo **types) {
/* Pipeline TYPE commands */
for(i=0;i<keys->elements;i++) {
redisAppendCommand(context, "TYPE %s", keys->element[i]->str);
const char* argv[] = {"TYPE", keys->element[i]->str};
size_t lens[] = {4, keys->element[i]->len};
redisAppendCommandArgv(context, 2, argv, lens);
}
/* Retrieve types */
@ -7292,15 +7330,21 @@ static void getKeySizes(redisReply *keys, typeinfo **types,
if(!types[i] || (!types[i]->sizecmd && !memkeys))
continue;
if (!memkeys)
redisAppendCommand(context, "%s %s",
types[i]->sizecmd, keys->element[i]->str);
else if (memkeys_samples==0)
redisAppendCommand(context, "%s %s %s",
"MEMORY", "USAGE", keys->element[i]->str);
else
redisAppendCommand(context, "%s %s %s SAMPLES %u",
"MEMORY", "USAGE", keys->element[i]->str, memkeys_samples);
if (!memkeys) {
const char* argv[] = {types[i]->sizecmd, keys->element[i]->str};
size_t lens[] = {strlen(types[i]->sizecmd), keys->element[i]->len};
redisAppendCommandArgv(context, 2, argv, lens);
} else if (memkeys_samples==0) {
const char* argv[] = {"MEMORY", "USAGE", keys->element[i]->str};
size_t lens[] = {6, 5, keys->element[i]->len};
redisAppendCommandArgv(context, 3, argv, lens);
} else {
sds samplesstr = sdsfromlonglong(memkeys_samples);
const char* argv[] = {"MEMORY", "USAGE", keys->element[i]->str, "SAMPLES", samplesstr};
size_t lens[] = {6, 5, keys->element[i]->len, 7, sdslen(samplesstr)};
redisAppendCommandArgv(context, 5, argv, lens);
sdsfree(samplesstr);
}
}
/* Retrieve sizes */
@ -7396,20 +7440,20 @@ static void findBigKeys(int memkeys, unsigned memkeys_samples) {
sampled++;
if(type->biggest<sizes[i]) {
printf(
"[%05.2f%%] Biggest %-6s found so far '%s' with %llu %s\n",
pct, type->name, keys->element[i]->str, sizes[i],
!memkeys? type->sizeunit: "bytes");
/* Keep track of biggest key name for this type */
if (type->biggest_key)
sdsfree(type->biggest_key);
type->biggest_key = sdsnew(keys->element[i]->str);
type->biggest_key = sdscatrepr(sdsempty(), keys->element[i]->str, keys->element[i]->len);
if(!type->biggest_key) {
fprintf(stderr, "Failed to allocate memory for key!\n");
exit(1);
}
printf(
"[%05.2f%%] Biggest %-6s found so far '%s' with %llu %s\n",
pct, type->name, type->biggest_key, sizes[i],
!memkeys? type->sizeunit: "bytes");
/* Keep track of the biggest size for this type */
type->biggest = sizes[i];
}
@ -7473,21 +7517,27 @@ static void getKeyFreqs(redisReply *keys, unsigned long long *freqs) {
/* Pipeline OBJECT freq commands */
for(i=0;i<keys->elements;i++) {
redisAppendCommand(context, "OBJECT freq %s", keys->element[i]->str);
const char* argv[] = {"OBJECT", "FREQ", keys->element[i]->str};
size_t lens[] = {6, 4, keys->element[i]->len};
redisAppendCommandArgv(context, 3, argv, lens);
}
/* Retrieve freqs */
for(i=0;i<keys->elements;i++) {
if(redisGetReply(context, (void**)&reply)!=REDIS_OK) {
sds keyname = sdscatrepr(sdsempty(), keys->element[i]->str, keys->element[i]->len);
fprintf(stderr, "Error getting freq for key '%s' (%d: %s)\n",
keys->element[i]->str, context->err, context->errstr);
keyname, context->err, context->errstr);
sdsfree(keyname);
exit(1);
} else if(reply->type != REDIS_REPLY_INTEGER) {
if(reply->type == REDIS_REPLY_ERROR) {
fprintf(stderr, "Error: %s\n", reply->str);
exit(1);
} else {
fprintf(stderr, "Warning: OBJECT freq on '%s' failed (may have been deleted)\n", keys->element[i]->str);
sds keyname = sdscatrepr(sdsempty(), keys->element[i]->str, keys->element[i]->len);
fprintf(stderr, "Warning: OBJECT freq on '%s' failed (may have been deleted)\n", keyname);
sdsfree(keyname);
freqs[i] = 0;
}
} else {
@ -7558,10 +7608,10 @@ static void findHotKeys(void) {
memmove(hotkeys,hotkeys+1,sizeof(hotkeys[0])*k);
}
counters[k] = freqs[i];
hotkeys[k] = sdsnew(keys->element[i]->str);
hotkeys[k] = sdscatrepr(sdsempty(), keys->element[i]->str, keys->element[i]->len);
printf(
"[%05.2f%%] Hot key '%s' found so far with counter %llu\n",
pct, keys->element[i]->str, freqs[i]);
pct, hotkeys[k], freqs[i]);
}
/* Sleep if we've been directed to do so */

View File

@ -1011,6 +1011,9 @@ typedef struct redisTLSContextConfig {
char *ciphers;
char *ciphersuites;
int prefer_server_ciphers;
int session_caching;
int session_cache_size;
int session_cache_timeout;
} redisTLSContextConfig;
/*-----------------------------------------------------------------------------
@ -2070,6 +2073,7 @@ void propagateExpire(redisDb *db, robj *key, int lazy);
int expireIfNeeded(redisDb *db, robj *key);
long long getExpire(redisDb *db, robj *key);
void setExpire(client *c, redisDb *db, robj *key, long long when);
int checkAlreadyExpired(long long when);
robj *lookupKey(redisDb *db, robj *key, int flags);
robj *lookupKeyRead(redisDb *db, robj *key);
robj *lookupKeyWrite(redisDb *db, robj *key);

View File

@ -148,9 +148,6 @@ void tlsInit(void) {
}
pending_list = listCreate();
/* Server configuration */
server.tls_auth_clients = 1; /* Secure by default */
}
/* Attempt to configure/reconfigure TLS. This operation is atomic and will
@ -184,6 +181,15 @@ int tlsConfigure(redisTLSContextConfig *ctx_config) {
SSL_CTX_set_options(ctx, SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS);
#endif
if (ctx_config->session_caching) {
SSL_CTX_set_session_cache_mode(ctx, SSL_SESS_CACHE_SERVER);
SSL_CTX_sess_set_cache_size(ctx, ctx_config->session_cache_size);
SSL_CTX_set_timeout(ctx, ctx_config->session_cache_timeout);
SSL_CTX_set_session_id_context(ctx, (void *) "redis", 5);
} else {
SSL_CTX_set_session_cache_mode(ctx, SSL_SESS_CACHE_OFF);
}
int protocols = parseProtocolsConfig(ctx_config->protocols);
if (protocols == -1) goto error;
@ -337,9 +343,7 @@ connection *connCreateAcceptedTLS(int fd, int require_auth) {
conn->c.state = CONN_STATE_ACCEPTING;
if (!require_auth) {
/* We still verify certificates if provided, but don't require them.
*/
SSL_set_verify(conn->ssl, SSL_VERIFY_PEER, NULL);
SSL_set_verify(conn->ssl, SSL_VERIFY_NONE, NULL);
}
SSL_set_fd(conn->ssl, conn->c.fd);

View File

@ -280,7 +280,8 @@ start_server {} {
set sync_partial_err [status $R($master_id) sync_partial_err]
catch {
$R($slave_id) config rewrite
$R($slave_id) debug restart
restart_server [expr {0-$slave_id}] true
set R($slave_id) [srv [expr {0-$slave_id}] client]
}
# note: just waiting for connected_slaves==4 has a race condition since
# we might do the check before the master realized that the slave disconnected
@ -328,7 +329,8 @@ start_server {} {
catch {
$R($slave_id) config rewrite
$R($slave_id) debug restart
restart_server [expr {0-$slave_id}] true
set R($slave_id) [srv [expr {0-$slave_id}] client]
}
# Reconfigure the slave correctly again, when it's back online.

View File

@ -137,18 +137,8 @@ test {client freed during loading} {
# 100mb of rdb, 100k keys will load in more than 1 second
r debug populate 100000 key 1000
catch {
r debug restart
}
restart_server 0 false
set stdout [srv 0 stdout]
while 1 {
# check that the new server actually started and is ready for connections
if {[exec grep -i "Server initialized" | wc -l < $stdout] > 1} {
break
}
after 10
}
# make sure it's still loading
assert_equal [s loading] 1
@ -180,4 +170,4 @@ test {client freed during loading} {
# no need to keep waiting for loading to complete
exec kill [srv 0 pid]
}
}
}

View File

@ -1,14 +1,13 @@
source tests/support/cli.tcl
start_server {tags {"cli"}} {
proc open_cli {} {
proc open_cli {{opts "-n 9"}} {
set ::env(TERM) dumb
set cmdline [rediscli [srv port] "-n 9"]
set cmdline [rediscli [srv port] $opts]
set fd [open "|$cmdline" "r+"]
fconfigure $fd -buffering none
fconfigure $fd -blocking false
fconfigure $fd -translation binary
assert_equal "redis> " [read_cli $fd]
set _ $fd
}
@ -32,11 +31,14 @@ start_server {tags {"cli"}} {
}
# Helpers to run tests in interactive mode
proc format_output {output} {
set _ [string trimright [regsub -all "\r" $output ""] "\n"]
}
proc run_command {fd cmd} {
write_cli $fd $cmd
set lines [split [read_cli $fd] "\n"]
assert_equal "redis> " [lindex $lines end]
join [lrange $lines 0 end-1] "\n"
set _ [format_output [read_cli $fd]]
}
proc test_interactive_cli {name code} {
@ -58,7 +60,7 @@ start_server {tags {"cli"}} {
proc _run_cli {opts args} {
set cmd [rediscli [srv port] [list -n 9 {*}$args]]
foreach {key value} $args {
foreach {key value} $opts {
if {$key eq "pipe"} {
set cmd "sh -c \"$value | $cmd\""
}
@ -72,7 +74,7 @@ start_server {tags {"cli"}} {
fconfigure $fd -translation binary
set resp [read $fd 1048576]
close $fd
set _ $resp
set _ [format_output $resp]
}
proc run_cli {args} {
@ -80,11 +82,11 @@ start_server {tags {"cli"}} {
}
proc run_cli_with_input_pipe {cmd args} {
_run_cli [list pipe $cmd] {*}$args
_run_cli [list pipe $cmd] -x {*}$args
}
proc run_cli_with_input_file {path args} {
_run_cli [list path $path] {*}$args
_run_cli [list path $path] -x {*}$args
}
proc test_nontty_cli {name code} {
@ -101,7 +103,7 @@ start_server {tags {"cli"}} {
test_interactive_cli "INFO response should be printed raw" {
set lines [split [run_command $fd info] "\n"]
foreach line $lines {
assert [regexp {^[a-z0-9_]+:[a-z0-9_]+} $line]
assert [regexp {^$|^#|^[a-z0-9_]+:.+} $line]
}
}
@ -121,7 +123,7 @@ start_server {tags {"cli"}} {
test_interactive_cli "Multi-bulk reply" {
r rpush list foo
r rpush list bar
assert_equal "1. \"foo\"\n2. \"bar\"" [run_command $fd "lrange list 0 -1"]
assert_equal "1) \"foo\"\n2) \"bar\"" [run_command $fd "lrange list 0 -1"]
}
test_interactive_cli "Parsing quotes" {
@ -144,35 +146,35 @@ start_server {tags {"cli"}} {
}
test_tty_cli "Status reply" {
assert_equal "OK\n" [run_cli set key bar]
assert_equal "OK" [run_cli set key bar]
assert_equal "bar" [r get key]
}
test_tty_cli "Integer reply" {
r del counter
assert_equal "(integer) 1\n" [run_cli incr counter]
assert_equal "(integer) 1" [run_cli incr counter]
}
test_tty_cli "Bulk reply" {
r set key "tab\tnewline\n"
assert_equal "\"tab\\tnewline\\n\"\n" [run_cli get key]
assert_equal "\"tab\\tnewline\\n\"" [run_cli get key]
}
test_tty_cli "Multi-bulk reply" {
r del list
r rpush list foo
r rpush list bar
assert_equal "1. \"foo\"\n2. \"bar\"\n" [run_cli lrange list 0 -1]
assert_equal "1) \"foo\"\n2) \"bar\"" [run_cli lrange list 0 -1]
}
test_tty_cli "Read last argument from pipe" {
assert_equal "OK\n" [run_cli_with_input_pipe "echo foo" set key]
assert_equal "OK" [run_cli_with_input_pipe "echo foo" set key]
assert_equal "foo\n" [r get key]
}
test_tty_cli "Read last argument from file" {
set tmpfile [write_tmpfile "from file"]
assert_equal "OK\n" [run_cli_with_input_file $tmpfile set key]
assert_equal "OK" [run_cli_with_input_file $tmpfile set key]
assert_equal "from file" [r get key]
}
@ -188,7 +190,7 @@ start_server {tags {"cli"}} {
test_nontty_cli "Bulk reply" {
r set key "tab\tnewline\n"
assert_equal "tab\tnewline\n" [run_cli get key]
assert_equal "tab\tnewline" [run_cli get key]
}
test_nontty_cli "Multi-bulk reply" {
@ -208,4 +210,79 @@ start_server {tags {"cli"}} {
assert_equal "OK" [run_cli_with_input_file $tmpfile set key]
assert_equal "from file" [r get key]
}
proc test_redis_cli_rdb_dump {} {
r flushdb
set dir [lindex [r config get dir] 1]
assert_equal "OK" [r debug populate 100000 key 1000]
catch {run_cli --rdb "$dir/cli.rdb"} output
assert_match {*Transfer finished with success*} $output
file delete "$dir/dump.rdb"
file rename "$dir/cli.rdb" "$dir/dump.rdb"
assert_equal "OK" [r set should-not-exist 1]
assert_equal "OK" [r debug reload nosave]
assert_equal {} [r get should-not-exist]
}
test_nontty_cli "Dumping an RDB" {
# Disk-based master
assert_match "OK" [r config set repl-diskless-sync no]
test_redis_cli_rdb_dump
# Disk-less master
assert_match "OK" [r config set repl-diskless-sync yes]
assert_match "OK" [r config set repl-diskless-sync-delay 0]
test_redis_cli_rdb_dump
}
test_nontty_cli "Connecting as a replica" {
set fd [open_cli "--replica"]
wait_for_condition 50 500 {
[string match {*slave0:*state=online*} [r info]]
} else {
fail "redis-cli --replica did not connect"
}
for {set i 0} {$i < 100} {incr i} {
r set test-key test-value-$i
}
r client kill type slave
catch {
assert_match {*SET*key-a*} [read_cli $fd]
}
close_cli $fd
}
test_nontty_cli "Piping raw protocol" {
set fd [open_cli "--pipe"]
fconfigure $fd -blocking true
# Create a new deferring client and overwrite its fd
set client [redis [srv 0 "host"] [srv 0 "port"] 1 0]
set ::redis::fd($::redis::id) $fd
$client select 9
r del test-counter
for {set i 0} {$i < 10000} {incr i} {
$client incr test-counter
$client set large-key [string repeat "x" 20000]
}
for {set i 0} {$i < 1000} {incr i} {
$client set very-large-key [string repeat "x" 512000]
}
close $fd write
set output [read_cli $fd]
assert_equal {10000} [r get test-counter]
assert_match {*All data transferred*errors: 0*replies: 21001*} $output
close_cli $fd
}
}

View File

@ -430,6 +430,7 @@ test {diskless loading short read} {
}
# Start the replication process...
set loglines [count_log_lines -1]
$master config set repl-diskless-sync-delay 0
$replica replicaof $master_host $master_port
@ -439,7 +440,7 @@ test {diskless loading short read} {
for {set i 0} {$i < $attempts} {incr i} {
# wait for the replica to start reading the rdb
# using the log file since the replica only responds to INFO once in 2mb
wait_for_log_message -1 "*Loading DB in memory*" 5 2000 1
wait_for_log_message -1 "*Loading DB in memory*" $loglines 2000 1
# add some additional random sleep so that we kill the master on a different place each time
after [expr {int(rand()*100)}]
@ -448,7 +449,7 @@ test {diskless loading short read} {
set killed [$master client kill type replica]
if {[catch {
set res [wait_for_log_message -1 "*Internal error in RDB*" 5 100 10]
set res [wait_for_log_message -1 "*Internal error in RDB*" $loglines 100 10]
if {$::verbose} {
puts $res
}
@ -461,6 +462,7 @@ test {diskless loading short read} {
$master config set repl-backlog-size [expr {16384 + $i}]
}
# wait for loading to stop (fail)
set loglines [count_log_lines -1]
wait_for_condition 100 10 {
[s -1 loading] eq 0
} else {
@ -535,6 +537,7 @@ start_server {tags {"repl"}} {
# start replication
# it's enough for just one replica to be slow, and have its write handler enabled
# so that the whole rdb generation process is bound to that
set loglines [count_log_lines -1]
[lindex $replicas 0] config set repl-diskless-load swapdb
[lindex $replicas 0] config set key-load-delay 100
[lindex $replicas 0] replicaof $master_host $master_port
@ -542,7 +545,7 @@ start_server {tags {"repl"}} {
# wait for the replicas to start reading the rdb
# using the log file since the replica only responds to INFO once in 2mb
wait_for_log_message -1 "*Loading DB in memory*" 8 800 10
wait_for_log_message -1 "*Loading DB in memory*" $loglines 800 10
if {$measure_time} {
set master_statfile "/proc/$master_pid/stat"
@ -558,6 +561,7 @@ start_server {tags {"repl"}} {
$master incr $all_drop
# disconnect replicas depending on the current test
set loglines [count_log_lines -2]
if {$all_drop == "all" || $all_drop == "fast"} {
exec kill [srv 0 pid]
set replicas_alive [lreplace $replicas_alive 1 1]
@ -576,13 +580,13 @@ start_server {tags {"repl"}} {
# make sure we got what we were aiming for, by looking for the message in the log file
if {$all_drop == "all"} {
wait_for_log_message -2 "*Diskless rdb transfer, last replica dropped, killing fork child*" 12 1 1
wait_for_log_message -2 "*Diskless rdb transfer, last replica dropped, killing fork child*" $loglines 1 1
}
if {$all_drop == "no"} {
wait_for_log_message -2 "*Diskless rdb transfer, done reading from pipe, 2 replicas still up*" 12 1 1
wait_for_log_message -2 "*Diskless rdb transfer, done reading from pipe, 2 replicas still up*" $loglines 1 1
}
if {$all_drop == "slow" || $all_drop == "fast"} {
wait_for_log_message -2 "*Diskless rdb transfer, done reading from pipe, 1 replicas still up*" 12 1 1
wait_for_log_message -2 "*Diskless rdb transfer, done reading from pipe, 1 replicas still up*" $loglines 1 1
}
# make sure we don't have a busy loop going through epoll_wait

View File

@ -17,7 +17,14 @@ proc check_valgrind_errors stderr {
set buf [read $fd]
close $fd
# look for a stack trace and other errors, or the absence of a leak-free summary
if {[regexp -- { at 0x} $buf] ||
[regexp -- {Warning} $buf] ||
[regexp -- {Invalid} $buf] ||
[regexp -- {Mismatched} $buf] ||
[regexp -- {uninitialized} $buf] ||
[regexp -- {has a fishy} $buf] ||
[regexp -- {overlap} $buf] ||
(![regexp -- {definitely lost: 0 bytes} $buf] &&
![regexp -- {no leaks are possible} $buf])} {
send_data_packet $::test_server_fd err "Valgrind error: $buf\n"
@ -29,7 +36,13 @@ proc kill_server config {
if {$::external} return
# never mind if it's already dead
if {![is_alive $config]} { return }
if {![is_alive $config]} {
# Check valgrind errors if needed
if {$::valgrind} {
check_valgrind_errors [dict get $config stderr]
}
return
}
set pid [dict get $config pid]
# check for leaks
@ -153,6 +166,55 @@ proc create_server_config_file {filename config} {
close $fp
}
proc spawn_server {config_file stdout stderr} {
if {$::valgrind} {
set pid [exec valgrind --track-origins=yes --trace-children=yes --suppressions=[pwd]/src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full src/redis-server $config_file >> $stdout 2>> $stderr &]
} elseif ($::stack_logging) {
set pid [exec /usr/bin/env MallocStackLogging=1 MallocLogFile=/tmp/malloc_log.txt src/redis-server $config_file >> $stdout 2>> $stderr &]
} else {
set pid [exec src/redis-server $config_file >> $stdout 2>> $stderr &]
}
if {$::wait_server} {
set msg "server started PID: $pid. press any key to continue..."
puts $msg
read stdin 1
}
# Tell the test server about this new instance.
send_data_packet $::test_server_fd server-spawned $pid
return $pid
}
# Wait for actual startup, return 1 if port is busy, 0 otherwise
proc wait_server_started {config_file stdout pid} {
set checkperiod 100; # Milliseconds
set maxiter [expr {120*1000/$checkperiod}] ; # Wait up to 2 minutes.
set port_busy 0
while 1 {
if {[regexp -- " PID: $pid" [exec cat $stdout]]} {
break
}
after $checkperiod
incr maxiter -1
if {$maxiter == 0} {
start_server_error $config_file "No PID detected in log $stdout"
puts "--- LOG CONTENT ---"
puts [exec cat $stdout]
puts "-------------------"
break
}
# Check if the port is actually busy and the server failed
# for this reason.
if {[regexp {Could not create server TCP} [exec cat $stdout]]} {
set port_busy 1
break
}
}
return $port_busy
}
proc start_server {options {code undefined}} {
# If we are running against an external server, we just push the
# host/port pair in the stack the first time
@ -248,44 +310,10 @@ proc start_server {options {code undefined}} {
send_data_packet $::test_server_fd "server-spawning" "port $port"
if {$::valgrind} {
set pid [exec valgrind --track-origins=yes --suppressions=src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full src/redis-server $config_file > $stdout 2> $stderr &]
} elseif ($::stack_logging) {
set pid [exec /usr/bin/env MallocStackLogging=1 MallocLogFile=/tmp/malloc_log.txt src/redis-server $config_file > $stdout 2> $stderr &]
} else {
set pid [exec src/redis-server $config_file > $stdout 2> $stderr &]
}
# Tell the test server about this new instance.
send_data_packet $::test_server_fd server-spawned $pid
set pid [spawn_server $config_file $stdout $stderr]
# check that the server actually started
# ugly but tries to be as fast as possible...
if {$::valgrind} {set retrynum 1000} else {set retrynum 100}
# Wait for actual startup
set checkperiod 100; # Milliseconds
set maxiter [expr {120*1000/100}] ; # Wait up to 2 minutes.
set port_busy 0
while {![info exists _pid]} {
regexp {PID:\s(\d+)} [exec cat $stdout] _ _pid
after $checkperiod
incr maxiter -1
if {$maxiter == 0} {
start_server_error $config_file "No PID detected in log $stdout"
puts "--- LOG CONTENT ---"
puts [exec cat $stdout]
puts "-------------------"
break
}
# Check if the port is actually busy and the server failed
# for this reason.
if {[regexp {Could not create server TCP} [exec cat $stdout]]} {
set port_busy 1
break
}
}
set port_busy [wait_server_started $config_file $stdout $pid]
# Sometimes we have to try a different port, even if we checked
# for availability. Other test clients may grab the port before we
@ -302,6 +330,7 @@ proc start_server {options {code undefined}} {
continue; # Try again
}
if {$::valgrind} {set retrynum 1000} else {set retrynum 100}
if {$code ne "undefined"} {
set serverisup [server_is_up $::host $port $retrynum]
} else {
@ -345,12 +374,6 @@ proc start_server {options {code undefined}} {
error_and_quit $config_file $line
}
if {$::wait_server} {
set msg "server started PID: [dict get $srv "pid"]. press any key to continue..."
puts $msg
read stdin 1
}
while 1 {
# check that the server actually started and is ready for connections
if {[exec grep -i "Ready to accept" | wc -l < $stdout] > 0} {
@ -370,6 +393,9 @@ proc start_server {options {code undefined}} {
if {[catch { uplevel 1 $code } error]} {
set backtrace $::errorInfo
# fetch srv back from the server list, in case it was restarted by restart_server (new PID)
set srv [lindex $::servers end]
# Kill the server without checking for leaks
dict set srv "skipleaks" 1
kill_server $srv
@ -387,6 +413,9 @@ proc start_server {options {code undefined}} {
error $error $backtrace
}
# fetch srv back from the server list, in case it was restarted by restart_server (new PID)
set srv [lindex $::servers end]
# Don't do the leak check when no tests were run
if {$num_tests == $::num_tests} {
dict set srv "skipleaks" 1
@ -402,3 +431,35 @@ proc start_server {options {code undefined}} {
set _ $srv
}
}
proc restart_server {level wait_ready} {
set srv [lindex $::servers end+$level]
kill_server $srv
set stdout [dict get $srv "stdout"]
set stderr [dict get $srv "stderr"]
set config_file [dict get $srv "config_file"]
set prev_ready_count [exec grep -i "Ready to accept" | wc -l < $stdout]
set pid [spawn_server $config_file $stdout $stderr]
# check that the server actually started
wait_server_started $config_file $stdout $pid
# update the pid in the servers list
dict set srv "pid" $pid
# re-set $srv in the servers list
lset ::servers end+$level $srv
if {$wait_ready} {
while 1 {
# check that the server actually started and is ready for connections
if {[exec grep -i "Ready to accept" | wc -l < $stdout] > $prev_ready_count + 1} {
break
}
after 10
}
}
reconnect $level
}

View File

@ -99,11 +99,27 @@ proc wait_for_ofs_sync {r1 r2} {
}
}
proc wait_for_log_message {srv_idx pattern last_lines maxtries delay} {
# count current log lines in server's stdout
proc count_log_lines {srv_idx} {
set _ [exec wc -l < [srv $srv_idx stdout]]
}
# verify pattern exists in server's stdout after a certain line number
proc verify_log_message {srv_idx pattern from_line} {
set lines_after [count_log_lines]
set lines [expr $lines_after - $from_line]
set result [exec tail -$lines < [srv $srv_idx stdout]]
if {![string match $pattern $result]} {
error "assertion:expected message not found in log file: $pattern"
}
}
# wait for pattern to be found in server's stdout after a certain line number
proc wait_for_log_message {srv_idx pattern from_line maxtries delay} {
set retry $maxtries
set stdout [srv $srv_idx stdout]
while {$retry} {
set result [exec tail -$last_lines < $stdout]
set result [exec tail +$from_line < $stdout]
set result [split $result "\n"]
foreach line $result {
if {[string match $pattern $line]} {
@ -114,7 +130,7 @@ proc wait_for_log_message {srv_idx pattern last_lines maxtries delay} {
after $delay
}
if {$retry == 0} {
fail "log message of '$pattern' not found"
fail "log message of '$pattern' not found in $stdout after line: $from_line"
}
}

View File

@ -35,6 +35,7 @@ set ::all_tests {
unit/quit
unit/aofrw
unit/acl
unit/latency-monitor
integration/block-repl
integration/replication
integration/replication-2
@ -48,6 +49,7 @@ set ::all_tests {
integration/psync2
integration/psync2-reg
integration/psync2-pingoff
integration/redis-cli
unit/pubsub
unit/slowlog
unit/scripting

View File

@ -36,7 +36,18 @@ start_server {tags {"dump"}} {
assert {$ttl >= 2900 && $ttl <= 3100}
r get foo
} {bar}
test {RESTORE with ABSTTL in the past} {
r set foo bar
set encoded [r dump foo]
set now [clock milliseconds]
r debug set-active-expire 0
r restore foo [expr $now-3000] $encoded absttl REPLACE
catch {r debug object foo} e
r debug set-active-expire 1
set e
} {ERR no such key}
test {RESTORE can set LRU} {
r set foo bar
set encoded [r dump foo]

View File

@ -78,17 +78,8 @@ start_server {tags {"introspection"}} {
syslog-facility
databases
port
io-threads
tls-port
tls-prefer-server-ciphers
tls-cert-file
tls-key-file
tls-dh-params-file
tls-ca-cert-file
tls-ca-cert-dir
tls-protocols
tls-ciphers
tls-ciphersuites
io-threads
logfile
unixsocketperm
slaveof
@ -100,6 +91,23 @@ start_server {tags {"introspection"}} {
bgsave_cpulist
}
if {!$::tls} {
append skip_configs {
tls-prefer-server-ciphers
tls-session-cache-timeout
tls-session-cache-size
tls-session-caching
tls-cert-file
tls-key-file
tls-dh-params-file
tls-ca-cert-file
tls-ca-cert-dir
tls-protocols
tls-ciphers
tls-ciphersuites
}
}
set configs {}
foreach {k v} [r config get *] {
if {[lsearch $skip_configs $k] != -1} {

View File

@ -67,6 +67,7 @@ tags "modules" {
}
# Start the replication process...
set loglines [count_log_lines -1]
$master config set repl-diskless-sync-delay 0
$replica replicaof $master_host $master_port
@ -76,7 +77,7 @@ tags "modules" {
for {set i 0} {$i < $attempts} {incr i} {
# wait for the replica to start reading the rdb
# using the log file since the replica only responds to INFO once in 2mb
wait_for_log_message -1 "*Loading DB in memory*" 5 2000 1
wait_for_log_message -1 "*Loading DB in memory*" $loglines 2000 1
# add some additional random sleep so that we kill the master on a different place each time
after [expr {int(rand()*100)}]
@ -85,7 +86,7 @@ tags "modules" {
set killed [$master client kill type replica]
if {[catch {
set res [wait_for_log_message -1 "*Internal error in RDB*" 5 100 10]
set res [wait_for_log_message -1 "*Internal error in RDB*" $loglines 100 10]
if {$::verbose} {
puts $res
}
@ -98,6 +99,7 @@ tags "modules" {
$master config set repl-backlog-size [expr {16384 + $i}]
}
# wait for loading to stop (fail)
set loglines [count_log_lines -1]
wait_for_condition 100 10 {
[s -1 loading] eq 0
} else {

View File

@ -130,15 +130,18 @@ start_server {tags {"incr"}} {
format $err
} {WRONGTYPE*}
test {INCRBYFLOAT does not allow NaN or Infinity} {
r set foo 0
set err {}
catch {r incrbyfloat foo +inf} err
set err
# p.s. no way I can force NaN to test it from the API because
# there is no way to increment / decrement by infinity nor to
# perform divisions.
} {ERR*would produce*}
# On some platforms strtold("+inf") with valgrind returns a non-inf result
if {!$::valgrind} {
test {INCRBYFLOAT does not allow NaN or Infinity} {
r set foo 0
set err {}
catch {r incrbyfloat foo +inf} err
set err
# p.s. no way I can force NaN to test it from the API because
# there is no way to increment / decrement by infinity nor to
# perform divisions.
} {ERR*would produce*}
}
test {INCRBYFLOAT decrement} {
r set foo 1

View File

@ -53,7 +53,7 @@ def commands
require "json"
require "uri"
url = URI.parse "https://raw.githubusercontent.com/antirez/redis-doc/master/commands.json"
url = URI.parse "https://raw.githubusercontent.com/redis/redis-doc/master/commands.json"
client = Net::HTTP.new url.host, url.port
client.use_ssl = true
response = client.get url.path

View File

@ -30,6 +30,6 @@ append template [exec git log $branch~$count..$branch "--format=format:%an in co
#Older, more verbose version.
#
#append template [exec git log $branch~30..$branch "--format=format:+-------------------------------------------------------------------------------%n| %s%n| By %an, %ai%n+--------------------------------------------------------------------------------%nhttps://github.com/antirez/redis/commit/%H%n%n%b" --stat]
#append template [exec git log $branch~30..$branch "--format=format:+-------------------------------------------------------------------------------%n| %s%n| By %an, %ai%n+--------------------------------------------------------------------------------%nhttps://github.com/redis/redis/commit/%H%n%n%b" --stat]
puts $template