diff --git a/redis.conf b/redis.conf
index c54dba392..6434a52d1 100644
--- a/redis.conf
+++ b/redis.conf
@@ -637,7 +637,7 @@ slave-priority 100
 #    it with the specified string.
 # 4) During replication, when a slave performs a full resynchronization with
 #    its master, the content of the whole database is removed in order to
-#    load the RDB file just transfered.
+#    load the RDB file just transferred.
 #
 # In all the above cases the default is to delete objects in a blocking way,
 # like if DEL was called. However you can configure each case specifically
diff --git a/src/aof.c b/src/aof.c
index 0593b2707..248456a8b 100644
--- a/src/aof.c
+++ b/src/aof.c
@@ -640,7 +640,7 @@ int loadAppendOnlyFile(char *filename) {
         exit(1);
     }

-    /* Handle a zero-length AOF file as a special case. An emtpy AOF file
+    /* Handle a zero-length AOF file as a special case. An empty AOF file
      * is a valid AOF because an empty server with AOF enabled will create
      * a zero length file at startup, that will remain like that if no write
      * operation is received. */
@@ -1560,7 +1560,7 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) {
             "Background AOF rewrite signal handler took %lldus", ustime()-now);
     } else if (!bysignal && exitcode != 0) {
         /* SIGUSR1 is whitelisted, so we have a way to kill a child without
-         * tirggering an error conditon. */
+         * triggering an error condition. */
         if (bysignal != SIGUSR1)
             server.aof_lastbgrewrite_status = C_ERR;
         serverLog(LL_WARNING,
diff --git a/src/atomicvar.h b/src/atomicvar.h
index 84a5bbc5c..173b045fc 100644
--- a/src/atomicvar.h
+++ b/src/atomicvar.h
@@ -16,7 +16,7 @@
  *  pthread_mutex_t myvar_mutex;
  *  atomicSet(myvar,12345);
  *
- * If atomic primitives are availble (tested in config.h) the mutex
+ * If atomic primitives are available (tested in config.h) the mutex
  * is not used.
  *
  * Never use return value from the macros, instead use the AtomicGetIncr()
diff --git a/src/bitops.c b/src/bitops.c
index 43450fca3..23f2266a7 100644
--- a/src/bitops.c
+++ b/src/bitops.c
@@ -918,7 +918,7 @@ void bitfieldCommand(client *c) {
     struct bitfieldOp *ops = NULL; /* Array of ops to execute at end. */
     int owtype = BFOVERFLOW_WRAP; /* Overflow type. */
     int readonly = 1;
-    size_t higest_write_offset = 0;
+    size_t highest_write_offset = 0;

     for (j = 2; j < c->argc; j++) {
         int remargs = c->argc-j-1; /* Remaining args other than current. */
@@ -968,8 +968,8 @@
         if (opcode != BITFIELDOP_GET) {
             readonly = 0;
-            if (higest_write_offset < bitoffset + bits - 1)
-                higest_write_offset = bitoffset + bits - 1;
+            if (highest_write_offset < bitoffset + bits - 1)
+                highest_write_offset = bitoffset + bits - 1;
             /* INCRBY and SET require another argument. */
             if (getLongLongFromObjectOrReply(c,c->argv[j+3],&i64,NULL) != C_OK){
                 zfree(ops);
@@ -999,7 +999,7 @@
         /* Lookup by making room up to the farest bit reached by
          * this operation. */
         if ((o = lookupStringForBitCommand(c,
-            higest_write_offset)) == NULL) return;
+            highest_write_offset)) == NULL) return;
     }

     addReplyMultiBulkLen(c,numops);
diff --git a/src/cluster.c b/src/cluster.c
index 407ddee82..6d5a26dd7 100644
--- a/src/cluster.c
+++ b/src/cluster.c
@@ -2989,7 +2989,7 @@ void clusterHandleSlaveFailover(void) {
                 (unsigned long long) myself->configEpoch);
         }
-        /* Take responsability for the cluster slots. */
+        /* Take responsibility for the cluster slots. */
         clusterFailoverReplaceYourMaster();
     } else {
         clusterLogCantFailover(CLUSTER_CANT_FAILOVER_WAITING_VOTES);
     }
@@ -3040,11 +3040,11 @@ void clusterHandleSlaveMigration(int max_slaves) {
             !nodeTimedOut(mymaster->slaves[j])) okslaves++;
     if (okslaves <= server.cluster_migration_barrier) return;

-    /* Step 3: Idenitfy a candidate for migration, and check if among the
+    /* Step 3: Identify a candidate for migration, and check if among the
      * masters with the greatest number of ok slaves, I'm the one with the
      * smallest node ID (the "candidate slave").
      *
-     * Note: this means that eventually a replica migration will occurr
+     * Note: this means that eventually a replica migration will occur
      * since slaves that are reachable again always have their FAIL flag
      * cleared, so eventually there must be a candidate. At the same time
      * this does not mean that there are no race conditions possible (two
@@ -3620,7 +3620,7 @@ void clusterCloseAllSlots(void) {
  * -------------------------------------------------------------------------- */

 /* The following are defines that are only used in the evaluation function
- * and are based on heuristics. Actaully the main point about the rejoin and
+ * and are based on heuristics. Actually the main point about the rejoin and
  * writable delay is that they should be a few orders of magnitude larger
  * than the network latency. */
 #define CLUSTER_MAX_REJOIN_DELAY 5000
@@ -5376,7 +5376,7 @@ void clusterRedirectClient(client *c, clusterNode *n, int hashslot, int error_co
     if (error_code == CLUSTER_REDIR_CROSS_SLOT) {
         addReplySds(c,sdsnew("-CROSSSLOT Keys in request don't hash to the same slot\r\n"));
     } else if (error_code == CLUSTER_REDIR_UNSTABLE) {
-        /* The request spawns mutliple keys in the same slot,
+        /* The request spawns multiple keys in the same slot,
          * but the slot is not "stable" currently as there is
          * a migration or import in progress. */
         addReplySds(c,sdsnew("-TRYAGAIN Multiple keys request during rehashing of slot\r\n"));
diff --git a/src/cluster.h b/src/cluster.h
index af85841c9..2dff0be72 100644
--- a/src/cluster.h
+++ b/src/cluster.h
@@ -230,7 +230,7 @@ union clusterMsgData {
 #define CLUSTER_PROTO_VER 1 /* Cluster bus protocol version. */

 typedef struct {
-    char sig[4];        /* Siganture "RCmb" (Redis Cluster message bus). */
+    char sig[4];        /* Signature "RCmb" (Redis Cluster message bus). */
     uint32_t totlen;    /* Total length of this message */
     uint16_t ver;       /* Protocol version, currently set to 1. */
     uint16_t port;      /* TCP base port number. */
diff --git a/src/db.c b/src/db.c
index 7d1504d30..f6a91a9eb 100644
--- a/src/db.c
+++ b/src/db.c
@@ -106,7 +106,7 @@ robj *lookupKeyReadWithFlags(redisDb *db, robj *key, int flags) {
          * safety measure, the command invoked is a read-only command, we can
          * safely return NULL here, and provide a more consistent behavior
          * to clients accessign expired values in a read-only fashion, that
-         * will say the key as non exisitng.
+         * will say the key as non existing.
          *
          * Notably this covers GETs when slaves are used to scale reads. */
         if (server.current_client &&
@@ -296,7 +296,7 @@ robj *dbUnshareStringValue(redisDb *db, robj *key, robj *o) {
  * If callback is given the function is called from time to time to
  * signal that work is in progress.
  *
- * The dbnum can be -1 if all teh DBs should be flushed, or the specified
+ * The dbnum can be -1 if all the DBs should be flushed, or the specified
  * DB number if we want to flush only a single Redis database number.
  *
  * Flags are be EMPTYDB_NO_FLAGS if no special flags are specified or
diff --git a/src/debug.c b/src/debug.c
index a4caa49f2..91d3f4158 100644
--- a/src/debug.c
+++ b/src/debug.c
@@ -1012,7 +1012,7 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) {
         "Redis %s crashed by signal: %d", REDIS_VERSION, sig);
     if (eip != NULL) {
         serverLog(LL_WARNING,
-        "Crashed running the instuction at: %p", eip);
+        "Crashed running the instruction at: %p", eip);
     }
     if (sig == SIGSEGV || sig == SIGBUS) {
         serverLog(LL_WARNING,
diff --git a/src/defrag.c b/src/defrag.c
index 4a1dcefe4..3094403c5 100644
--- a/src/defrag.c
+++ b/src/defrag.c
@@ -558,7 +558,7 @@ void activeDefragCycle(void) {
         cursor = dictScan(db->dict, cursor, defragScanCallback, defragDictBucketCallback, db);
         /* Once in 16 scan iterations, or 1000 pointer reallocations
          * (if we have a lot of pointers in one hash bucket), check if we
-         * reached the tiem limit. */
+         * reached the time limit. */
         if (cursor && (++iterations > 16 || server.stat_active_defrag_hits - defragged > 1000)) {
             if ((ustime() - start) > timelimit) {
                 return;
diff --git a/src/dict.c b/src/dict.c
index 69fb3b8f8..e13aeaca3 100644
--- a/src/dict.c
+++ b/src/dict.c
@@ -327,7 +327,7 @@ int dictReplace(dict *d, void *key, void *val)
     dictEntry *entry, *existing, auxentry;

     /* Try to add the element. If the key
-     * does not exists dictAdd will suceed. */
+     * does not exists dictAdd will succeed. */
     entry = dictAddRaw(d,key,&existing);
     if (entry) {
         dictSetVal(d, entry, val);
diff --git a/src/endianconv.h b/src/endianconv.h
index 08f553136..569f9137b 100644
--- a/src/endianconv.h
+++ b/src/endianconv.h
@@ -43,7 +43,7 @@ uint16_t intrev16(uint16_t v);
 uint32_t intrev32(uint32_t v);
 uint64_t intrev64(uint64_t v);

-/* variants of the function doing the actual convertion only if the target
+/* variants of the function doing the actual conversion only if the target
  * host is big endian */
 #if (BYTE_ORDER == LITTLE_ENDIAN)
 #define memrev16ifbe(p)
diff --git a/src/geo.c b/src/geo.c
index 8423931af..4bff0bd01 100644
--- a/src/geo.c
+++ b/src/geo.c
@@ -145,7 +145,7 @@ double extractUnitOrReply(client *c, robj *unit) {
 /* Input Argument Helper.
  * Extract the dinstance from the specified two arguments starting at 'argv'
  * that shouldbe in the form: <number> <unit>, and return the dinstance in the
- * specified unit on success. *conversino is populated with the coefficient
+ * specified unit on success. *conversion is populated with the coefficient
  * to use in order to convert meters to the unit.
  *
  * On error a value less than zero is returned. */
diff --git a/src/hyperloglog.c b/src/hyperloglog.c
index 49516f824..e115b1a0c 100644
--- a/src/hyperloglog.c
+++ b/src/hyperloglog.c
@@ -665,7 +665,7 @@ int hllSparseAdd(robj *o, unsigned char *ele, size_t elesize) {
     end = p + sdslen(o->ptr) - HLL_HDR_SIZE;

     first = 0;
-    prev = NULL; /* Points to previos opcode at the end of the loop. */
+    prev = NULL; /* Points to previous opcode at the end of the loop. */
     next = NULL; /* Points to the next opcode at the end of the loop. */
     span = 0;
     while(p < end) {
@@ -756,7 +756,7 @@ int hllSparseAdd(robj *o, unsigned char *ele, size_t elesize) {
      * and is either currently represented by a VAL opcode with len > 1,
      * by a ZERO opcode with len > 1, or by an XZERO opcode.
      *
-     * In those cases the original opcode must be split into muliple
+     * In those cases the original opcode must be split into multiple
      * opcodes. The worst case is an XZERO split in the middle resuling into
      * XZERO - VAL - XZERO, so the resulting sequence max length is
      * 5 bytes.
@@ -879,7 +879,7 @@ promote: /* Promote to dense representation. */
      *
      * Note that this in turn means that PFADD will make sure the command
      * is propagated to slaves / AOF, so if there is a sparse -> dense
-     * convertion, it will be performed in all the slaves as well. */
+     * conversion, it will be performed in all the slaves as well. */
     int dense_retval = hllDenseAdd(hdr->registers, ele, elesize);
     serverAssert(dense_retval == 1);
     return dense_retval;
diff --git a/src/latency.c b/src/latency.c
index 9e9f1f13a..c1477d7b1 100644
--- a/src/latency.c
+++ b/src/latency.c
@@ -151,7 +151,7 @@ int latencyResetEvent(char *event_to_reset) {

 /* ------------------------ Latency reporting (doctor) ---------------------- */

-/* Analyze the samples avaialble for a given event and return a structure
+/* Analyze the samples available for a given event and return a structure
  * populate with different metrics, average, MAD, min, max, and so forth.
  * Check latency.h definition of struct latenctStat for more info.
  * If the specified event has no elements the structure is populate with
diff --git a/src/lazyfree.c b/src/lazyfree.c
index 809ebdb57..4bfad92f9 100644
--- a/src/lazyfree.c
+++ b/src/lazyfree.c
@@ -23,10 +23,10 @@ size_t lazyfreeGetPendingObjectsCount(void) {
  * the function just returns the number of elements the object is composed of.
  *
  * Objects composed of single allocations are always reported as having a
- * single item even if they are actaully logical composed of multiple
+ * single item even if they are actually logical composed of multiple
  * elements.
  *
- * For lists the funciton returns the number of elements in the quicklist
+ * For lists the function returns the number of elements in the quicklist
  * representing the list. */
 size_t lazyfreeGetFreeEffort(robj *obj) {
     if (obj->type == OBJ_LIST) {
diff --git a/src/module.c b/src/module.c
index 35e479927..df74ee55a 100644
--- a/src/module.c
+++ b/src/module.c
@@ -3520,7 +3520,7 @@ void moduleInitModulesSystem(void) {
  * because the server must be fully initialized before loading modules.
  *
  * The function aborts the server on errors, since to start with missing
- * modules is not considered sane: clients may rely on the existance of
+ * modules is not considered sane: clients may rely on the existence of
  * given commands, loading AOF also may need some modules to exist, and
  * if this instance is a slave, it must understand commands from master. */
 void moduleLoadFromQueue(void) {
diff --git a/src/modules/INTRO.md b/src/modules/INTRO.md
index 3ac6a4673..1d4a20137 100644
--- a/src/modules/INTRO.md
+++ b/src/modules/INTRO.md
@@ -535,7 +535,7 @@ both modes. Currently a key opened for writing can also be accessed for
 reading but this is to be considered an implementation detail. The right
 mode should be used in sane modules.

-You can open non exisitng keys for writing, since the keys will be created
+You can open non existing keys for writing, since the keys will be created
 when an attempt to write to the key is performed. However when opening keys
 just for reading, `RedisModule_OpenKey` will return NULL if the key does not
 exist.
@@ -664,7 +664,7 @@ is used. Example:

     RedisModule_StringTruncate(mykey,1024);

 The function truncates, or enlarges the string as needed, padding it with
-zero bytes if the previos length is smaller than the new length we request.
+zero bytes if the previous length is smaller than the new length we request.
 If the string does not exist since `key` is associated to an open empty key,
 a string value is created and associated to the key.
diff --git a/src/modules/gendoc.rb b/src/modules/gendoc.rb
index b3dbf1ca7..1ca5cecdd 100644
--- a/src/modules/gendoc.rb
+++ b/src/modules/gendoc.rb
@@ -1,5 +1,5 @@
 # gendoc.rb -- Converts the top-comments inside module.c to modules API
-# reference documentaiton in markdown format.
+# reference documentation in markdown format.

 # Convert the C comment to markdown
 def markdown(s)
diff --git a/src/quicklist.c b/src/quicklist.c
index c8b72743c..72f031eee 100644
--- a/src/quicklist.c
+++ b/src/quicklist.c
@@ -1636,7 +1636,7 @@ int quicklistTest(int argc, char *argv[]) {
     TEST("add to tail of empty list") {
         quicklist *ql = quicklistNew(-2, options[_i]);
         quicklistPushTail(ql, "hello", 6);
-        /* 1 for head and 1 for tail beacuse 1 node = head = tail */
+        /* 1 for head and 1 for tail because 1 node = head = tail */
         ql_verify(ql, 1, 1, 1, 1);
         quicklistRelease(ql);
     }
@@ -1644,7 +1644,7 @@
     TEST("add to head of empty list") {
         quicklist *ql = quicklistNew(-2, options[_i]);
         quicklistPushHead(ql, "hello", 6);
-        /* 1 for head and 1 for tail beacuse 1 node = head = tail */
+        /* 1 for head and 1 for tail because 1 node = head = tail */
         ql_verify(ql, 1, 1, 1, 1);
         quicklistRelease(ql);
     }
diff --git a/src/rax.c b/src/rax.c
index dda008dff..67adba629 100644
--- a/src/rax.c
+++ b/src/rax.c
@@ -448,7 +448,7 @@ int raxInsert(rax *rax, unsigned char *s, size_t len, void *data, void **old) {
     /* If the node we stopped at is a compressed node, we need to
      * split it before to continue.
      *
-     * Splitting a compressed node have a few possibile cases.
+     * Splitting a compressed node have a few possible cases.
      * Imagine that the node 'h' we are currently at is a compressed
      * node contaning the string "ANNIBALE" (it means that it represents
      * nodes A -> N -> N -> I -> B -> A -> L -> E with the only child
@@ -730,7 +730,7 @@ int raxInsert(rax *rax, unsigned char *s, size_t len, void *data, void **old) {
         cp = raxNodeLastChildPtr(trimmed);
         memcpy(cp,&postfix,sizeof(postfix));

-        /* Finish! We don't need to contine with the insertion
+        /* Finish! We don't need to continue with the insertion
          * algorithm for ALGO 2. The key is already inserted. */
         rax->numele++;
         rax_free(h);
diff --git a/src/rax.h b/src/rax.h
index 6f91f4c1b..dd07ad1e4 100644
--- a/src/rax.h
+++ b/src/rax.h
@@ -94,7 +94,7 @@ typedef struct raxNode {
      *
      * If the node has an associated key (iskey=1) and is not NULL
      * (isnull=0), then after the raxNode pointers poiting to the
-     * childen, an additional value pointer is present (as you can see
+     * children, an additional value pointer is present (as you can see
      * in the representation above as "value-ptr" field). */
     unsigned char data[];
diff --git a/src/rdb.c b/src/rdb.c
index 18acb4195..542029b8c 100644
--- a/src/rdb.c
+++ b/src/rdb.c
@@ -1684,7 +1684,7 @@ void backgroundSaveDoneHandlerDisk(int exitcode, int bysignal) {
         latencyEndMonitor(latency);
         latencyAddSampleIfNeeded("rdb-unlink-temp-file",latency);
         /* SIGUSR1 is whitelisted, so we have a way to kill a child without
-         * tirggering an error conditon. */
+         * triggering an error condition. */
         if (bysignal != SIGUSR1) server.lastbgsave_status = C_ERR;
     }
@@ -1721,7 +1721,7 @@ void backgroundSaveDoneHandlerSocket(int exitcode, int bysignal) {
      * in error state.
      *
      * If the process returned an error, consider the list of slaves that
-     * can continue to be emtpy, so that it's just a special case of the
+     * can continue to be empty, so that it's just a special case of the
      * normal code path. */
     ok_slaves = zmalloc(sizeof(uint64_t)); /* Make space for the count. */
     ok_slaves[0] = 0;
diff --git a/src/redis-cli.c b/src/redis-cli.c
index ee24cf3c7..0d037eb0e 100644
--- a/src/redis-cli.c
+++ b/src/redis-cli.c
@@ -2058,15 +2058,15 @@ static void getKeySizes(redisReply *keys, int *types,
             keys->element[i]->str);
     }

-    /* Retreive sizes */
+    /* Retrieve sizes */
     for(i=0;i<keys->elements;i++) {
-        /* Skip keys that dissapeared between SCAN and TYPE */
+        /* Skip keys that disappeared between SCAN and TYPE */
         if(types[i] == TYPE_NONE) {
             sizes[i] = 0;
             continue;
         }

-        /* Retreive size */
+        /* Retrieve size */
         if(redisGetReply(context, (void**)&reply)!=REDIS_OK) {
             fprintf(stderr, "Error getting size for key '%s' (%d: %s)\n",
                 keys->element[i]->str, context->err, context->errstr);
@@ -2136,7 +2136,7 @@ static void findBigKeys(void) {
             arrsize = keys->elements;
         }

-        /* Retreive types and then sizes */
+        /* Retrieve types and then sizes */
         getKeyTypes(keys, types);
         getKeySizes(keys, types, sizes);
diff --git a/src/redis-trib.rb b/src/redis-trib.rb
index 39db97947..67d66c356 100755
--- a/src/redis-trib.rb
+++ b/src/redis-trib.rb
@@ -454,7 +454,7 @@ class RedisTrib
         # Handle case "1": keys in no node.
         if none.length > 0
-            xputs "The folowing uncovered slots have no keys across the cluster:"
+            xputs "The following uncovered slots have no keys across the cluster:"
             xputs none.keys.join(",")
             yes_or_die "Fix these slots by covering with a random node?"
             none.each{|slot,nodes|
@@ -466,7 +466,7 @@ class RedisTrib
         # Handle case "2": keys only in one node.
         if single.length > 0
-            xputs "The folowing uncovered slots have keys in just one node:"
+            xputs "The following uncovered slots have keys in just one node:"
             puts single.keys.join(",")
             yes_or_die "Fix these slots by covering with those nodes?"
             single.each{|slot,nodes|
@@ -477,7 +477,7 @@ class RedisTrib
         # Handle case "3": keys in multiple nodes.
         if multi.length > 0
-            xputs "The folowing uncovered slots have keys in multiple nodes:"
+            xputs "The following uncovered slots have keys in multiple nodes:"
             xputs multi.keys.join(",")
             yes_or_die "Fix these slots by moving keys into a single node?"
             multi.each{|slot,nodes|
@@ -1102,7 +1102,7 @@ class RedisTrib
             if numslots > 0
                 puts "Moving #{numslots} slots from #{src} to #{dst}"

-                # Actaully move the slots.
+                # Actually move the slots.
                 reshard_table = compute_reshard_table([src],numslots)
                 if reshard_table.length != numslots
                     xputs "*** Assertio failed: Reshard table != number of slots"
@@ -1622,7 +1622,7 @@ private
     ]
 end

-# Turn a key name into the corrisponding Redis Cluster slot.
+# Turn a key name into the corresponding Redis Cluster slot.
 def key_to_slot(key)
     # Only hash what is inside {...} if there is such a pattern in the key.
     # Note that the specification requires the content that is between
diff --git a/src/redisassert.h b/src/redisassert.h
index c9b78327c..61ab35a14 100644
--- a/src/redisassert.h
+++ b/src/redisassert.h
@@ -1,4 +1,4 @@
-/* redisassert.h -- Drop in replacemnet assert.h that prints the stack trace
+/* redisassert.h -- Drop in replacement assert.h that prints the stack trace
  * in the Redis logs.
  *
  * This file should be included instead of "assert.h" inside libraries used by
diff --git a/src/replication.c b/src/replication.c
index 6be5d2631..8dfb1accb 100644
--- a/src/replication.c
+++ b/src/replication.c
@@ -553,7 +553,7 @@ need_full_resync:
 * Side effects, other than starting a BGSAVE:
 *
 * 1) Handle the slaves in WAIT_START state, by preparing them for a full
-*    sync if the BGSAVE was succesfully started, or sending them an error
+*    sync if the BGSAVE was successfully started, or sending them an error
*    and dropping them from the list of slaves.
*
* 2) Flush the Lua scripting script cache if the BGSAVE was actually
@@ -895,7 +895,7 @@ void sendBulkToSlave(aeEventLoop *el, int fd, void *privdata, int mask) {
         }
     }

-    /* If the preamble was already transfered, send the RDB bulk data. */
+    /* If the preamble was already transferred, send the RDB bulk data. */
     lseek(slave->repldbfd,slave->repldboff,SEEK_SET);
     buflen = read(slave->repldbfd,buf,PROTO_IOBUF_LEN);
     if (buflen <= 0) {
@@ -964,7 +964,7 @@ void updateSlavesWaitingBgsave(int bgsaveerr, int type) {
                 replicationGetSlaveName(slave));
             /* Note: we wait for a REPLCONF ACK message from slave in
              * order to really put it online (install the write handler
-             * so that the accumulated data can be transfered). However
+             * so that the accumulated data can be transferred). However
              * we change the replication state ASAP, since our slave
              * is technically online now. */
             slave->replstate = SLAVE_STATE_ONLINE;
@@ -1047,7 +1047,7 @@ int slaveIsInHandshakeState(void) {

 /* Avoid the master to detect the slave is timing out while loading the
  * RDB file in initial synchronization. We send a single newline character
- * that is valid protocol but is guaranteed to either be sent entierly or
+ * that is valid protocol but is guaranteed to either be sent entirely or
  * not, since the byte is indivisible.
  *
  * The function is called in two contexts: while we flush the current
@@ -1387,7 +1387,7 @@ char *sendSynchronousCommand(int flags, int fd, ...) {
 *
 * The function returns:
 *
-* PSYNC_CONTINUE: If the PSYNC command succeded and we can continue.
+* PSYNC_CONTINUE: If the PSYNC command succeeded and we can continue.
 * PSYNC_FULLRESYNC: If PSYNC is supported but a full resync is needed.
 *                   In this case the master run_id and global replication
 *                   offset is saved.
@@ -2100,7 +2100,7 @@ void replicationSendAck(void) {
  * functions. */

 /* This function is called by freeClient() in order to cache the master
- * client structure instead of destryoing it. freeClient() will return
+ * client structure instead of destroying it. freeClient() will return
  * ASAP after this function returns, so every action needed to avoid problems
  * with a client that is really "suspended" has to be done by this function.
  *
diff --git a/src/sds.c b/src/sds.c
index eafa13c29..cb95dae73 100644
--- a/src/sds.c
+++ b/src/sds.c
@@ -271,7 +271,7 @@ sds sdsRemoveFreeSpace(sds s) {
     return s;
 }

-/* Return the total size of the allocation of the specifed sds string,
+/* Return the total size of the allocation of the specified sds string,
  * including:
  * 1) The sds header before the pointer.
  * 2) The string.
diff --git a/src/sentinel.c b/src/sentinel.c
index 6c6a3a0cd..e5da3d892 100644
--- a/src/sentinel.c
+++ b/src/sentinel.c
@@ -494,7 +494,7 @@ void sentinelIsRunning(void) {
         if (sentinel.myid[j] != 0) break;

     if (j == CONFIG_RUN_ID_SIZE) {
-        /* Pick ID and presist the config. */
+        /* Pick ID and persist the config. */
         getRandomHexChars(sentinel.myid,CONFIG_RUN_ID_SIZE);
         sentinelFlushConfig();
     }
@@ -2452,7 +2452,7 @@ void sentinelReceiveHelloMessages(redisAsyncContext *c, void *reply, void *privd
 }

 /* Send an "Hello" message via Pub/Sub to the specified 'ri' Redis
- * instance in order to broadcast the current configuraiton for this
+ * instance in order to broadcast the current configuration for this
  * master, and to advertise the existence of this Sentinel at the same time.
  *
  * The message has the following format:
@@ -3271,7 +3271,7 @@ void sentinelInfoCommand(client *c) {
     addReplyBulkSds(c, info);
 }

-/* Implements Sentinel verison of the ROLE command. The output is
+/* Implements Sentinel version of the ROLE command. The output is
  * "sentinel" and the list of currently monitored master names. */
 void sentinelRoleCommand(client *c) {
     dictIterator *di;
@@ -3413,7 +3413,7 @@ void sentinelCheckSubjectivelyDown(sentinelRedisInstance *ri) {
     if (ri->link->cc &&
         (mstime() - ri->link->cc_conn_time) >
         SENTINEL_MIN_LINK_RECONNECT_PERIOD &&
-        ri->link->act_ping_time != 0 && /* Ther is a pending ping... */
+        ri->link->act_ping_time != 0 && /* There is a pending ping... */
         /* The pending ping is delayed, and we did not received
          * error replies as well. */
         (mstime() - ri->link->act_ping_time) > (ri->down_after_period/2) &&
@@ -3601,7 +3601,7 @@ void sentinelSimFailureCrash(void) {
 }

 /* Vote for the sentinel with 'req_runid' or return the old vote if already
- * voted for the specifed 'req_epoch' or one greater.
+ * voted for the specified 'req_epoch' or one greater.
  *
  * If a vote is not available returns NULL, otherwise return the Sentinel
  * runid and populate the leader_epoch with the epoch of the vote. */
@@ -3752,7 +3752,7 @@ int sentinelSendSlaveOf(sentinelRedisInstance *ri, char *host, int port) {
     /* In order to send SLAVEOF in a safe way, we send a transaction performing
      * the following tasks:
      * 1) Reconfigure the instance according to the specified host/port params.
-     * 2) Rewrite the configuraiton.
+     * 2) Rewrite the configuration.
      * 3) Disconnect all clients (but this one sending the commnad) in order
      *    to trigger the ask-master-on-reconnection protocol for connected
      *    clients.
diff --git a/src/server.c b/src/server.c
index 2fff8c74f..aa46504ba 100644
--- a/src/server.c
+++ b/src/server.c
@@ -2239,7 +2239,7 @@ void call(client *c, int flags) {
         if (c->flags & CLIENT_FORCE_AOF) propagate_flags |= PROPAGATE_AOF;

         /* However prevent AOF / replication propagation if the command
-         * implementatino called preventCommandPropagation() or similar,
+         * implementation called preventCommandPropagation() or similar,
          * or if we don't have the call() flags to do so. */
         if (c->flags & CLIENT_PREVENT_REPL_PROP ||
             !(flags & CMD_CALL_PROPAGATE_REPL))
@@ -3737,7 +3737,7 @@ int main(int argc, char **argv) {
             configfile = argv[j];
             server.configfile = getAbsolutePath(configfile);
             /* Replace the config file in server.exec_argv with
-             * its absoulte path. */
+             * its absolute path. */
             zfree(server.exec_argv[j]);
             server.exec_argv[j] = zstrdup(server.configfile);
             j++;
diff --git a/src/server.h b/src/server.h
index a32809d45..664d4e0f3 100644
--- a/src/server.h
+++ b/src/server.h
@@ -1523,11 +1523,11 @@ void receiveChildInfo(void);
 #define ZADD_NONE 0
 #define ZADD_INCR (1<<0)    /* Increment the score instead of setting it. */
 #define ZADD_NX (1<<1)      /* Don't touch elements not already existing. */
-#define ZADD_XX (1<<2)      /* Only touch elements already exisitng. */
+#define ZADD_XX (1<<2)      /* Only touch elements already existing. */
 /* Output flags. */
 #define ZADD_NOP (1<<3)     /* Operation not performed because of conditionals.*/
-#define ZADD_NAN (1<<4)     /* Only touch elements already exisitng. */
+#define ZADD_NAN (1<<4)     /* Only touch elements already existing. */
 #define ZADD_ADDED (1<<5)   /* The element was new and was added. */
 #define ZADD_UPDATED (1<<6) /* The element already existed, score updated. */
diff --git a/src/sort.c b/src/sort.c
index 7ddd37d95..668e5fad2 100644
--- a/src/sort.c
+++ b/src/sort.c
@@ -193,7 +193,7 @@ void sortCommand(client *c) {
     long limit_start = 0, limit_count = -1, start, end;
     int j, dontsort = 0, vectorlen;
     int getop = 0; /* GET operation counter */
-    int int_convertion_error = 0;
+    int int_conversion_error = 0;
     int syntax_error = 0;
     robj *sortval, *sortby = NULL, *storekey = NULL;
     redisSortObject *vector; /* Resulting vector to sort */
@@ -469,7 +469,7 @@ void sortCommand(client *c) {
                 if (eptr[0] != '\0' || errno == ERANGE ||
                     isnan(vector[j].u.score))
                 {
-                    int_convertion_error = 1;
+                    int_conversion_error = 1;
                 }
             } else if (byval->encoding == OBJ_ENCODING_INT) {
                 /* Don't need to decode the object if it's
@@ -503,7 +503,7 @@ void sortCommand(client *c) {
     /* Send command output to the output buffer, performing the specified
      * GET/DEL/INCR/DECR operations if any. */
     outputlen = getop ? getop*(end-start+1) : end-start+1;
-    if (int_convertion_error) {
+    if (int_conversion_error) {
         addReplyError(c,"One or more scores can't be converted into double");
     } else if (storekey == NULL) {
         /* STORE option not specified, sent the sorting result to client */
diff --git a/src/t_zset.c b/src/t_zset.c
index f7f4c6eb2..0d54fc3a7 100644
--- a/src/t_zset.c
+++ b/src/t_zset.c
@@ -507,7 +507,7 @@ static int zslParseRange(robj *min, robj *max, zrangespec *spec) {
  * + means the max string possible
  *
  * If the string is valid the *dest pointer is set to the redis object
- * that will be used for the comparision, and ex will be set to 0 or 1
+ * that will be used for the comparison, and ex will be set to 0 or 1
  * respectively if the item is exclusive or inclusive. C_OK will be
  * returned.
  *
diff --git a/src/util.c b/src/util.c
index 8d68f0bb1..59152a8d9 100644
--- a/src/util.c
+++ b/src/util.c
@@ -451,7 +451,7 @@ int string2ld(const char *s, size_t slen, long double *dp) {
 /* Convert a double to a string representation. Returns the number of bytes
  * required. The representation should always be parsable by strtod(3).
  * This function does not support human-friendly formatting like ld2string
- * does. It is intented mainly to be used inside t_zset.c when writing scores
+ * does. It is intended mainly to be used inside t_zset.c when writing scores
  * into a ziplist representing a sorted set. */
 int d2string(char *buf, size_t len, double value) {
     if (isnan(value)) {
diff --git a/src/ziplist.c b/src/ziplist.c
index e407937ff..5c36e8e10 100644
--- a/src/ziplist.c
+++ b/src/ziplist.c
@@ -269,7 +269,7 @@
 * Note that this is not how the data is actually encoded, is just what we
 * get filled by a function in order to operate more easily. */
 typedef struct zlentry {
-    unsigned int prevrawlensize; /* Bytes used to encode the previos entry len*/
+    unsigned int prevrawlensize; /* Bytes used to encode the previous entry len*/
     unsigned int prevrawlen;     /* Previous entry len. */
     unsigned int lensize;        /* Bytes used to encode this entry type/len.
                                     For example strings have a 1, 2 or 5 bytes
@@ -431,7 +431,7 @@ unsigned int zipStorePrevEntryLength(unsigned char *p, unsigned int len) {
 /* Return the length of the previous element, and the number of bytes that
  * are used in order to encode the previous element length.
  * 'ptr' must point to the prevlen prefix of an entry (that encodes the
- * length of the previos entry in order to navigate the elements backward).
+ * length of the previous entry in order to navigate the elements backward).
  * The length of the previous entry is stored in 'prevlen', the number of
  * bytes needed to encode the previous entry length are stored in
  * 'prevlensize'. */
diff --git a/src/zmalloc.c b/src/zmalloc.c
index 094dd80fa..f63e8a93e 100644
--- a/src/zmalloc.c
+++ b/src/zmalloc.c
@@ -353,7 +353,7 @@ size_t zmalloc_get_private_dirty(long pid) {
 }

 /* Returns the size of physical memory (RAM) in bytes.
- * It looks ugly, but this is the cleanest way to achive cross platform results.
+ * It looks ugly, but this is the cleanest way to achieve cross platform results.
  * Cleaned up from:
  *
  * http://nadeausoftware.com/articles/2012/09/c_c_tip_how_get_physical_memory_size_system
diff --git a/tests/integration/replication-psync.tcl b/tests/integration/replication-psync.tcl
index 2b9e13f50..10052f7c1 100644
--- a/tests/integration/replication-psync.tcl
+++ b/tests/integration/replication-psync.tcl
@@ -11,7 +11,7 @@ proc stop_bg_complex_data {handle} {
 # partial resyncs attempts, all this while flooding the master with
 # write queries.
 #
-# You can specifiy backlog size, ttl, delay before reconnection, test duration
+# You can specify backlog size, ttl, delay before reconnection, test duration
 # in seconds, and an additional condition to verify at the end.
 #
 # If reconnect is > 0, the test actually try to break the connection and
diff --git a/tests/unit/dump.tcl b/tests/unit/dump.tcl
index f5a29a096..66608f571 100644
--- a/tests/unit/dump.tcl
+++ b/tests/unit/dump.tcl
@@ -246,7 +246,7 @@ start_server {tags {"dump"}} {
         set e
     } {*empty string*}

-    test {MIGRATE with mutliple keys migrate just existing ones} {
+    test {MIGRATE with multiple keys migrate just existing ones} {
         set first [srv 0 client]
         r set key1 "v1"
         r set key2 "v2"
diff --git a/tests/unit/expire.tcl b/tests/unit/expire.tcl
index eddc7c303..de24eabed 100644
--- a/tests/unit/expire.tcl
+++ b/tests/unit/expire.tcl
@@ -121,7 +121,7 @@ start_server {tags {"expire"}} {
         list $a $b
     } {somevalue {}}

-    test {TTL returns tiem to live in seconds} {
+    test {TTL returns time to live in seconds} {
         r del x
         r setex x 10 somevalue
         set ttl [r ttl x]
diff --git a/tests/unit/scripting.tcl b/tests/unit/scripting.tcl
index be82e1559..363678e38 100644
--- a/tests/unit/scripting.tcl
+++ b/tests/unit/scripting.tcl
@@ -516,7 +516,7 @@ start_server {tags {"scripting"}} {
     # Note: keep this test at the end of this server stanza because it
     # kills the server.
     test {SHUTDOWN NOSAVE can kill a timedout script anyway} {
-        # The server sould be still unresponding to normal commands.
+        # The server should be still unresponding to normal commands.
         catch {r ping} e
         assert_match {BUSY*} $e
         catch {r shutdown nosave}
diff --git a/tests/unit/type/zset.tcl b/tests/unit/type/zset.tcl
index 82f76befe..c291e06e0 100644
--- a/tests/unit/type/zset.tcl
+++ b/tests/unit/type/zset.tcl
@@ -84,7 +84,7 @@ start_server {tags {"zset"}} {
         set err
     } {ERR*}

-    test "ZADD NX with non exisitng key" {
+    test "ZADD NX with non existing key" {
         r del ztmp
         r zadd ztmp nx 10 x 20 y 30 z
         assert {[r zcard ztmp] == 3}
diff --git a/utils/hashtable/README b/utils/hashtable/README
index e2862f012..87a76c9a5 100644
--- a/utils/hashtable/README
+++ b/utils/hashtable/README
@@ -5,7 +5,7 @@ rehashing.c

 Visually show buckets in the two hash tables between rehashings. Also stress
 test getRandomKeys() implementation, that may actually disappear from
-Redis soon, however visualizaiton some code is reusable in new bugs
+Redis soon, however visualization some code is reusable in new bugs
 investigation.

 Compile with: