Fix some spelling problems

JackDrogon 2017-06-30 14:22:31 +08:00
parent 01a4b9892d
commit b9385b2cf1
41 changed files with 79 additions and 79 deletions

View File

@ -637,7 +637,7 @@ slave-priority 100
# it with the specified string.
# 4) During replication, when a slave performs a full resynchronization with
# its master, the content of the whole database is removed in order to
# load the RDB file just transfered.
# load the RDB file just transferred.
#
# In all the above cases the default is to delete objects in a blocking way,
# like if DEL was called. However you can configure each case specifically

View File

@ -640,7 +640,7 @@ int loadAppendOnlyFile(char *filename) {
exit(1);
}
/* Handle a zero-length AOF file as a special case. An emtpy AOF file
/* Handle a zero-length AOF file as a special case. An empty AOF file
* is a valid AOF because an empty server with AOF enabled will create
* a zero length file at startup, that will remain like that if no write
* operation is received. */
@ -1560,7 +1560,7 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) {
"Background AOF rewrite signal handler took %lldus", ustime()-now);
} else if (!bysignal && exitcode != 0) {
/* SIGUSR1 is whitelisted, so we have a way to kill a child without
* tirggering an error conditon. */
* tirggering an error condition. */
if (bysignal != SIGUSR1)
server.aof_lastbgrewrite_status = C_ERR;
serverLog(LL_WARNING,

View File

@ -16,7 +16,7 @@
* pthread_mutex_t myvar_mutex;
* atomicSet(myvar,12345);
*
* If atomic primitives are availble (tested in config.h) the mutex
* If atomic primitives are available (tested in config.h) the mutex
* is not used.
*
* Never use return value from the macros, instead use the AtomicGetIncr()
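
The comment above documents the usage pattern for the atomicvar macros: declare the variable together with a companion mutex (used only when real atomic primitives are unavailable), always go through the macros, and read back via a dedicated get-and-increment operation rather than a macro's return value. A minimal standalone sketch of the same pattern using C11 <stdatomic.h> instead of the Redis macros (names below are illustrative, not part of atomicvar.h):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic long long mycounter = 0;   /* stands in for the variable + mutex pair */

/* Increment and read back in one step, mirroring the "use a get-and-incr
 * operation, never the macro's return value" advice from the comment above. */
static long long counter_get_incr(long long delta) {
    /* atomic_fetch_add returns the value *before* the addition */
    return atomic_fetch_add(&mycounter, delta) + delta;
}

int main(void) {
    counter_get_incr(1);
    printf("counter = %lld\n", counter_get_incr(1)); /* prints 2 */
    return 0;
}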

View File

@ -918,7 +918,7 @@ void bitfieldCommand(client *c) {
struct bitfieldOp *ops = NULL; /* Array of ops to execute at end. */
int owtype = BFOVERFLOW_WRAP; /* Overflow type. */
int readonly = 1;
size_t higest_write_offset = 0;
size_t highest_write_offset = 0;
for (j = 2; j < c->argc; j++) {
int remargs = c->argc-j-1; /* Remaining args other than current. */
@ -968,8 +968,8 @@ void bitfieldCommand(client *c) {
if (opcode != BITFIELDOP_GET) {
readonly = 0;
if (higest_write_offset < bitoffset + bits - 1)
higest_write_offset = bitoffset + bits - 1;
if (highest_write_offset < bitoffset + bits - 1)
highest_write_offset = bitoffset + bits - 1;
/* INCRBY and SET require another argument. */
if (getLongLongFromObjectOrReply(c,c->argv[j+3],&i64,NULL) != C_OK){
zfree(ops);
@ -999,7 +999,7 @@ void bitfieldCommand(client *c) {
/* Lookup by making room up to the farest bit reached by
* this operation. */
if ((o = lookupStringForBitCommand(c,
higest_write_offset)) == NULL) return;
highest_write_offset)) == NULL) return;
}
addReplyMultiBulkLen(c,numops);
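
For context, the renamed highest_write_offset tracks the last bit touched by any SET/INCRBY operation so the backing string can be grown once, up front, before the ops run. A small sketch of that arithmetic (illustrative only, not the bitops.c code):

#include <stddef.h>
#include <stdint.h>

/* Bytes the string must have so a write of 'bits' bits at 'bitoffset' fits. */
static size_t bytes_needed(uint64_t bitoffset, unsigned bits) {
    uint64_t highest_write_offset = bitoffset + bits - 1;  /* last bit touched */
    return (size_t)(highest_write_offset >> 3) + 1;        /* round up to whole bytes */
}

For example, a SET of a u8 field at bit offset 100 touches bits 100..107, so bytes_needed(100, 8) is (107 >> 3) + 1 = 14 bytes.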

View File

@ -2989,7 +2989,7 @@ void clusterHandleSlaveFailover(void) {
(unsigned long long) myself->configEpoch);
}
/* Take responsability for the cluster slots. */
/* Take responsibility for the cluster slots. */
clusterFailoverReplaceYourMaster();
} else {
clusterLogCantFailover(CLUSTER_CANT_FAILOVER_WAITING_VOTES);
@ -3040,11 +3040,11 @@ void clusterHandleSlaveMigration(int max_slaves) {
!nodeTimedOut(mymaster->slaves[j])) okslaves++;
if (okslaves <= server.cluster_migration_barrier) return;
/* Step 3: Idenitfy a candidate for migration, and check if among the
/* Step 3: Identify a candidate for migration, and check if among the
* masters with the greatest number of ok slaves, I'm the one with the
* smallest node ID (the "candidate slave").
*
* Note: this means that eventually a replica migration will occurr
* Note: this means that eventually a replica migration will occur
* since slaves that are reachable again always have their FAIL flag
* cleared, so eventually there must be a candidate. At the same time
* this does not mean that there are no race conditions possible (two
@ -3620,7 +3620,7 @@ void clusterCloseAllSlots(void) {
* -------------------------------------------------------------------------- */
/* The following are defines that are only used in the evaluation function
* and are based on heuristics. Actaully the main point about the rejoin and
* and are based on heuristics. Actually the main point about the rejoin and
* writable delay is that they should be a few orders of magnitude larger
* than the network latency. */
#define CLUSTER_MAX_REJOIN_DELAY 5000
@ -5376,7 +5376,7 @@ void clusterRedirectClient(client *c, clusterNode *n, int hashslot, int error_co
if (error_code == CLUSTER_REDIR_CROSS_SLOT) {
addReplySds(c,sdsnew("-CROSSSLOT Keys in request don't hash to the same slot\r\n"));
} else if (error_code == CLUSTER_REDIR_UNSTABLE) {
/* The request spawns mutliple keys in the same slot,
/* The request spawns multiple keys in the same slot,
* but the slot is not "stable" currently as there is
* a migration or import in progress. */
addReplySds(c,sdsnew("-TRYAGAIN Multiple keys request during rehashing of slot\r\n"));

View File

@ -230,7 +230,7 @@ union clusterMsgData {
#define CLUSTER_PROTO_VER 1 /* Cluster bus protocol version. */
typedef struct {
char sig[4]; /* Siganture "RCmb" (Redis Cluster message bus). */
char sig[4]; /* Signature "RCmb" (Redis Cluster message bus). */
uint32_t totlen; /* Total length of this message */
uint16_t ver; /* Protocol version, currently set to 1. */
uint16_t port; /* TCP base port number. */

View File

@ -106,7 +106,7 @@ robj *lookupKeyReadWithFlags(redisDb *db, robj *key, int flags) {
* safety measure, the command invoked is a read-only command, we can
* safely return NULL here, and provide a more consistent behavior
* to clients accessign expired values in a read-only fashion, that
* will say the key as non exisitng.
* will say the key as non existing.
*
* Notably this covers GETs when slaves are used to scale reads. */
if (server.current_client &&
@ -296,7 +296,7 @@ robj *dbUnshareStringValue(redisDb *db, robj *key, robj *o) {
* If callback is given the function is called from time to time to
* signal that work is in progress.
*
* The dbnum can be -1 if all teh DBs should be flushed, or the specified
* The dbnum can be -1 if all the DBs should be flushed, or the specified
* DB number if we want to flush only a single Redis database number.
*
* Flags are be EMPTYDB_NO_FLAGS if no special flags are specified or

View File

@ -1012,7 +1012,7 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) {
"Redis %s crashed by signal: %d", REDIS_VERSION, sig);
if (eip != NULL) {
serverLog(LL_WARNING,
"Crashed running the instuction at: %p", eip);
"Crashed running the instruction at: %p", eip);
}
if (sig == SIGSEGV || sig == SIGBUS) {
serverLog(LL_WARNING,

View File

@ -558,7 +558,7 @@ void activeDefragCycle(void) {
cursor = dictScan(db->dict, cursor, defragScanCallback, defragDictBucketCallback, db);
/* Once in 16 scan iterations, or 1000 pointer reallocations
* (if we have a lot of pointers in one hash bucket), check if we
* reached the tiem limit. */
* reached the time limit. */
if (cursor && (++iterations > 16 || server.stat_active_defrag_hits - defragged > 1000)) {
if ((ustime() - start) > timelimit) {
return;

View File

@ -327,7 +327,7 @@ int dictReplace(dict *d, void *key, void *val)
dictEntry *entry, *existing, auxentry;
/* Try to add the element. If the key
* does not exists dictAdd will suceed. */
* does not exists dictAdd will succeed. */
entry = dictAddRaw(d,key,&existing);
if (entry) {
dictSetVal(d, entry, val);

View File

@ -43,7 +43,7 @@ uint16_t intrev16(uint16_t v);
uint32_t intrev32(uint32_t v);
uint64_t intrev64(uint64_t v);
/* variants of the function doing the actual convertion only if the target
/* variants of the function doing the actual conversion only if the target
* host is big endian */
#if (BYTE_ORDER == LITTLE_ENDIAN)
#define memrev16ifbe(p)

View File

@ -145,7 +145,7 @@ double extractUnitOrReply(client *c, robj *unit) {
/* Input Argument Helper.
* Extract the dinstance from the specified two arguments starting at 'argv'
* that shouldbe in the form: <number> <unit> and return the dinstance in the
* specified unit on success. *conversino is populated with the coefficient
* specified unit on success. *conversions is populated with the coefficient
* to use in order to convert meters to the unit.
*
* On error a value less than zero is returned. */
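
The coefficient the comment refers to is just the meters-per-unit factor: a value expressed in the unit times the coefficient gives meters, and meters divided by it converts back to the unit. An illustrative lookup (a sketch, not the geo.c helper; the mile and feet factors are the usual conversion constants):

#include <string.h>

static double unit_to_meters_coeff(const char *unit) {
    if (strcmp(unit, "m")  == 0) return 1.0;
    if (strcmp(unit, "km") == 0) return 1000.0;
    if (strcmp(unit, "mi") == 0) return 1609.34;
    if (strcmp(unit, "ft") == 0) return 0.3048;
    return -1.0;   /* unknown unit: a negative value signals an error */
}

So a distance given as "5 km" becomes 5 * 1000 = 5000 meters.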

View File

@ -665,7 +665,7 @@ int hllSparseAdd(robj *o, unsigned char *ele, size_t elesize) {
end = p + sdslen(o->ptr) - HLL_HDR_SIZE;
first = 0;
prev = NULL; /* Points to previos opcode at the end of the loop. */
prev = NULL; /* Points to previous opcode at the end of the loop. */
next = NULL; /* Points to the next opcode at the end of the loop. */
span = 0;
while(p < end) {
@ -756,7 +756,7 @@ int hllSparseAdd(robj *o, unsigned char *ele, size_t elesize) {
* and is either currently represented by a VAL opcode with len > 1,
* by a ZERO opcode with len > 1, or by an XZERO opcode.
*
* In those cases the original opcode must be split into muliple
* In those cases the original opcode must be split into multiple
* opcodes. The worst case is an XZERO split in the middle resuling into
* XZERO - VAL - XZERO, so the resulting sequence max length is
* 5 bytes.
@ -879,7 +879,7 @@ promote: /* Promote to dense representation. */
*
* Note that this in turn means that PFADD will make sure the command
* is propagated to slaves / AOF, so if there is a sparse -> dense
* convertion, it will be performed in all the slaves as well. */
* conversion, it will be performed in all the slaves as well. */
int dense_retval = hllDenseAdd(hdr->registers, ele, elesize);
serverAssert(dense_retval == 1);
return dense_retval;

View File

@ -151,7 +151,7 @@ int latencyResetEvent(char *event_to_reset) {
/* ------------------------ Latency reporting (doctor) ---------------------- */
/* Analyze the samples avaialble for a given event and return a structure
/* Analyze the samples available for a given event and return a structure
* populate with different metrics, average, MAD, min, max, and so forth.
* Check latency.h definition of struct latenctStat for more info.
* If the specified event has no elements the structure is populate with

View File

@ -23,10 +23,10 @@ size_t lazyfreeGetPendingObjectsCount(void) {
* the function just returns the number of elements the object is composed of.
*
* Objects composed of single allocations are always reported as having a
* single item even if they are actaully logical composed of multiple
* single item even if they are actually logical composed of multiple
* elements.
*
* For lists the funciton returns the number of elements in the quicklist
* For lists the function returns the number of elements in the quicklist
* representing the list. */
size_t lazyfreeGetFreeEffort(robj *obj) {
if (obj->type == OBJ_LIST) {

View File

@ -3520,7 +3520,7 @@ void moduleInitModulesSystem(void) {
* because the server must be fully initialized before loading modules.
*
* The function aborts the server on errors, since to start with missing
* modules is not considered sane: clients may rely on the existance of
* modules is not considered sane: clients may rely on the existence of
* given commands, loading AOF also may need some modules to exist, and
* if this instance is a slave, it must understand commands from master. */
void moduleLoadFromQueue(void) {

View File

@ -535,7 +535,7 @@ both modes. Currently a key opened for writing can also be accessed for reading
but this is to be considered an implementation detail. The right mode should
be used in sane modules.
You can open non exisitng keys for writing, since the keys will be created
You can open non existing keys for writing, since the keys will be created
when an attempt to write to the key is performed. However when opening keys
just for reading, `RedisModule_OpenKey` will return NULL if the key does not
exist.
@ -664,7 +664,7 @@ is used. Example:
RedisModule_StringTruncate(mykey,1024);
The function truncates, or enlarges the string as needed, padding it with
zero bytes if the previos length is smaller than the new length we request.
zero bytes if the previous length is smaller than the new length we request.
If the string does not exist since `key` is associated to an open empty key,
a string value is created and associated to the key.
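
A hedged sketch of a module command built on the behavior described above: opening a possibly non-existing key for writing and truncating or enlarging it, with zero-padding when growing. The module and command names are made up for the example and error handling is trimmed; the API calls themselves are the standard modules API.

#include "redismodule.h"

int TruncDemo_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    if (argc != 3) return RedisModule_WrongArity(ctx);

    long long newlen;
    if (RedisModule_StringToLongLong(argv[2], &newlen) != REDISMODULE_OK || newlen < 0)
        return RedisModule_ReplyWithError(ctx, "ERR invalid length");

    /* Opening for writing works even if the key does not exist yet. */
    RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1],
                                              REDISMODULE_READ | REDISMODULE_WRITE);
    RedisModule_StringTruncate(key, (size_t)newlen);  /* zero-pads when enlarging */
    RedisModule_CloseKey(key);
    return RedisModule_ReplyWithLongLong(ctx, newlen);
}

int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    REDISMODULE_NOT_USED(argv);
    REDISMODULE_NOT_USED(argc);
    if (RedisModule_Init(ctx, "truncdemo", 1, REDISMODULE_APIVER_1) == REDISMODULE_ERR)
        return REDISMODULE_ERR;
    if (RedisModule_CreateCommand(ctx, "truncdemo.setlen", TruncDemo_RedisCommand,
                                  "write", 1, 1, 1) == REDISMODULE_ERR)
        return REDISMODULE_ERR;
    return REDISMODULE_OK;
}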

View File

@ -1,5 +1,5 @@
# gendoc.rb -- Converts the top-comments inside module.c to modules API
# reference documentaiton in markdown format.
# reference documentation in markdown format.
# Convert the C comment to markdown
def markdown(s)

View File

@ -1636,7 +1636,7 @@ int quicklistTest(int argc, char *argv[]) {
TEST("add to tail of empty list") {
quicklist *ql = quicklistNew(-2, options[_i]);
quicklistPushTail(ql, "hello", 6);
/* 1 for head and 1 for tail beacuse 1 node = head = tail */
/* 1 for head and 1 for tail because 1 node = head = tail */
ql_verify(ql, 1, 1, 1, 1);
quicklistRelease(ql);
}
@ -1644,7 +1644,7 @@ int quicklistTest(int argc, char *argv[]) {
TEST("add to head of empty list") {
quicklist *ql = quicklistNew(-2, options[_i]);
quicklistPushHead(ql, "hello", 6);
/* 1 for head and 1 for tail beacuse 1 node = head = tail */
/* 1 for head and 1 for tail because 1 node = head = tail */
ql_verify(ql, 1, 1, 1, 1);
quicklistRelease(ql);
}

View File

@ -448,7 +448,7 @@ int raxInsert(rax *rax, unsigned char *s, size_t len, void *data, void **old) {
/* If the node we stopped at is a compressed node, we need to
* split it before to continue.
*
* Splitting a compressed node have a few possibile cases.
* Splitting a compressed node have a few possible cases.
* Imagine that the node 'h' we are currently at is a compressed
* node contaning the string "ANNIBALE" (it means that it represents
* nodes A -> N -> N -> I -> B -> A -> L -> E with the only child
@ -730,7 +730,7 @@ int raxInsert(rax *rax, unsigned char *s, size_t len, void *data, void **old) {
cp = raxNodeLastChildPtr(trimmed);
memcpy(cp,&postfix,sizeof(postfix));
/* Finish! We don't need to contine with the insertion
/* Finish! We don't need to continue with the insertion
* algorithm for ALGO 2. The key is already inserted. */
rax->numele++;
rax_free(h);

View File

@ -94,7 +94,7 @@ typedef struct raxNode {
*
* If the node has an associated key (iskey=1) and is not NULL
* (isnull=0), then after the raxNode pointers poiting to the
* childen, an additional value pointer is present (as you can see
* children, an additional value pointer is present (as you can see
* in the representation above as "value-ptr" field).
*/
unsigned char data[];

View File

@ -1684,7 +1684,7 @@ void backgroundSaveDoneHandlerDisk(int exitcode, int bysignal) {
latencyEndMonitor(latency);
latencyAddSampleIfNeeded("rdb-unlink-temp-file",latency);
/* SIGUSR1 is whitelisted, so we have a way to kill a child without
* tirggering an error conditon. */
* tirggering an error condition. */
if (bysignal != SIGUSR1)
server.lastbgsave_status = C_ERR;
}
@ -1721,7 +1721,7 @@ void backgroundSaveDoneHandlerSocket(int exitcode, int bysignal) {
* in error state.
*
* If the process returned an error, consider the list of slaves that
* can continue to be emtpy, so that it's just a special case of the
* can continue to be empty, so that it's just a special case of the
* normal code path. */
ok_slaves = zmalloc(sizeof(uint64_t)); /* Make space for the count. */
ok_slaves[0] = 0;

View File

@ -2058,15 +2058,15 @@ static void getKeySizes(redisReply *keys, int *types,
keys->element[i]->str);
}
/* Retreive sizes */
/* Retrieve sizes */
for(i=0;i<keys->elements;i++) {
/* Skip keys that dissapeared between SCAN and TYPE */
/* Skip keys that disappeared between SCAN and TYPE */
if(types[i] == TYPE_NONE) {
sizes[i] = 0;
continue;
}
/* Retreive size */
/* Retrieve size */
if(redisGetReply(context, (void**)&reply)!=REDIS_OK) {
fprintf(stderr, "Error getting size for key '%s' (%d: %s)\n",
keys->element[i]->str, context->err, context->errstr);
@ -2136,7 +2136,7 @@ static void findBigKeys(void) {
arrsize = keys->elements;
}
/* Retreive types and then sizes */
/* Retrieve types and then sizes */
getKeyTypes(keys, types);
getKeySizes(keys, types, sizes);
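
The surrounding code is redis-cli's big-keys scan, which pipelines one TYPE (and later one size command) per key and then drains the replies with redisGetReply(). A reduced sketch of that pipelining pattern with hiredis (connection details and the key list are placeholders; this is not the redis-cli function itself):

#include <stdio.h>
#include <string.h>
#include <hiredis/hiredis.h>

static void print_key_types(redisContext *c, const char **keys, size_t nkeys) {
    /* Queue all TYPE commands first... */
    for (size_t i = 0; i < nkeys; i++)
        redisAppendCommand(c, "TYPE %s", keys[i]);

    /* ...then read the replies in order, skipping keys that disappeared
     * between SCAN and TYPE (their type comes back as "none"). */
    for (size_t i = 0; i < nkeys; i++) {
        redisReply *reply = NULL;
        if (redisGetReply(c, (void **)&reply) != REDIS_OK || reply == NULL) {
            fprintf(stderr, "Error getting type for key '%s': %s\n",
                    keys[i], c->errstr);
            return;
        }
        if (reply->type == REDIS_REPLY_STATUS && strcmp(reply->str, "none") != 0)
            printf("%s -> %s\n", keys[i], reply->str);
        freeReplyObject(reply);
    }
}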

View File

@ -454,7 +454,7 @@ class RedisTrib
# Handle case "1": keys in no node.
if none.length > 0
xputs "The folowing uncovered slots have no keys across the cluster:"
xputs "The following uncovered slots have no keys across the cluster:"
xputs none.keys.join(",")
yes_or_die "Fix these slots by covering with a random node?"
none.each{|slot,nodes|
@ -466,7 +466,7 @@ class RedisTrib
# Handle case "2": keys only in one node.
if single.length > 0
xputs "The folowing uncovered slots have keys in just one node:"
xputs "The following uncovered slots have keys in just one node:"
puts single.keys.join(",")
yes_or_die "Fix these slots by covering with those nodes?"
single.each{|slot,nodes|
@ -477,7 +477,7 @@ class RedisTrib
# Handle case "3": keys in multiple nodes.
if multi.length > 0
xputs "The folowing uncovered slots have keys in multiple nodes:"
xputs "The following uncovered slots have keys in multiple nodes:"
xputs multi.keys.join(",")
yes_or_die "Fix these slots by moving keys into a single node?"
multi.each{|slot,nodes|
@ -1102,7 +1102,7 @@ class RedisTrib
if numslots > 0
puts "Moving #{numslots} slots from #{src} to #{dst}"
# Actaully move the slots.
# Actually move the slots.
reshard_table = compute_reshard_table([src],numslots)
if reshard_table.length != numslots
xputs "*** Assertio failed: Reshard table != number of slots"
@ -1622,7 +1622,7 @@ private
]
end
# Turn a key name into the corrisponding Redis Cluster slot.
# Turn a key name into the corresponding Redis Cluster slot.
def key_to_slot(key)
# Only hash what is inside {...} if there is such a pattern in the key.
# Note that the specification requires the content that is between
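
The comment above (truncated by the diff context) describes the standard Redis Cluster hash-tag rule: if the key contains a {...} section whose content is non-empty, only that content is hashed, otherwise the whole key is, and the slot is the CRC16 (XMODEM variant) of that string modulo 16384. A C sketch of the same rule the Ruby helper implements (function names here are illustrative):

#include <stdint.h>
#include <string.h>
#include <stddef.h>

/* CRC16/XMODEM: polynomial 0x1021, initial value 0, no reflection. */
static uint16_t crc16(const char *buf, size_t len) {
    uint16_t crc = 0;
    for (size_t i = 0; i < len; i++) {
        crc ^= (uint16_t)((unsigned char)buf[i]) << 8;
        for (int b = 0; b < 8; b++)
            crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x1021)
                                 : (uint16_t)(crc << 1);
    }
    return crc;
}

/* Hash only what is inside the first {...} if non-empty, else the whole key. */
static int key_to_slot(const char *key, size_t len) {
    const char *open = memchr(key, '{', len);
    if (open) {
        const char *close = memchr(open + 1, '}', len - (size_t)(open - key) - 1);
        if (close && close > open + 1)
            return crc16(open + 1, (size_t)(close - open - 1)) % 16384;
    }
    return crc16(key, len) % 16384;
}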

View File

@ -1,4 +1,4 @@
/* redisassert.h -- Drop in replacemnet assert.h that prints the stack trace
/* redisassert.h -- Drop in replacement assert.h that prints the stack trace
* in the Redis logs.
*
* This file should be included instead of "assert.h" inside libraries used by

View File

@ -553,7 +553,7 @@ need_full_resync:
* Side effects, other than starting a BGSAVE:
*
* 1) Handle the slaves in WAIT_START state, by preparing them for a full
* sync if the BGSAVE was succesfully started, or sending them an error
* sync if the BGSAVE was successfully started, or sending them an error
* and dropping them from the list of slaves.
*
* 2) Flush the Lua scripting script cache if the BGSAVE was actually
@ -895,7 +895,7 @@ void sendBulkToSlave(aeEventLoop *el, int fd, void *privdata, int mask) {
}
}
/* If the preamble was already transfered, send the RDB bulk data. */
/* If the preamble was already transferred, send the RDB bulk data. */
lseek(slave->repldbfd,slave->repldboff,SEEK_SET);
buflen = read(slave->repldbfd,buf,PROTO_IOBUF_LEN);
if (buflen <= 0) {
@ -964,7 +964,7 @@ void updateSlavesWaitingBgsave(int bgsaveerr, int type) {
replicationGetSlaveName(slave));
/* Note: we wait for a REPLCONF ACK message from slave in
* order to really put it online (install the write handler
* so that the accumulated data can be transfered). However
* so that the accumulated data can be transferred). However
* we change the replication state ASAP, since our slave
* is technically online now. */
slave->replstate = SLAVE_STATE_ONLINE;
@ -1047,7 +1047,7 @@ int slaveIsInHandshakeState(void) {
/* Avoid the master to detect the slave is timing out while loading the
* RDB file in initial synchronization. We send a single newline character
* that is valid protocol but is guaranteed to either be sent entierly or
* that is valid protocol but is guaranteed to either be sent entirely or
* not, since the byte is indivisible.
*
* The function is called in two contexts: while we flush the current
@ -1387,7 +1387,7 @@ char *sendSynchronousCommand(int flags, int fd, ...) {
*
* The function returns:
*
* PSYNC_CONTINUE: If the PSYNC command succeded and we can continue.
* PSYNC_CONTINUE: If the PSYNC command succeeded and we can continue.
* PSYNC_FULLRESYNC: If PSYNC is supported but a full resync is needed.
* In this case the master run_id and global replication
* offset is saved.
@ -2100,7 +2100,7 @@ void replicationSendAck(void) {
* functions. */
/* This function is called by freeClient() in order to cache the master
* client structure instead of destryoing it. freeClient() will return
* client structure instead of destroying it. freeClient() will return
* ASAP after this function returns, so every action needed to avoid problems
* with a client that is really "suspended" has to be done by this function.
*

View File

@ -271,7 +271,7 @@ sds sdsRemoveFreeSpace(sds s) {
return s;
}
/* Return the total size of the allocation of the specifed sds string,
/* Return the total size of the allocation of the specified sds string,
* including:
* 1) The sds header before the pointer.
* 2) The string.

View File

@ -494,7 +494,7 @@ void sentinelIsRunning(void) {
if (sentinel.myid[j] != 0) break;
if (j == CONFIG_RUN_ID_SIZE) {
/* Pick ID and presist the config. */
/* Pick ID and persist the config. */
getRandomHexChars(sentinel.myid,CONFIG_RUN_ID_SIZE);
sentinelFlushConfig();
}
@ -2452,7 +2452,7 @@ void sentinelReceiveHelloMessages(redisAsyncContext *c, void *reply, void *privd
}
/* Send an "Hello" message via Pub/Sub to the specified 'ri' Redis
* instance in order to broadcast the current configuraiton for this
* instance in order to broadcast the current configuration for this
* master, and to advertise the existence of this Sentinel at the same time.
*
* The message has the following format:
@ -3271,7 +3271,7 @@ void sentinelInfoCommand(client *c) {
addReplyBulkSds(c, info);
}
/* Implements Sentinel verison of the ROLE command. The output is
/* Implements Sentinel version of the ROLE command. The output is
* "sentinel" and the list of currently monitored master names. */
void sentinelRoleCommand(client *c) {
dictIterator *di;
@ -3413,7 +3413,7 @@ void sentinelCheckSubjectivelyDown(sentinelRedisInstance *ri) {
if (ri->link->cc &&
(mstime() - ri->link->cc_conn_time) >
SENTINEL_MIN_LINK_RECONNECT_PERIOD &&
ri->link->act_ping_time != 0 && /* Ther is a pending ping... */
ri->link->act_ping_time != 0 && /* There is a pending ping... */
/* The pending ping is delayed, and we did not received
* error replies as well. */
(mstime() - ri->link->act_ping_time) > (ri->down_after_period/2) &&
@ -3601,7 +3601,7 @@ void sentinelSimFailureCrash(void) {
}
/* Vote for the sentinel with 'req_runid' or return the old vote if already
* voted for the specifed 'req_epoch' or one greater.
* voted for the specified 'req_epoch' or one greater.
*
* If a vote is not available returns NULL, otherwise return the Sentinel
* runid and populate the leader_epoch with the epoch of the vote. */
@ -3752,7 +3752,7 @@ int sentinelSendSlaveOf(sentinelRedisInstance *ri, char *host, int port) {
/* In order to send SLAVEOF in a safe way, we send a transaction performing
* the following tasks:
* 1) Reconfigure the instance according to the specified host/port params.
* 2) Rewrite the configuraiton.
* 2) Rewrite the configuration.
* 3) Disconnect all clients (but this one sending the commnad) in order
* to trigger the ask-master-on-reconnection protocol for connected
* clients.

View File

@ -2239,7 +2239,7 @@ void call(client *c, int flags) {
if (c->flags & CLIENT_FORCE_AOF) propagate_flags |= PROPAGATE_AOF;
/* However prevent AOF / replication propagation if the command
* implementatino called preventCommandPropagation() or similar,
* implementation called preventCommandPropagation() or similar,
* or if we don't have the call() flags to do so. */
if (c->flags & CLIENT_PREVENT_REPL_PROP ||
!(flags & CMD_CALL_PROPAGATE_REPL))
@ -3737,7 +3737,7 @@ int main(int argc, char **argv) {
configfile = argv[j];
server.configfile = getAbsolutePath(configfile);
/* Replace the config file in server.exec_argv with
* its absoulte path. */
* its absolute path. */
zfree(server.exec_argv[j]);
server.exec_argv[j] = zstrdup(server.configfile);
j++;

View File

@ -1523,11 +1523,11 @@ void receiveChildInfo(void);
#define ZADD_NONE 0
#define ZADD_INCR (1<<0) /* Increment the score instead of setting it. */
#define ZADD_NX (1<<1) /* Don't touch elements not already existing. */
#define ZADD_XX (1<<2) /* Only touch elements already exisitng. */
#define ZADD_XX (1<<2) /* Only touch elements already existing. */
/* Output flags. */
#define ZADD_NOP (1<<3) /* Operation not performed because of conditionals.*/
#define ZADD_NAN (1<<4) /* Only touch elements already exisitng. */
#define ZADD_NAN (1<<4) /* Only touch elements already existing. */
#define ZADD_ADDED (1<<5) /* The element was new and was added. */
#define ZADD_UPDATED (1<<6) /* The element already existed, score updated. */

View File

@ -193,7 +193,7 @@ void sortCommand(client *c) {
long limit_start = 0, limit_count = -1, start, end;
int j, dontsort = 0, vectorlen;
int getop = 0; /* GET operation counter */
int int_convertion_error = 0;
int int_conversion_error = 0;
int syntax_error = 0;
robj *sortval, *sortby = NULL, *storekey = NULL;
redisSortObject *vector; /* Resulting vector to sort */
@ -469,7 +469,7 @@ void sortCommand(client *c) {
if (eptr[0] != '\0' || errno == ERANGE ||
isnan(vector[j].u.score))
{
int_convertion_error = 1;
int_conversion_error = 1;
}
} else if (byval->encoding == OBJ_ENCODING_INT) {
/* Don't need to decode the object if it's
@ -503,7 +503,7 @@ void sortCommand(client *c) {
/* Send command output to the output buffer, performing the specified
* GET/DEL/INCR/DECR operations if any. */
outputlen = getop ? getop*(end-start+1) : end-start+1;
if (int_convertion_error) {
if (int_conversion_error) {
addReplyError(c,"One or more scores can't be converted into double");
} else if (storekey == NULL) {
/* STORE option not specified, sent the sorting result to client */

View File

@ -507,7 +507,7 @@ static int zslParseRange(robj *min, robj *max, zrangespec *spec) {
* + means the max string possible
*
* If the string is valid the *dest pointer is set to the redis object
* that will be used for the comparision, and ex will be set to 0 or 1
* that will be used for the comparison, and ex will be set to 0 or 1
* respectively if the item is exclusive or inclusive. C_OK will be
* returned.
*
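
The comment belongs to the lexicographic range parser used by the ZRANGEBYLEX family: '-' and '+' mean the minimum and maximum possible strings, while '(' prefixes an exclusive bound and '[' an inclusive one. A rough sketch of that parsing rule (illustrative names, not the t_zset.c implementation):

#include <stddef.h>

typedef struct {
    const char *item;  /* start of the bound string (special-cased for + and -) */
    size_t len;
    int ex;            /* 1 = exclusive, 0 = inclusive */
} lexbound;

/* Returns 0 on success, -1 if the bound is not in a valid format. */
static int parse_lex_bound(const char *s, size_t slen, lexbound *out) {
    if (slen == 0) return -1;
    switch (s[0]) {
    case '+': case '-':                 /* max / min possible string */
        if (slen != 1) return -1;
        out->item = s; out->len = 1; out->ex = 0;
        return 0;
    case '(': out->ex = 1; break;       /* exclusive bound */
    case '[': out->ex = 0; break;       /* inclusive bound */
    default: return -1;
    }
    out->item = s + 1; out->len = slen - 1;
    return 0;
}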

View File

@ -451,7 +451,7 @@ int string2ld(const char *s, size_t slen, long double *dp) {
/* Convert a double to a string representation. Returns the number of bytes
* required. The representation should always be parsable by strtod(3).
* This function does not support human-friendly formatting like ld2string
* does. It is intented mainly to be used inside t_zset.c when writing scores
* does. It is intended mainly to be used inside t_zset.c when writing scores
* into a ziplist representing a sorted set. */
int d2string(char *buf, size_t len, double value) {
if (isnan(value)) {

View File

@ -269,7 +269,7 @@
* Note that this is not how the data is actually encoded, is just what we
* get filled by a function in order to operate more easily. */
typedef struct zlentry {
unsigned int prevrawlensize; /* Bytes used to encode the previos entry len*/
unsigned int prevrawlensize; /* Bytes used to encode the previous entry len*/
unsigned int prevrawlen; /* Previous entry len. */
unsigned int lensize; /* Bytes used to encode this entry type/len.
For example strings have a 1, 2 or 5 bytes
@ -431,7 +431,7 @@ unsigned int zipStorePrevEntryLength(unsigned char *p, unsigned int len) {
/* Return the length of the previous element, and the number of bytes that
* are used in order to encode the previous element length.
* 'ptr' must point to the prevlen prefix of an entry (that encodes the
* length of the previos entry in order to navigate the elements backward).
* length of the previous entry in order to navigate the elements backward).
* The length of the previous entry is stored in 'prevlen', the number of
* bytes needed to encode the previous entry length are stored in
* 'prevlensize'. */
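
The prevlen field described above uses one byte for previous-entry lengths below 254 and a five-byte form (a 0xFE marker followed by a 4-byte length) otherwise. A decoding sketch (simplified: the 4-byte length is read in host byte order here, while ziplist.c applies memrev32ifbe on big-endian hosts):

#include <stdint.h>
#include <string.h>

static void decode_prevlen(const unsigned char *p,
                           unsigned int *prevlensize, unsigned int *prevlen) {
    if (p[0] < 254) {            /* short form: the length fits in a single byte */
        *prevlensize = 1;
        *prevlen = p[0];
    } else {                     /* long form: 0xFE marker + 4-byte length */
        uint32_t len;
        memcpy(&len, p + 1, sizeof(len));
        *prevlensize = 5;
        *prevlen = len;
    }
}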

View File

@ -353,7 +353,7 @@ size_t zmalloc_get_private_dirty(long pid) {
}
/* Returns the size of physical memory (RAM) in bytes.
* It looks ugly, but this is the cleanest way to achive cross platform results.
* It looks ugly, but this is the cleanest way to achieve cross platform results.
* Cleaned up from:
*
* http://nadeausoftware.com/articles/2012/09/c_c_tip_how_get_physical_memory_size_system

View File

@ -11,7 +11,7 @@ proc stop_bg_complex_data {handle} {
# partial resyncs attempts, all this while flooding the master with
# write queries.
#
# You can specifiy backlog size, ttl, delay before reconnection, test duration
# You can specify backlog size, ttl, delay before reconnection, test duration
# in seconds, and an additional condition to verify at the end.
#
# If reconnect is > 0, the test actually try to break the connection and

View File

@ -246,7 +246,7 @@ start_server {tags {"dump"}} {
set e
} {*empty string*}
test {MIGRATE with mutliple keys migrate just existing ones} {
test {MIGRATE with multiple keys migrate just existing ones} {
set first [srv 0 client]
r set key1 "v1"
r set key2 "v2"

View File

@ -121,7 +121,7 @@ start_server {tags {"expire"}} {
list $a $b
} {somevalue {}}
test {TTL returns tiem to live in seconds} {
test {TTL returns time to live in seconds} {
r del x
r setex x 10 somevalue
set ttl [r ttl x]

View File

@ -516,7 +516,7 @@ start_server {tags {"scripting"}} {
# Note: keep this test at the end of this server stanza because it
# kills the server.
test {SHUTDOWN NOSAVE can kill a timedout script anyway} {
# The server sould be still unresponding to normal commands.
# The server should be still unresponding to normal commands.
catch {r ping} e
assert_match {BUSY*} $e
catch {r shutdown nosave}

View File

@ -84,7 +84,7 @@ start_server {tags {"zset"}} {
set err
} {ERR*}
test "ZADD NX with non exisitng key" {
test "ZADD NX with non existing key" {
r del ztmp
r zadd ztmp nx 10 x 20 y 30 z
assert {[r zcard ztmp] == 3}

View File

@ -5,7 +5,7 @@ rehashing.c
Visually show buckets in the two hash tables between rehashings. Also stress
test getRandomKeys() implementation, that may actually disappear from
Redis soon, however visualizaiton some code is reusable in new bugs
Redis soon, however visualization some code is reusable in new bugs
investigation.
Compile with: