split test suite into multiple files; runs redis-server in isolation

This commit is contained in:
Pieter Noordhuis 2010-05-14 17:31:11 +02:00
parent 758b6d4c27
commit 98578b5704
18 changed files with 2741 additions and 0 deletions

312
test/assets/default.conf Normal file
View File

@ -0,0 +1,312 @@
# Redis configuration file example
# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
#
# 1k => 1000 bytes
# 1kb => 1024 bytes
# 1m => 1000000 bytes
# 1mb => 1024*1024 bytes
# 1g => 1000000000 bytes
# 1gb => 1024*1024*1024 bytes
#
# units are case insensitive so 1GB 1Gb 1gB are all the same.
# By default Redis does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
daemonize no
# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
# default. You can specify a custom pid file location here.
pidfile redis.pid
# Accept connections on the specified port, default is 6379
port 6379
# If you want you can bind a single interface, if the bind option is not
# specified all the interfaces will listen for incoming connections.
#
# bind 127.0.0.1
# Close the connection after a client is idle for N seconds (0 to disable)
timeout 300
# Set server verbosity to 'debug'
# it can be one of:
# debug (a lot of information, useful for development/testing)
# verbose (many rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
loglevel verbose
# Specify the log file name. Also 'stdout' can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile stdout
# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT <dbid> where
# dbid is a number between 0 and 'databases'-1
databases 16
################################ SNAPSHOTTING #################################
#
# Save the DB on disk:
#
# save <seconds> <changes>
#
# Will save the DB if both the given number of seconds and the given
# number of write operations against the DB occurred.
#
# In the example below the behaviour will be to save:
# after 900 sec (15 min) if at least 1 key changed
# after 300 sec (5 min) if at least 10 keys changed
# after 60 sec if at least 10000 keys changed
#
# Note: you can disable saving completely by commenting out all "save" lines.
save 900 1
save 300 10
save 60 10000
# Compress string objects using LZF when dump .rdb databases?
# For default that's set to 'yes' as it's almost always a win.
# If you want to save some CPU in the saving child set it to 'no' but
# the dataset will likely be bigger if you have compressible values or keys.
rdbcompression yes
# The filename where to dump the DB
dbfilename dump.rdb
# The working directory.
#
# The DB will be written inside this directory, with the filename specified
# above using the 'dbfilename' configuration directive.
#
# Also the Append Only File will be created inside this directory.
#
# Note that you must specify a directory here, not a file name.
dir ./test/tmp
################################# REPLICATION #################################
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
# another Redis server. Note that the configuration is local to the slave
# so for example it is possible to configure the slave to save the DB with a
# different interval, or to listen to another port, and so on.
#
# slaveof <masterip> <masterport>
# If the master is password protected (using the "requirepass" configuration
# directive below) it is possible to tell the slave to authenticate before
# starting the replication synchronization process, otherwise the master will
# refuse the slave request.
#
# masterauth <master-password>
################################## SECURITY ###################################
# Require clients to issue AUTH <PASSWORD> before processing any other
# commands. This might be useful in environments in which you do not trust
# others with access to the host running redis-server.
#
# This should stay commented out for backward compatibility and because most
# people do not need auth (e.g. they run their own servers).
#
# Warning: since Redis is pretty fast an outside user can try up to
# 150k passwords per second against a good box. This means that you should
# use a very strong password otherwise it will be very easy to break.
#
# requirepass foobared
################################### LIMITS ####################################
# Set the max number of connected clients at the same time. By default there
# is no limit, and it's up to the number of file descriptors the Redis process
# is able to open. The special value '0' means no limits.
# Once the limit is reached Redis will close all the new connections sending
# an error 'max number of clients reached'.
#
# maxclients 128
# Don't use more memory than the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys with an
# EXPIRE set. It will try to start freeing keys that are going to expire
# in little time and preserve keys with a longer time to live.
# Redis will also try to remove objects from free lists if possible.
#
# If all this fails, Redis will start to reply with errors to commands
# that will use more memory, like SET, LPUSH, and so on, and will continue
# to reply to most read-only commands like GET.
#
# WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
# 'state' server or cache, not as a real DB. When Redis is used as a real
# database the memory usage will grow over the weeks, it will be obvious if
# it is going to use too much memory in the long run, and you'll have the time
# to upgrade. With maxmemory after the limit is reached you'll start to get
# errors for write operations, and this may even lead to DB inconsistency.
#
# maxmemory <bytes>
############################## APPEND ONLY MODE ###############################
# By default Redis asynchronously dumps the dataset on disk. If you can live
# with the idea that the latest records will be lost if something like a crash
# happens this is the preferred way to run Redis. If instead you care a lot
# about your data and don't want to that a single record can get lost you should
# enable the append only mode: when this mode is enabled Redis will append
# every write operation received in the file appendonly.aof. This file will
# be read on startup in order to rebuild the full dataset in memory.
#
# Note that you can have both the async dumps and the append only file if you
# like (you have to comment the "save" statements above to disable the dumps).
# Still if append only mode is enabled Redis will load the data from the
# log file at startup ignoring the dump.rdb file.
#
# IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append
# log file in background when it gets too big.
appendonly no
# The name of the append only file (default: "appendonly.aof")
# appendfilename appendonly.aof
# The fsync() call tells the Operating System to actually write data on disk
# instead to wait for more data in the output buffer. Some OS will really flush
# data on disk, some other OS will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log . Slow, Safest.
# everysec: fsync only if one second passed since the last fsync. Compromise.
#
# The default is "everysec" that's usually the right compromise between
# speed and data safety. It's up to you to understand if you can relax this to
# "no" that will will let the operating system flush the output buffer when
# it wants, for better performances (but if you can live with the idea of
# some data loss consider the default persistence mode that's snapshotting),
# or on the contrary, use "always" that's very slow but a bit safer than
# everysec.
#
# If unsure, use "everysec".
# appendfsync always
appendfsync everysec
# appendfsync no
################################ VIRTUAL MEMORY ###############################
# Virtual Memory allows Redis to work with datasets bigger than the actual
# amount of RAM needed to hold the whole dataset in memory.
# In order to do so very used keys are taken in memory while the other keys
# are swapped into a swap file, similarly to what operating systems do
# with memory pages.
#
# To enable VM just set 'vm-enabled' to yes, and set the following three
# VM parameters accordingly to your needs.
vm-enabled no
# vm-enabled yes
# This is the path of the Redis swap file. As you can guess, swap files
# can't be shared by different Redis instances, so make sure to use a swap
# file for every redis process you are running. Redis will complain if the
# swap file is already in use.
#
# The best kind of storage for the Redis swap file (that's accessed at random)
# is a Solid State Disk (SSD).
#
# *** WARNING *** if you are using a shared hosting the default of putting
# the swap file under /tmp is not secure. Create a dir with access granted
# only to Redis user and configure Redis to create the swap file there.
vm-swap-file /tmp/redis.swap
# vm-max-memory configures the VM to use at max the specified amount of
# RAM. Everything that does not fit will be swapped on disk *if* possible, that
# is, if there is still enough contiguous space in the swap file.
#
# With vm-max-memory 0 the system will swap everything it can. Not a good
# default, just specify the max amount of RAM you can in bytes, but it's
# better to leave some margin. For instance specify an amount of RAM
# that's more or less between 60 and 80% of your free RAM.
vm-max-memory 0
# Redis swap files is split into pages. An object can be saved using multiple
# contiguous pages, but pages can't be shared between different objects.
# So if your page is too big, small objects swapped out on disk will waste
# a lot of space. If your page is too small, there is less space in the swap
# file (assuming you configured the same number of total swap file pages).
#
# If you use a lot of small objects, use a page size of 64 or 32 bytes.
# If you use a lot of big objects, use a bigger page size.
# If unsure, use the default :)
vm-page-size 32
# Number of total memory pages in the swap file.
# Given that the page table (a bitmap of free/used pages) is taken in memory,
# every 8 pages on disk will consume 1 byte of RAM.
#
# The total swap size is vm-page-size * vm-pages
#
# With the default of 32-bytes memory pages and 134217728 pages Redis will
# use a 4 GB swap file, that will use 16 MB of RAM for the page table.
#
# It's better to use the smallest acceptable value for your application,
# but the default is large in order to work in most conditions.
vm-pages 134217728
# Max number of VM I/O threads running at the same time.
# These threads are used to read/write data from/to swap file, since they
# also encode and decode objects from disk to memory or the reverse, a bigger
# number of threads can help with big objects even if they can't help with
# I/O itself as the physical device may not be able to cope with many
# reads/writes operations at the same time.
#
# The special value of 0 turn off threaded I/O and enables the blocking
# Virtual Memory implementation.
vm-max-threads 4
############################### ADVANCED CONFIG ###############################
# Glue small output buffers together in order to send small replies in a
# single TCP packet. Uses a bit more CPU but most of the times it is a win
# in terms of number of queries per second. Use 'yes' if unsure.
glueoutputbuf yes
# Hashes are encoded in a special way (much more memory efficient) when they
# have at max a given number of elements, and the biggest element does not
# exceed a given threshold. You can configure this limits with the following
# configuration directives.
hash-max-zipmap-entries 64
hash-max-zipmap-value 512
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation redis uses (see dict.c)
# performs a lazy rehashing: the more operation you run into an hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# actively rehash the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to time
# to queries with 2 milliseconds delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
activerehashing yes
################################## INCLUDES ###################################
# Include one or more other config files here. This is useful if you
# have a standard template that goes to all redis server but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
# include /path/to/local.conf
# include /path/to/other.conf

109
test/support/server.tcl Normal file
View File

@ -0,0 +1,109 @@
# Print the configuration that failed to start together with the
# server's error output, then abort the whole test run.
proc error_and_quit {config_file error} {
    set report {}
    lappend report "!!COULD NOT START REDIS-SERVER\n"
    lappend report "CONFIGURATION:"
    lappend report [exec cat $config_file]
    lappend report "\nERROR:"
    lappend report [string trim $error]
    foreach line $report { puts $line }
    exit 1
}
# Start ./redis-server configured from test/assets/$filename with the
# directives in $overrides (a list of "directive arg ?arg ...?" items)
# applied on top. When $code is supplied, it is evaluated in the
# caller's scope with a connected client pushed onto the ::clients
# stack, and the server is torn down afterwards; without $code, a dict
# (config/pid/stdout/stderr/client) describing the running server is
# returned and the caller must stop it via kill_server.
proc start_server {filename overrides {code undefined}} {
    # parse the base configuration file into a directive -> arguments dict;
    # blank lines and '#' comment lines are skipped
    set data [split [exec cat "test/assets/$filename"] "\n"]
    set config {}
    foreach line $data {
        if {[string length $line] > 0 && [string index $line 0] ne "#"} {
            set elements [split $line " "]
            set directive [lrange $elements 0 0]
            set arguments [lrange $elements 1 end]
            dict set config $directive $arguments
        }
    }

    # use a different directory every time a server is started
    dict set config dir [tmpdir server]

    # apply overrides from arguments
    foreach override $overrides {
        set directive [lrange $override 0 0]
        set arguments [lrange $override 1 end]
        dict set config $directive $arguments
    }

    # write new configuration to temporary file
    set config_file [tmpfile redis.conf]
    set fp [open $config_file w+]
    foreach directive [dict keys $config] {
        puts -nonewline $fp "$directive "
        puts $fp [dict get $config $directive]
    }
    close $fp

    # launch the server in the background, capturing its output inside
    # the per-server work directory
    set stdout [format "%s/%s" [dict get $config "dir"] "stdout"]
    set stderr [format "%s/%s" [dict get $config "dir"] "stderr"]
    exec ./redis-server $config_file > $stdout 2> $stderr &
    # NOTE(review): fixed 10ms grace period before checking for startup
    # problems; a heavily loaded machine may need longer for errors to
    # show up on stderr.
    after 10

    # check that the server actually started
    if {[file size $stderr] > 0} {
        error_and_quit $config_file [exec cat $stderr]
    }
    set line [exec head -n1 $stdout]
    if {[string match {*already in use*} $line]} {
        error_and_quit $config_file $line
    }

    # find out the pid: the first log line starts with "[<pid>]"
    regexp {^\[(\d+)\]} [exec head -n1 $stdout] _ pid

    # create the client object, honoring any bind/port overrides
    set host $::host
    set port $::port
    if {[dict exists $config bind]} { set host [dict get $config bind] }
    if {[dict exists $config port]} { set port [dict get $config port] }
    set client [redis $host $port]

    # select the right db when we don't have to authenticate
    if {![dict exists $config requirepass]} {
        $client select 9
    }

    if {$code ne "undefined"} {
        # append the client to the client stack
        lappend ::clients $client

        # execute provided block
        catch { uplevel 1 $code } err

        # pop the client object
        set ::clients [lrange $::clients 0 end-1]

        # kill server and wait for the process to be totally exited
        exec kill $pid
        while 1 {
            if {[catch {exec ps -p $pid | grep redis-server} result]} {
                # non-zero exit status: process is gone
                break;
            }
            after 10
        }

        if {[string length $err] > 0} {
            puts "Error executing the suite, aborting..."
            puts $err
            exit 1
        }
    } else {
        # no code block: return the server details; the caller owns the
        # server and must stop it with kill_server
        dict set ret "config" $config_file
        dict set ret "pid" $pid
        dict set ret "stdout" $stdout
        dict set ret "stderr" $stderr
        dict set ret "client" $client
        set _ $ret
    }
}
# Stop a server previously returned by start_server (no-code form) by
# sending SIGTERM to the pid recorded in its descriptor dict.
proc kill_server config {
    exec kill [dict get $config pid]
}

24
test/support/test.tcl Normal file
View File

@ -0,0 +1,24 @@
# Suite-wide counters: passed/failed totals and a running test index.
set ::passed 0
set ::failed 0
set ::testnum 0

# Evaluate $code in the caller's scope and compare the result against
# $okpattern, first with exact equality, then as a [string match]
# glob. One status line is printed per test and the global counters
# are updated. When $::traceleaks is enabled, the OSX `leaks` tool is
# run against redis-server after each test and the suite aborts on
# any reported leak.
proc test {name code okpattern} {
    incr ::testnum
    # if {$::testnum < $::first || $::testnum > $::last} return
    puts -nonewline [format "#%03d %-70s " $::testnum $name]
    flush stdout
    set retval [uplevel 1 $code]
    set matched [expr {$okpattern eq $retval || [string match $okpattern $retval]}]
    if {$matched} {
        incr ::passed
        puts "PASSED"
    } else {
        incr ::failed
        puts "!! ERROR expected\n'$okpattern'\nbut got\n'$retval'"
    }
    if {$::traceleaks} {
        set leakreport [exec leaks redis-server]
        if {![string match {*0 leaks*} $leakreport]} {
            puts "--------- Test $::testnum LEAKED! --------"
            exit 1
        }
    }
}

15
test/support/tmpfile.tcl Normal file
View File

@ -0,0 +1,15 @@
# Counter making every generated path unique within this process, and
# the root directory under which all temporary files/dirs live.
set ::tmpcounter 0
set ::tmproot "./test/tmp"
file mkdir $::tmproot

# Create and return a directory under $::tmproot whose name is unique
# to this process and call.
proc tmpdir {basename} {
    incr ::tmpcounter
    set path [file join $::tmproot "$basename.[pid].$::tmpcounter"]
    file mkdir $path
    return $path
}

# Return (without creating) a file path under $::tmproot unique to
# this process and call.
proc tmpfile {basename} {
    incr ::tmpcounter
    return [file join $::tmproot "$basename.[pid].$::tmpcounter"]
}

200
test/support/util.tcl Normal file
View File

@ -0,0 +1,200 @@
# Return a random string whose length is uniformly chosen in
# [min, max]. $type selects the character set: "binary" (bytes
# 0-255), "alpha" (ASCII 48-122) or "compr" (ASCII 48-52, a tiny
# alphabet that yields highly compressible data).
proc randstring {min max {type binary}} {
    switch -- $type {
        binary {set lo 0;  set hi 255}
        alpha  {set lo 48; set hi 122}
        compr  {set lo 48; set hi 52}
    }
    set len [expr {$min+int(rand()*($max-$min+1))}]
    set out {}
    for {set i 0} {$i < $len} {incr i} {
        append out [format "%c" [expr {$lo+int(rand()*($hi-$lo+1))}]]
    }
    return $out
}
# Useful for some test
# lsort comparator for {score member} pairs: order by numeric score
# first, breaking ties with a lexicographic compare of the member.
proc zlistAlikeSort {a b} {
    set sa [lindex $a 0]
    set sb [lindex $b 0]
    if {$sa > $sb} {return 1}
    if {$sa < $sb} {return -1}
    return [string compare [lindex $a 1] [lindex $b 1]]
}
# Block until the server behind client $r reports no background save
# (BGSAVE) in progress, polling INFO once per second.
proc waitForBgsave r {
    while {[string match {*bgsave_in_progress:1*} [$r info]]} {
        puts -nonewline "\nWaiting for background save to finish... "
        flush stdout
        after 1000
    }
}
# Block until the server behind client $r reports no background AOF
# rewrite in progress, polling INFO once per second.
proc waitForBgrewriteaof r {
    while {[string match {*bgrewriteaof_in_progress:1*} [$r info]]} {
        puts -nonewline "\nWaiting for background AOF rewrite to finish... "
        flush stdout
        after 1000
    }
}
# Uniform random integer in the half-open range [0, max).
proc randomInt {max} {
    return [expr {int(rand()*$max)}]
}
# Evaluate one of the given scripts, chosen uniformly at random, in
# the caller's scope, returning its result.
proc randpath args {
    set choice [lindex $args [expr {int(rand()*[llength $args])}]]
    return [uplevel 1 $choice]
}
# Return a random value for use as a string value or hash field:
# small ints (chosen small enough to collide often), 32/64-bit ints,
# or a random string drawn from one of randstring's three character
# classes (possibly empty, since the min length is 0).
proc randomValue {} {
    randpath {
        # Small enough to likely collide
        randomInt 1000
    } {
        # 32 bit compressible signed/unsigned
        randpath {randomInt 2000000000} {randomInt 4000000000}
    } {
        # 64 bit
        randpath {randomInt 1000000000000}
    } {
        # Random string
        randpath {randstring 0 256 alpha} \
                {randstring 0 256 compr} \
                {randstring 0 256 binary}
    }
}
# Return a random key name. Like randomValue, but string keys are
# never empty (min length 1) and never binary, so generated keys stay
# printable and easy to inspect while debugging.
proc randomKey {} {
    randpath {
        # Small enough to likely collide
        randomInt 1000
    } {
        # 32 bit compressible signed/unsigned
        randpath {randomInt 2000000000} {randomInt 4000000000}
    } {
        # 64 bit
        randpath {randomInt 1000000000000}
    } {
        # Random string
        randpath {randstring 1 256 alpha} \
                {randstring 1 256 compr}
    }
}
# Apply $ops random write operations through client $r, spread over
# random keys and all value types. Used to build a dataset whose
# digest (see datasetDigest) can be compared before/after a
# persistence reload.
proc createComplexDataset {r ops} {
    for {set j 0} {$j < $ops} {incr j} {
        set k [randomKey]
        set f [randomValue]
        set v [randomValue]

        # pick a zset score: usually uniform in [0,1), with a small
        # chance of the +inf/-inf edge cases
        randpath {
            set d [expr {rand()}]
        } {
            set d [expr {rand()}]
        } {
            set d [expr {rand()}]
        } {
            set d [expr {rand()}]
        } {
            set d [expr {rand()}]
        } {
            randpath {set d +inf} {set d -inf}
        }
        set t [$r type $k]

        # key does not exist yet: create it with a random type, then
        # re-read the type for the mutation step below
        if {$t eq {none}} {
            randpath {
                $r set $k $v
            } {
                $r lpush $k $v
            } {
                $r sadd $k $v
            } {
                $r zadd $k $d $v
            } {
                $r hset $k $f $v
            }
            set t [$r type $k]
        }

        # mutate the (now existing) key with a type-appropriate command
        switch $t {
            {string} {
                # Nothing to do
            }
            {list} {
                randpath {$r lpush $k $v} \
                        {$r rpush $k $v} \
                        {$r lrem $k 0 $v} \
                        {$r rpop $k} \
                        {$r lpop $k}
            }
            {set} {
                randpath {$r sadd $k $v} \
                        {$r srem $k $v}
            }
            {zset} {
                randpath {$r zadd $k $d $v} \
                        {$r zrem $k $v}
            }
            {hash} {
                randpath {$r hset $k $f $v} \
                        {$r hdel $k $f}
            }
        }
    }
}
# Compute a SHA1-based digest of the entire dataset reachable through
# client $r. Keys are visited in sorted order and unordered aggregates
# (sets, hashes) are lsort-ed first, so two equal datasets always
# produce the same digest. Empty aggregates are skipped so they do not
# influence the result. Requires the tcllib sha1 package.
proc datasetDigest r {
    set keys [lsort [$r keys *]]
    set digest {}
    foreach k $keys {
        set t [$r type $k]
        # per-key digest: hash the key's full contents, or {} when the
        # aggregate is empty
        switch $t {
            {string} {
                set aux [::sha1::sha1 -hex [$r get $k]]
            } {list} {
                if {[$r llen $k] == 0} {
                    set aux {}
                } else {
                    set aux [::sha1::sha1 -hex [$r lrange $k 0 -1]]
                }
            } {set} {
                if {[$r scard $k] == 0} {
                    set aux {}
                } else {
                    set aux [::sha1::sha1 -hex [lsort [$r smembers $k]]]
                }
            } {zset} {
                if {[$r zcard $k] == 0} {
                    set aux {}
                } else {
                    set aux [::sha1::sha1 -hex [$r zrange $k 0 -1 withscores]]
                }
            } {hash} {
                if {[$r hlen $k] == 0} {
                    set aux {}
                } else {
                    set aux [::sha1::sha1 -hex [lsort [$r hgetall $k]]]
                }
            } default {
                error "Type not supported: $t"
            }
        }
        if {$aux eq {}} continue
        # fold this key's digest into the running dataset digest
        set digest [::sha1::sha1 -hex [join [list $aux $digest $k] "\n"]]
    }
    return $digest
}

50
test/test_helper.tcl Normal file
View File

@ -0,0 +1,50 @@
# Redis test suite. Copyright (C) 2009 Salvatore Sanfilippo antirez@gmail.com
# This software is released under the BSD License. See the COPYING file for
# more information.
set tcl_precision 17
source test/support/redis.tcl
source test/support/server.tcl
source test/support/tmpfile.tcl
source test/support/test.tcl
source test/support/util.tcl
set ::host 127.0.0.1
set ::port 6379
set ::traceleaks 0
# Source the test file test/$name.tcl, executing its tests in order.
# The global counters (::testnum, ::passed, ::failed) are updated as a
# side effect by the "test" proc invoked from the sourced file.
# Fix: dropped the dead "set cur $::testnum" local, which was never
# read and created a spurious dependency on ::testnum being defined.
proc execute_tests name {
    source "test/$name.tcl"
}
# setup a list to hold a stack of clients. the proc "r" provides easy
# access to the client at the top of the stack
set ::clients {}
# Forward a command to the client on top of the ::clients stack, i.e.
# the connection to the most recently started server.
proc r {args} {
    return [[lindex $::clients end] {*}$args]
}
# Entry point: run every test file in a fixed order, print a pass/fail
# summary, then remove the temporary files/dirs the servers created.
proc main {} {
    execute_tests "unit/auth"
    execute_tests "unit/protocol"
    execute_tests "unit/basic"
    execute_tests "unit/type/list"
    execute_tests "unit/type/set"
    execute_tests "unit/type/zset"
    execute_tests "unit/type/hash"
    execute_tests "unit/sort"
    execute_tests "unit/expire"
    execute_tests "unit/other"

    # summary of the whole run
    puts "\n[expr $::passed+$::failed] tests, $::passed passed, $::failed failed"
    if {$::failed > 0} {
        puts "\n*** WARNING!!! $::failed FAILED TESTS ***\n"
    }

    # clean up tmp
    exec rm -rf test/tmp/*
}
main

1
test/tmp/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
*

15
test/unit/auth.tcl Normal file
View File

@ -0,0 +1,15 @@
# AUTH behavior, against a server started with "requirepass foobar".
start_server default.conf {{requirepass foobar}} {
    test {AUTH fails when a wrong password is given} {
        catch {r auth wrong!} err
        format $err
    } {ERR*invalid password}

    # before a successful AUTH every other command must be refused
    test {Arbitrary command gives an error when AUTH is required} {
        catch {r set foo bar} err
        format $err
    } {ERR*operation not permitted}

    test {AUTH succeeds when the right password is given} {
        r auth foobar
    } {OK}
}

369
test/unit/basic.tcl Normal file
View File

@ -0,0 +1,369 @@
# Basic key-space, string and multi-key command tests, run against a
# server started with the stock test configuration (DB 9 selected).
# Fixes: misspelled error message "Eleemnt" -> "Element"; braced the
# unbraced expr in the big-payload test; removed the unused "sum" local.
start_server default.conf {} {
    test {DEL all keys to start with a clean DB} {
        foreach key [r keys *] {r del $key}
        r dbsize
    } {0}

    test {SET and GET an item} {
        r set x foobar
        r get x
    } {foobar}

    test {SET and GET an empty item} {
        r set x {}
        r get x
    } {}

    test {DEL against a single item} {
        r del x
        r get x
    } {}

    test {Vararg DEL} {
        r set foo1 a
        r set foo2 b
        r set foo3 c
        list [r del foo1 foo2 foo3 foo4] [r mget foo1 foo2 foo3]
    } {3 {{} {} {}}}

    test {KEYS with pattern} {
        foreach key {key_x key_y key_z foo_a foo_b foo_c} {
            r set $key hello
        }
        lsort [r keys foo*]
    } {foo_a foo_b foo_c}

    test {KEYS to get all keys} {
        lsort [r keys *]
    } {foo_a foo_b foo_c key_x key_y key_z}

    test {DBSIZE} {
        r dbsize
    } {6}

    test {DEL all keys} {
        foreach key [r keys *] {r del $key}
        r dbsize
    } {0}

    test {Very big payload in GET/SET} {
        set buf [string repeat "abcd" 1000000]
        r set foo $buf
        r get foo
    } [string repeat "abcd" 1000000]

    test {Very big payload random access} {
        set err {}
        array set payload {}
        for {set j 0} {$j < 100} {incr j} {
            set size [expr {1+[randomInt 100000]}]
            set buf [string repeat "pl-$j" $size]
            set payload($j) $buf
            r set bigpayload_$j $buf
        }
        for {set j 0} {$j < 1000} {incr j} {
            set index [randomInt 100]
            set buf [r get bigpayload_$index]
            if {$buf != $payload($index)} {
                set err "Values differ: I set '$payload($index)' but I read back '$buf'"
                break
            }
        }
        unset payload
        set _ $err
    } {}

    test {SET 10000 numeric keys and access all them in reverse order} {
        set err {}
        for {set x 0} {$x < 10000} {incr x} {
            r set $x $x
        }
        for {set x 9999} {$x >= 0} {incr x -1} {
            set val [r get $x]
            if {$val ne $x} {
                set err "Element at position $x is $val instead of $x"
                break
            }
        }
        set _ $err
    } {}

    test {DBSIZE should be 10101 now} {
        r dbsize
    } {10101}

    test {INCR against non existing key} {
        set res {}
        append res [r incr novar]
        append res [r get novar]
    } {11}

    test {INCR against key created by incr itself} {
        r incr novar
    } {2}

    test {INCR against key originally set with SET} {
        r set novar 100
        r incr novar
    } {101}

    test {INCR over 32bit value} {
        r set novar 17179869184
        r incr novar
    } {17179869185}

    test {INCRBY over 32bit value with over 32bit increment} {
        r set novar 17179869184
        r incrby novar 17179869184
    } {34359738368}

    test {INCR fails against key with spaces (no integer encoded)} {
        r set novar " 11 "
        catch {r incr novar} err
        format $err
    } {ERR*}

    test {INCR fails against a key holding a list} {
        r rpush mylist 1
        catch {r incr mylist} err
        r rpop mylist
        format $err
    } {ERR*}

    test {DECRBY over 32bit value with over 32bit increment, negative res} {
        r set novar 17179869184
        r decrby novar 17179869185
    } {-1}

    test {SETNX target key missing} {
        r setnx novar2 foobared
        r get novar2
    } {foobared}

    test {SETNX target key exists} {
        r setnx novar2 blabla
        r get novar2
    } {foobared}

    test {SETNX will overwrite EXPIREing key} {
        r set x 10
        r expire x 10000
        r setnx x 20
        r get x
    } {20}

    test {EXISTS} {
        set res {}
        r set newkey test
        append res [r exists newkey]
        r del newkey
        append res [r exists newkey]
    } {10}

    test {Zero length value in key. SET/GET/EXISTS} {
        r set emptykey {}
        set res [r get emptykey]
        append res [r exists emptykey]
        r del emptykey
        append res [r exists emptykey]
    } {10}

    test {Commands pipelining} {
        set fd [r channel]
        puts -nonewline $fd "SET k1 4\r\nxyzk\r\nGET k1\r\nPING\r\n"
        flush $fd
        set res {}
        append res [string match OK* [::redis::redis_read_reply $fd]]
        append res [::redis::redis_read_reply $fd]
        append res [string match PONG* [::redis::redis_read_reply $fd]]
        format $res
    } {1xyzk1}

    test {Non existing command} {
        catch {r foobaredcommand} err
        string match ERR* $err
    } {1}

    test {RENAME basic usage} {
        r set mykey hello
        r rename mykey mykey1
        r rename mykey1 mykey2
        r get mykey2
    } {hello}

    test {RENAME source key should no longer exist} {
        r exists mykey
    } {0}

    test {RENAME against already existing key} {
        r set mykey a
        r set mykey2 b
        r rename mykey2 mykey
        set res [r get mykey]
        append res [r exists mykey2]
    } {b0}

    test {RENAMENX basic usage} {
        r del mykey
        r del mykey2
        r set mykey foobar
        r renamenx mykey mykey2
        set res [r get mykey2]
        append res [r exists mykey]
    } {foobar0}

    test {RENAMENX against already existing key} {
        r set mykey foo
        r set mykey2 bar
        r renamenx mykey mykey2
    } {0}

    test {RENAMENX against already existing key (2)} {
        set res [r get mykey]
        append res [r get mykey2]
    } {foobar}

    test {RENAME against non existing source key} {
        catch {r rename nokey foobar} err
        format $err
    } {ERR*}

    test {RENAME where source and dest key is the same} {
        catch {r rename mykey mykey} err
        format $err
    } {ERR*}

    test {DEL all keys again (DB 0)} {
        foreach key [r keys *] {
            r del $key
        }
        r dbsize
    } {0}

    test {DEL all keys again (DB 1)} {
        r select 10
        foreach key [r keys *] {
            r del $key
        }
        set res [r dbsize]
        r select 9
        format $res
    } {0}

    test {MOVE basic usage} {
        r set mykey foobar
        r move mykey 10
        set res {}
        lappend res [r exists mykey]
        lappend res [r dbsize]
        r select 10
        lappend res [r get mykey]
        lappend res [r dbsize]
        r select 9
        format $res
    } [list 0 0 foobar 1]

    test {MOVE against key existing in the target DB} {
        r set mykey hello
        r move mykey 10
    } {0}

    test {SET/GET keys in different DBs} {
        r set a hello
        r set b world
        r select 10
        r set a foo
        r set b bared
        r select 9
        set res {}
        lappend res [r get a]
        lappend res [r get b]
        r select 10
        lappend res [r get a]
        lappend res [r get b]
        r select 9
        format $res
    } {hello world foo bared}

    test {MGET} {
        r flushdb
        r set foo BAR
        r set bar FOO
        r mget foo bar
    } {BAR FOO}

    test {MGET against non existing key} {
        r mget foo baazz bar
    } {BAR {} FOO}

    test {MGET against non-string key} {
        r sadd myset ciao
        r sadd myset bau
        r mget foo baazz bar myset
    } {BAR {} FOO {}}

    test {RANDOMKEY} {
        r flushdb
        r set foo x
        r set bar y
        set foo_seen 0
        set bar_seen 0
        for {set i 0} {$i < 100} {incr i} {
            set rkey [r randomkey]
            if {$rkey eq {foo}} {
                set foo_seen 1
            }
            if {$rkey eq {bar}} {
                set bar_seen 1
            }
        }
        list $foo_seen $bar_seen
    } {1 1}

    test {RANDOMKEY against empty DB} {
        r flushdb
        r randomkey
    } {}

    test {RANDOMKEY regression 1} {
        r flushdb
        r set x 10
        r del x
        r randomkey
    } {}

    test {GETSET (set new value)} {
        list [r getset foo xyz] [r get foo]
    } {{} xyz}

    test {GETSET (replace old value)} {
        r set foo bar
        list [r getset foo xyz] [r get foo]
    } {bar xyz}

    test {MSET base case} {
        r mset x 10 y "foo bar" z "x x x x x x x\n\n\r\n"
        r mget x y z
    } [list 10 {foo bar} "x x x x x x x\n\n\r\n"]

    test {MSET wrong number of args} {
        catch {r mset x 10 y "foo bar" z} err
        format $err
    } {*wrong number*}

    test {MSETNX with already existent key} {
        list [r msetnx x1 xxx y2 yyy x 20] [r exists x1] [r exists y2]
    } {0 0 0}

    test {MSETNX with not existing keys} {
        list [r msetnx x1 xxx y2 yyy] [r get x1] [r get y2]
    } {1 xxx yyy}

    test {MSETNX should remove all the volatile keys even on failure} {
        r mset x 1 y 2 z 3
        r expire y 10000
        r expire z 10000
        list [r msetnx x A y B z C] [r mget x y z]
    } {0 {1 {} {}}}
}

58
test/unit/expire.tcl Normal file
View File

@ -0,0 +1,58 @@
# EXPIRE / EXPIREAT / SETEX behavior. These tests use real waits
# (after N), so this file deliberately sleeps several seconds.
start_server default.conf {} {
    test {EXPIRE - don't set timeouts multiple times} {
        r set x foobar
        set v1 [r expire x 5]
        set v2 [r ttl x]
        # a second EXPIRE on a volatile key must be refused (returns 0)
        set v3 [r expire x 10]
        set v4 [r ttl x]
        list $v1 $v2 $v3 $v4
    } {1 5 0 5}

    test {EXPIRE - It should be still possible to read 'x'} {
        r get x
    } {foobar}

    test {EXPIRE - After 6 seconds the key should no longer be here} {
        after 6000
        list [r get x] [r exists x]
    } {{} 0}

    test {EXPIRE - Delete on write policy} {
        # writing to a volatile key deletes the old value first, so only
        # the new element survives
        r del x
        r lpush x foo
        r expire x 1000
        r lpush x bar
        r lrange x 0 -1
    } {bar}

    test {EXPIREAT - Check for EXPIRE alike behavior} {
        r del x
        r set x foo
        r expireat x [expr [clock seconds]+15]
        r ttl x
    } {1[345]}

    test {SETEX - Set + Expire combo operation. Check for TTL} {
        r setex x 12 test
        r ttl x
    } {1[012]}

    test {SETEX - Check value} {
        r get x
    } {test}

    test {SETEX - Overwrite old key} {
        r setex y 1 foo
        r get y
    } {foo}

    test {SETEX - Wait for the key to expire} {
        after 3000
        r get y
    } {}

    test {SETEX - Wrong time parameter} {
        catch {r setex z -10 foo} e
        set _ $e
    } {*invalid expire*}
}

218
test/unit/other.tcl Normal file
View File

@ -0,0 +1,218 @@
# Miscellaneous tests: persistence (SAVE/BGSAVE/AOF), fuzz stressers,
# pipelining, MULTI/EXEC, APPEND and SUBSTR.
# Fixes in this revision:
#  - "MUTLI / EXEC basics" test name typo corrected to "MULTI".
#  - SUBSTR fuzzing: the negative-index branch used "[abs(...)-1]" which is
#    command substitution of a nonexistent command and would error if ever
#    reached; it now uses [expr {...}]. (The branch is currently dead since
#    randomInt returns non-negative values.)
start_server default.conf {} {
test {SAVE - make sure there are all the types as values} {
# Wait for a background saving in progress to terminate
waitForBgsave r
r lpush mysavelist hello
r lpush mysavelist world
r set myemptykey {}
r set mynormalkey {blablablba}
r zadd mytestzset 10 a
r zadd mytestzset 20 b
r zadd mytestzset 30 c
r save
} {OK}
# Round-trip random payloads of each data model through SET/GET and make
# sure nothing is corrupted on the way.
foreach fuzztype {binary alpha compr} {
test "FUZZ stresser with data model $fuzztype" {
set err 0
for {set i 0} {$i < 10000} {incr i} {
set fuzz [randstring 0 512 $fuzztype]
r set foo $fuzz
set got [r get foo]
if {$got ne $fuzz} {
set err [list $fuzz $got]
break
}
}
set _ $err
} {0}
}
test {BGSAVE} {
waitForBgsave r
r flushdb
r save
r set x 10
r bgsave
waitForBgsave r
r debug reload
r get x
} {10}
test {SELECT an out of range DB} {
catch {r select 1000000} err
set _ $err
} {*invalid*}
# Digest-based consistency checks need the Tcl sha1 package; they are
# skipped when it is not installed.
if {![catch {package require sha1}]} {
test {Check consistency of different data types after a reload} {
r flushdb
createComplexDataset r 10000
set sha1 [datasetDigest r]
r debug reload
set sha1_after [datasetDigest r]
expr {$sha1 eq $sha1_after}
} {1}
# Relies on $sha1 computed by the previous test.
test {Same dataset digest if saving/reloading as AOF?} {
r bgrewriteaof
waitForBgrewriteaof r
r debug loadaof
set sha1_after [datasetDigest r]
expr {$sha1 eq $sha1_after}
} {1}
}
test {EXPIRES after a reload (snapshot + append only file)} {
r flushdb
r set x 10
r expire x 1000
r save
r debug reload
set ttl [r ttl x]
set e1 [expr {$ttl > 900 && $ttl <= 1000}]
r bgrewriteaof
waitForBgrewriteaof r
set ttl [r ttl x]
set e2 [expr {$ttl > 900 && $ttl <= 1000}]
list $e1 $e2
} {1 1}
# Uses a raw socket (old inline protocol) to queue 100k SET+GET pairs
# before reading a single reply, stressing the output buffer handling.
test {PIPELINING stresser (also a regression for the old epoll bug)} {
set fd2 [socket $::host $::port]
fconfigure $fd2 -encoding binary -translation binary
puts -nonewline $fd2 "SELECT 9\r\n"
flush $fd2
gets $fd2
for {set i 0} {$i < 100000} {incr i} {
set q {}
set val "0000${i}0000"
append q "SET key:$i [string length $val]\r\n$val\r\n"
puts -nonewline $fd2 $q
set q {}
append q "GET key:$i\r\n"
puts -nonewline $fd2 $q
}
flush $fd2
for {set i 0} {$i < 100000} {incr i} {
gets $fd2 line
gets $fd2 count
set count [string range $count 1 end]
set val [read $fd2 $count]
read $fd2 2
}
close $fd2
set _ 1
} {1}
# Commands inside MULTI must reply QUEUED; EXEC returns all the replies.
test {MULTI / EXEC basics} {
r del mylist
r rpush mylist a
r rpush mylist b
r rpush mylist c
r multi
set v1 [r lrange mylist 0 -1]
set v2 [r ping]
set v3 [r exec]
list $v1 $v2 $v3
} {QUEUED QUEUED {{a b c} PONG}}
test {DISCARD} {
r del mylist
r rpush mylist a
r rpush mylist b
r rpush mylist c
r multi
set v1 [r del mylist]
set v2 [r discard]
set v3 [r lrange mylist 0 -1]
list $v1 $v2 $v3
} {QUEUED OK {a b c}}
test {APPEND basics} {
list [r append foo bar] [r get foo] \
[r append foo 100] [r get foo]
} {3 bar 6 bar100}
test {APPEND basics, integer encoded values} {
set res {}
r del foo
r append foo 1
r append foo 2
lappend res [r get foo]
r set foo 1
r append foo 2
lappend res [r get foo]
} {12 12}
test {APPEND fuzzing} {
set err {}
foreach type {binary alpha compr} {
set buf {}
r del x
for {set i 0} {$i < 1000} {incr i} {
set bin [randstring 0 10 $type]
append buf $bin
r append x $bin
}
if {$buf != [r get x]} {
set err "Expected '$buf' found '[r get x]'"
break
}
}
set _ $err
} {}
test {SUBSTR basics} {
set res {}
r set foo "Hello World"
lappend res [r substr foo 0 3]
lappend res [r substr foo 0 -1]
lappend res [r substr foo -4 -1]
lappend res [r substr foo 5 3]
lappend res [r substr foo 5 5000]
lappend res [r substr foo -5000 10000]
set _ $res
} {Hell {Hello World} orld {} { World} {Hello World}}
test {SUBSTR against integer encoded values} {
r set foo 123
r substr foo 0 -2
} {12}
# Compare SUBSTR against Tcl's [string range] on random binary payloads.
test {SUBSTR fuzzing} {
set err {}
for {set i 0} {$i < 1000} {incr i} {
set bin [randstring 0 1024 binary]
set _start [set start [randomInt 1500]]
set _end [set end [randomInt 1500]]
if {$_start < 0} {set _start "end-[expr {abs($_start)-1}]"}
if {$_end < 0} {set _end "end-[expr {abs($_end)-1}]"}
set s1 [string range $bin $_start $_end]
r set bin $bin
set s2 [r substr bin $start $end]
if {$s1 != $s2} {
set err "String mismatch"
break
}
}
set _ $err
} {}
# Leave the user with a clean DB before to exit
test {FLUSHDB} {
set aux {}
r select 9
r flushdb
lappend aux [r dbsize]
r select 10
r flushdb
lappend aux [r dbsize]
} {0 0}
test {Perform a final SAVE to leave a clean DB on disk} {
r save
} {OK}
}

41
test/unit/protocol.tcl Normal file
View File

@ -0,0 +1,41 @@
# Protocol-level tests: these write raw bytes on the client's socket
# (obtained via [r channel]) to exercise parser edge cases that the normal
# command interface cannot produce.
start_server default.conf {} {
# An empty inline request must be silently skipped.
test {Handle an empty query well} {
set fd [r channel]
puts -nonewline $fd "\r\n"
flush $fd
r ping
} {PONG}
test {Negative multi bulk command does not create problems} {
set fd [r channel]
puts -nonewline $fd "*-10\r\n"
flush $fd
r ping
} {PONG}
# Here -10 is parsed as the bulk payload length of the old-style SET.
test {Negative multi bulk payload} {
set fd [r channel]
puts -nonewline $fd "SET x -10\r\n"
flush $fd
gets $fd
} {*invalid bulk*}
test {Too big bulk payload} {
set fd [r channel]
puts -nonewline $fd "SET x 2000000000\r\n"
flush $fd
gets $fd
} {*invalid bulk*count*}
test {Multi bulk request not followed by bulk args} {
set fd [r channel]
puts -nonewline $fd "*1\r\nfoo\r\n"
flush $fd
gets $fd
} {*protocol error*}
test {Generic wrong number of args} {
catch {r ping x y z} err
set _ $err
} {*wrong*arguments*ping*}
}

177
test/unit/sort.tcl Normal file
View File

@ -0,0 +1,177 @@
# Tests for the SORT command.
# NOTE(review): the variable $res is set inside the "Create a random list"
# test body and then referenced by the *expected value* expressions of many
# later tests — the tests in this file are tightly order-dependent.
start_server default.conf {} {
test {SORT ALPHA against integer encoded strings} {
r del mylist
r lpush mylist 2
r lpush mylist 1
r lpush mylist 3
r lpush mylist 10
r sort mylist alpha
} {1 10 2 3}
# Builds tosort (list), tosort-set (set), weight_* keys and wobj_* hashes,
# plus the Tcl-side sorted reference list $res used by later tests.
test {Create a random list and a random set} {
set tosort {}
array set seenrand {}
for {set i 0} {$i < 10000} {incr i} {
while 1 {
# Make sure all the weights are different because
# Redis does not use a stable sort but Tcl does.
randpath {
set rint [expr int(rand()*1000000)]
} {
set rint [expr rand()]
}
if {![info exists seenrand($rint)]} break
}
set seenrand($rint) x
r lpush tosort $i
r sadd tosort-set $i
r set weight_$i $rint
r hset wobj_$i weight $rint
lappend tosort [list $i $rint]
}
set sorted [lsort -index 1 -real $tosort]
set res {}
for {set i 0} {$i < 10000} {incr i} {
lappend res [lindex $sorted $i 0]
}
format {}
} {}
test {SORT with BY against the newly created list} {
r sort tosort {BY weight_*}
} $res
test {SORT with BY (hash field) against the newly created list} {
r sort tosort {BY wobj_*->weight}
} $res
test {SORT with GET (key+hash) with sanity check of each element (list)} {
set err {}
set l1 [r sort tosort GET # GET weight_*]
set l2 [r sort tosort GET # GET wobj_*->weight]
foreach {id1 w1} $l1 {id2 w2} $l2 {
set realweight [r get weight_$id1]
if {$id1 != $id2} {
set err "ID mismatch $id1 != $id2"
break
}
if {$realweight != $w1 || $realweight != $w2} {
set err "Weights mismatch! w1: $w1 w2: $w2 real: $realweight"
break
}
}
set _ $err
} {}
test {SORT with BY, but against the newly created set} {
r sort tosort-set {BY weight_*}
} $res
test {SORT with BY (hash field), but against the newly created set} {
r sort tosort-set {BY wobj_*->weight}
} $res
test {SORT with BY and STORE against the newly created list} {
r sort tosort {BY weight_*} store sort-res
r lrange sort-res 0 -1
} $res
test {SORT with BY (hash field) and STORE against the newly created list} {
r sort tosort {BY wobj_*->weight} store sort-res
r lrange sort-res 0 -1
} $res
test {SORT direct, numeric, against the newly created list} {
r sort tosort
} [lsort -integer $res]
test {SORT decreasing sort} {
r sort tosort {DESC}
} [lsort -decreasing -integer $res]
# The "speed" tests below only print timings; they never fail on slowness.
test {SORT speed, sorting 10000 elements list using BY, 100 times} {
set start [clock clicks -milliseconds]
for {set i 0} {$i < 100} {incr i} {
set sorted [r sort tosort {BY weight_* LIMIT 0 10}]
}
set elapsed [expr [clock clicks -milliseconds]-$start]
puts -nonewline "\n  Average time to sort: [expr double($elapsed)/100] milliseconds "
flush stdout
format {}
} {}
test {SORT speed, as above but against hash field} {
set start [clock clicks -milliseconds]
for {set i 0} {$i < 100} {incr i} {
set sorted [r sort tosort {BY wobj_*->weight LIMIT 0 10}]
}
set elapsed [expr [clock clicks -milliseconds]-$start]
puts -nonewline "\n  Average time to sort: [expr double($elapsed)/100] milliseconds "
flush stdout
format {}
} {}
test {SORT speed, sorting 10000 elements list directly, 100 times} {
set start [clock clicks -milliseconds]
for {set i 0} {$i < 100} {incr i} {
set sorted [r sort tosort {LIMIT 0 10}]
}
set elapsed [expr [clock clicks -milliseconds]-$start]
puts -nonewline "\n  Average time to sort: [expr double($elapsed)/100] milliseconds "
flush stdout
format {}
} {}
test {SORT speed, pseudo-sorting 10000 elements list, BY <const>, 100 times} {
set start [clock clicks -milliseconds]
for {set i 0} {$i < 100} {incr i} {
set sorted [r sort tosort {BY nokey LIMIT 0 10}]
}
set elapsed [expr [clock clicks -milliseconds]-$start]
puts -nonewline "\n  Average time to sort: [expr double($elapsed)/100] milliseconds "
flush stdout
format {}
} {}
test {SORT regression for issue #19, sorting floats} {
r flushdb
foreach x {1.1 5.10 3.10 7.44 2.1 5.75 6.12 0.25 1.15} {
r lpush mylist $x
}
r sort mylist
} [lsort -real {1.1 5.10 3.10 7.44 2.1 5.75 6.12 0.25 1.15}]
test {SORT with GET #} {
r del mylist
r lpush mylist 1
r lpush mylist 2
r lpush mylist 3
r mset weight_1 10 weight_2 5 weight_3 30
r sort mylist BY weight_* GET #
} {2 1 3}
test {SORT with constant GET} {
r sort mylist GET foo
} {{} {} {}}
test {SORT against sorted sets} {
r del zset
r zadd zset 1 a
r zadd zset 5 b
r zadd zset 2 c
r zadd zset 10 d
r zadd zset 3 e
r sort zset alpha desc
} {e d c b a}
test {Sorted sets +inf and -inf handling} {
r del zset
r zadd zset -100 a
r zadd zset 200 b
r zadd zset -300 c
r zadd zset 1000000 d
r zadd zset +inf max
r zadd zset -inf min
r zrange zset 0 -1
} {min c a b d max}
}

289
test/unit/type/hash.tcl Normal file
View File

@ -0,0 +1,289 @@
# Tests for the hash type. Two hashes are kept in parallel with Tcl arrays:
# 'smallhash' (8 fields, expected zipmap-encoded) and 'bighash' (1024
# fields, expected hashtable-encoded). Most tests mutate both the server
# keys and the mirror arrays, so they are order-dependent.
# Fixes in this revision:
#  - HMGET small/big hash: removed a stray 'break' outside of any loop
#    (copy-paste from the looped HGET tests); in Tcl it would raise
#    'invoked "break" outside of a loop' and mask the real failure.
#  - "HINCRBY fails against hash value with spaces": the second catch ran
#    against smallhash instead of bighash, leaving bighash untested.
#  - Renamed the big-hash encoding test, which asserted *hashtable* while
#    its name said zipmap.
start_server default.conf {} {
test {HSET/HLEN - Small hash creation} {
array set smallhash {}
for {set i 0} {$i < 8} {incr i} {
set key [randstring 0 8 alpha]
set val [randstring 0 8 alpha]
if {[info exists smallhash($key)]} {
incr i -1
continue
}
r hset smallhash $key $val
set smallhash($key) $val
}
list [r hlen smallhash]
} {8}
test {Is the small hash encoded with a zipmap?} {
r debug object smallhash
} {*zipmap*}
test {HSET/HLEN - Big hash creation} {
array set bighash {}
for {set i 0} {$i < 1024} {incr i} {
set key [randstring 0 8 alpha]
set val [randstring 0 8 alpha]
if {[info exists bighash($key)]} {
incr i -1
continue
}
r hset bighash $key $val
set bighash($key) $val
}
list [r hlen bighash]
} {1024}
test {Is the big hash encoded with a hash table?} {
r debug object bighash
} {*hashtable*}
test {HGET against the small hash} {
set err {}
foreach k [array names smallhash *] {
if {$smallhash($k) ne [r hget smallhash $k]} {
set err "$smallhash($k) != [r hget smallhash $k]"
break
}
}
set _ $err
} {}
test {HGET against the big hash} {
set err {}
foreach k [array names bighash *] {
if {$bighash($k) ne [r hget bighash $k]} {
set err "$bighash($k) != [r hget bighash $k]"
break
}
}
set _ $err
} {}
test {HGET against non existing key} {
set rv {}
lappend rv [r hget smallhash __123123123__]
lappend rv [r hget bighash __123123123__]
set _ $rv
} {{} {}}
# HSET returns 0 when updating an existing field, 1 when inserting.
test {HSET in update and insert mode} {
set rv {}
set k [lindex [array names smallhash *] 0]
lappend rv [r hset smallhash $k newval1]
set smallhash($k) newval1
lappend rv [r hget smallhash $k]
lappend rv [r hset smallhash __foobar123__ newval]
set k [lindex [array names bighash *] 0]
lappend rv [r hset bighash $k newval2]
set bighash($k) newval2
lappend rv [r hget bighash $k]
lappend rv [r hset bighash __foobar123__ newval]
lappend rv [r hdel smallhash __foobar123__]
lappend rv [r hdel bighash __foobar123__]
set _ $rv
} {0 newval1 1 0 newval2 1 1 1}
test {HSETNX target key missing - small hash} {
r hsetnx smallhash __123123123__ foo
r hget smallhash __123123123__
} {foo}
test {HSETNX target key exists - small hash} {
r hsetnx smallhash __123123123__ bar
set result [r hget smallhash __123123123__]
r hdel smallhash __123123123__
set _ $result
} {foo}
test {HSETNX target key missing - big hash} {
r hsetnx bighash __123123123__ foo
r hget bighash __123123123__
} {foo}
test {HSETNX target key exists - big hash} {
r hsetnx bighash __123123123__ bar
set result [r hget bighash __123123123__]
r hdel bighash __123123123__
set _ $result
} {foo}
test {HMSET wrong number of args} {
catch {r hmset smallhash key1 val1 key2} err
format $err
} {*wrong number*}
test {HMSET - small hash} {
set args {}
foreach {k v} [array get smallhash] {
set newval [randstring 0 8 alpha]
set smallhash($k) $newval
lappend args $k $newval
}
r hmset smallhash {*}$args
} {OK}
test {HMSET - big hash} {
set args {}
foreach {k v} [array get bighash] {
set newval [randstring 0 8 alpha]
set bighash($k) $newval
lappend args $k $newval
}
r hmset bighash {*}$args
} {OK}
test {HMGET against non existing key and fields} {
set rv {}
lappend rv [r hmget doesntexist __123123123__ __456456456__]
lappend rv [r hmget smallhash __123123123__ __456456456__]
lappend rv [r hmget bighash __123123123__ __456456456__]
set _ $rv
} {{{} {}} {{} {}} {{} {}}}
test {HMGET - small hash} {
set keys {}
set vals {}
foreach {k v} [array get smallhash] {
lappend keys $k
lappend vals $v
}
set err {}
set result [r hmget smallhash {*}$keys]
if {$vals ne $result} {
set err "$vals != $result"
}
set _ $err
} {}
test {HMGET - big hash} {
set keys {}
set vals {}
foreach {k v} [array get bighash] {
lappend keys $k
lappend vals $v
}
set err {}
set result [r hmget bighash {*}$keys]
if {$vals ne $result} {
set err "$vals != $result"
}
set _ $err
} {}
test {HKEYS - small hash} {
lsort [r hkeys smallhash]
} [lsort [array names smallhash *]]
test {HKEYS - big hash} {
lsort [r hkeys bighash]
} [lsort [array names bighash *]]
test {HVALS - small hash} {
set vals {}
foreach {k v} [array get smallhash] {
lappend vals $v
}
set _ [lsort $vals]
} [lsort [r hvals smallhash]]
test {HVALS - big hash} {
set vals {}
foreach {k v} [array get bighash] {
lappend vals $v
}
set _ [lsort $vals]
} [lsort [r hvals bighash]]
test {HGETALL - small hash} {
lsort [r hgetall smallhash]
} [lsort [array get smallhash]]
test {HGETALL - big hash} {
lsort [r hgetall bighash]
} [lsort [array get bighash]]
test {HDEL and return value} {
set rv {}
lappend rv [r hdel smallhash nokey]
lappend rv [r hdel bighash nokey]
set k [lindex [array names smallhash *] 0]
lappend rv [r hdel smallhash $k]
lappend rv [r hdel smallhash $k]
lappend rv [r hget smallhash $k]
unset smallhash($k)
set k [lindex [array names bighash *] 0]
lappend rv [r hdel bighash $k]
lappend rv [r hdel bighash $k]
lappend rv [r hget bighash $k]
unset bighash($k)
set _ $rv
} {0 0 1 0 {} 1 0 {}}
test {HEXISTS} {
set rv {}
set k [lindex [array names smallhash *] 0]
lappend rv [r hexists smallhash $k]
lappend rv [r hexists smallhash nokey]
set k [lindex [array names bighash *] 0]
lappend rv [r hexists bighash $k]
lappend rv [r hexists bighash nokey]
} {1 0 1 0}
# A value larger than the zipmap limit must promote the encoding.
test {Is a zipmap encoded Hash promoted on big payload?} {
r hset smallhash foo [string repeat a 1024]
r debug object smallhash
} {*hashtable*}
test {HINCRBY against non existing database key} {
r del htest
list [r hincrby htest foo 2]
} {2}
test {HINCRBY against non existing hash key} {
set rv {}
r hdel smallhash tmp
r hdel bighash tmp
lappend rv [r hincrby smallhash tmp 2]
lappend rv [r hget smallhash tmp]
lappend rv [r hincrby bighash tmp 2]
lappend rv [r hget bighash tmp]
} {2 2 2 2}
test {HINCRBY against hash key created by hincrby itself} {
set rv {}
lappend rv [r hincrby smallhash tmp 3]
lappend rv [r hget smallhash tmp]
lappend rv [r hincrby bighash tmp 3]
lappend rv [r hget bighash tmp]
} {5 5 5 5}
test {HINCRBY against hash key originally set with HSET} {
r hset smallhash tmp 100
r hset bighash tmp 100
list [r hincrby smallhash tmp 2] [r hincrby bighash tmp 2]
} {102 102}
test {HINCRBY over 32bit value} {
r hset smallhash tmp 17179869184
r hset bighash tmp 17179869184
list [r hincrby smallhash tmp 1] [r hincrby bighash tmp 1]
} {17179869185 17179869185}
test {HINCRBY over 32bit value with over 32bit increment} {
r hset smallhash tmp 17179869184
r hset bighash tmp 17179869184
list [r hincrby smallhash tmp 17179869184] [r hincrby bighash tmp 17179869184]
} {34359738368 34359738368}
test {HINCRBY fails against hash value with spaces} {
r hset smallhash str " 11 "
r hset bighash str " 11 "
catch {r hincrby smallhash str 1} smallerr
catch {r hincrby bighash str 1} bigerr
set rv {}
lappend rv [string match "ERR*not an integer*" $smallerr]
lappend rv [string match "ERR*not an integer*" $bigerr]
} {1 1}
}

328
test/unit/type/list.tcl Normal file
View File

@ -0,0 +1,328 @@
# Tests for the list type: push/pop, LINDEX, RPOPLPUSH, LRANGE, LTRIM,
# LSET and LREM. Many tests reuse the 'mylist' key left over by earlier
# tests, so they are order-dependent.
start_server default.conf {} {
# The first expected element "1233122baced" is the concatenation of all
# the individual replies appended into $res.
test {Basic LPUSH, RPUSH, LLENGTH, LINDEX} {
set res [r lpush mylist a]
append res [r lpush mylist b]
append res [r rpush mylist c]
append res [r llen mylist]
append res [r rpush anotherlist d]
append res [r lpush anotherlist e]
append res [r llen anotherlist]
append res [r lindex mylist 0]
append res [r lindex mylist 1]
append res [r lindex mylist 2]
append res [r lindex anotherlist 0]
append res [r lindex anotherlist 1]
list $res [r lindex mylist 100]
} {1233122baced {}}
test {DEL a list} {
r del mylist
r exists mylist
} {0}
# Checks every element by both positive and negative index (2 * 1000).
test {Create a long list and check every single element with LINDEX} {
set ok 0
for {set i 0} {$i < 1000} {incr i} {
r rpush mylist $i
}
for {set i 0} {$i < 1000} {incr i} {
if {[r lindex mylist $i] eq $i} {incr ok}
if {[r lindex mylist [expr (-$i)-1]] eq [expr 999-$i]} {
incr ok
}
}
format $ok
} {2000}
test {Test elements with LINDEX in random access} {
set ok 0
for {set i 0} {$i < 1000} {incr i} {
set rint [expr int(rand()*1000)]
if {[r lindex mylist $rint] eq $rint} {incr ok}
if {[r lindex mylist [expr (-$rint)-1]] eq [expr 999-$rint]} {
incr ok
}
}
format $ok
} {2000}
test {Check if the list is still ok after a DEBUG RELOAD} {
r debug reload
set ok 0
for {set i 0} {$i < 1000} {incr i} {
set rint [expr int(rand()*1000)]
if {[r lindex mylist $rint] eq $rint} {incr ok}
if {[r lindex mylist [expr (-$rint)-1]] eq [expr 999-$rint]} {
incr ok
}
}
format $ok
} {2000}
test {LLEN against non-list value error} {
r del mylist
r set mylist foobar
catch {r llen mylist} err
format $err
} {ERR*}
test {LLEN against non existing key} {
r llen not-a-key
} {0}
test {LINDEX against non-list value error} {
catch {r lindex mylist 0} err
format $err
} {ERR*}
test {LINDEX against non existing key} {
r lindex not-a-key 10
} {}
test {LPUSH against non-list value error} {
catch {r lpush mylist 0} err
format $err
} {ERR*}
test {RPUSH against non-list value error} {
catch {r rpush mylist 0} err
format $err
} {ERR*}
test {RPOPLPUSH base case} {
r del mylist
r rpush mylist a
r rpush mylist b
r rpush mylist c
r rpush mylist d
set v1 [r rpoplpush mylist newlist]
set v2 [r rpoplpush mylist newlist]
set l1 [r lrange mylist 0 -1]
set l2 [r lrange newlist 0 -1]
list $v1 $v2 $l1 $l2
} {d c {a b} {c d}}
# Rotation case: src == dst must rotate the list in place.
test {RPOPLPUSH with the same list as src and dst} {
r del mylist
r rpush mylist a
r rpush mylist b
r rpush mylist c
set l1 [r lrange mylist 0 -1]
set v [r rpoplpush mylist mylist]
set l2 [r lrange mylist 0 -1]
list $l1 $v $l2
} {{a b c} c {c a b}}
test {RPOPLPUSH target list already exists} {
r del mylist
r del newlist
r rpush mylist a
r rpush mylist b
r rpush mylist c
r rpush mylist d
r rpush newlist x
set v1 [r rpoplpush mylist newlist]
set v2 [r rpoplpush mylist newlist]
set l1 [r lrange mylist 0 -1]
set l2 [r lrange newlist 0 -1]
list $v1 $v2 $l1 $l2
} {d c {a b} {c d x}}
test {RPOPLPUSH against non existing key} {
r del mylist
r del newlist
set v1 [r rpoplpush mylist newlist]
list $v1 [r exists mylist] [r exists newlist]
} {{} 0 0}
test {RPOPLPUSH against non list src key} {
r del mylist
r del newlist
r set mylist x
catch {r rpoplpush mylist newlist} err
list [r type mylist] [r exists newlist] [string range $err 0 2]
} {string 0 ERR}
test {RPOPLPUSH against non list dst key} {
r del mylist
r del newlist
r rpush mylist a
r rpush mylist b
r rpush mylist c
r rpush mylist d
r set newlist x
catch {r rpoplpush mylist newlist} err
list [r lrange mylist 0 -1] [r type newlist] [string range $err 0 2]
} {{a b c d} string ERR}
test {RPOPLPUSH against non existing src key} {
r del mylist
r del newlist
r rpoplpush mylist newlist
} {}
test {Basic LPOP/RPOP} {
r del mylist
r rpush mylist 1
r rpush mylist 2
r lpush mylist 0
list [r lpop mylist] [r rpop mylist] [r lpop mylist] [r llen mylist]
} [list 0 2 1 0]
test {LPOP/RPOP against empty list} {
r lpop mylist
} {}
test {LPOP against non list value} {
r set notalist foo
catch {r lpop notalist} err
format $err
} {ERR*kind*}
# Popping alternately from both ends must consume exactly the pushed sum.
test {Mass LPUSH/LPOP} {
set sum 0
for {set i 0} {$i < 1000} {incr i} {
r lpush mylist $i
incr sum $i
}
set sum2 0
for {set i 0} {$i < 500} {incr i} {
incr sum2 [r lpop mylist]
incr sum2 [r rpop mylist]
}
expr $sum == $sum2
} {1}
test {LRANGE basics} {
for {set i 0} {$i < 10} {incr i} {
r rpush mylist $i
}
list [r lrange mylist 1 -2] \
[r lrange mylist -3 -1] \
[r lrange mylist 4 4]
} {{1 2 3 4 5 6 7 8} {7 8 9} 4}
test {LRANGE inverted indexes} {
r lrange mylist 6 2
} {}
test {LRANGE out of range indexes including the full list} {
r lrange mylist -1000 1000
} {0 1 2 3 4 5 6 7 8 9}
test {LRANGE against non existing key} {
r lrange nosuchkey 0 1
} {}
test {LTRIM basics} {
r del mylist
for {set i 0} {$i < 100} {incr i} {
r lpush mylist $i
r ltrim mylist 0 4
}
r lrange mylist 0 -1
} {99 98 97 96 95}
# Compares LTRIM with Tcl's [lrange] on random trim boundaries.
test {LTRIM stress testing} {
set mylist {}
set err {}
for {set i 0} {$i < 20} {incr i} {
lappend mylist $i
}
for {set j 0} {$j < 100} {incr j} {
# Fill the list
r del mylist
for {set i 0} {$i < 20} {incr i} {
r rpush mylist $i
}
# Trim at random
set a [randomInt 20]
set b [randomInt 20]
r ltrim mylist $a $b
if {[r lrange mylist 0 -1] ne [lrange $mylist $a $b]} {
set err "[r lrange mylist 0 -1] != [lrange $mylist $a $b]"
break
}
}
set _ $err
} {}
test {LSET} {
r del mylist
foreach x {99 98 97 96 95} {
r rpush mylist $x
}
r lset mylist 1 foo
r lset mylist -1 bar
r lrange mylist 0 -1
} {99 foo 97 96 bar}
test {LSET out of range index} {
catch {r lset mylist 10 foo} err
format $err
} {ERR*range*}
test {LSET against non existing key} {
catch {r lset nosuchkey 10 foo} err
format $err
} {ERR*key*}
test {LSET against non list value} {
r set nolist foobar
catch {r lset nolist 0 foo} err
format $err
} {ERR*value*}
test {LREM, remove all the occurrences} {
r flushdb
r rpush mylist foo
r rpush mylist bar
r rpush mylist foobar
r rpush mylist foobared
r rpush mylist zap
r rpush mylist bar
r rpush mylist test
r rpush mylist foo
set res [r lrem mylist 0 bar]
list [r lrange mylist 0 -1] $res
} {{foo foobar foobared zap test foo} 2}
test {LREM, remove the first occurrence} {
set res [r lrem mylist 1 foo]
list [r lrange mylist 0 -1] $res
} {{foobar foobared zap test foo} 1}
test {LREM, remove non existing element} {
set res [r lrem mylist 1 nosuchelement]
list [r lrange mylist 0 -1] $res
} {{foobar foobared zap test foo} 0}
test {LREM, starting from tail with negative count} {
r flushdb
r rpush mylist foo
r rpush mylist bar
r rpush mylist foobar
r rpush mylist foobared
r rpush mylist zap
r rpush mylist bar
r rpush mylist test
r rpush mylist foo
r rpush mylist foo
set res [r lrem mylist -1 bar]
list [r lrange mylist 0 -1] $res
} {{foo bar foobar foobared zap test foo foo} 1}
test {LREM, starting from tail with negative count (2)} {
set res [r lrem mylist -2 foo]
list [r lrange mylist 0 -1] $res
} {{foo bar foobar foobared zap test} 2}
test {LREM, deleting objects that may be encoded as integers} {
r lpush myotherlist 1
r lpush myotherlist 2
r lpush myotherlist 3
r lrem myotherlist 1 2
r llen myotherlist
} {2}
}

151
test/unit/type/set.tcl Normal file
View File

@ -0,0 +1,151 @@
# Tests for the set type: SADD/SREM/SCARD, set algebra (SINTER, SUNION,
# SDIFF and their *STORE variants), SPOP, SRANDMEMBER and SMOVE.
# Fix in this revision: "SUNIONSTORE against non existing keys" checked
# [r exists xxx] — the key 'xxx' never existed (the prior line sets the
# *value* of 'setres' to "xxx"), so the assertion that the destination key
# is deleted was vacuous. It now checks [r exists setres].
start_server default.conf {} {
test {SADD, SCARD, SISMEMBER, SMEMBERS basics} {
r sadd myset foo
r sadd myset bar
list [r scard myset] [r sismember myset foo] \
[r sismember myset bar] [r sismember myset bla] \
[lsort [r smembers myset]]
} {2 1 1 0 {bar foo}}
test {SADD adding the same element multiple times} {
r sadd myset foo
r sadd myset foo
r sadd myset foo
r scard myset
} {2}
test {SADD against non set} {
r lpush mylist foo
catch {r sadd mylist bar} err
format $err
} {ERR*kind*}
test {SREM basics} {
r sadd myset ciao
r srem myset foo
lsort [r smembers myset]
} {bar ciao}
# set1 = 0..999, set2 = 995..1994, so the intersection is 995..999.
test {Mass SADD and SINTER with two sets} {
for {set i 0} {$i < 1000} {incr i} {
r sadd set1 $i
r sadd set2 [expr $i+995]
}
lsort [r sinter set1 set2]
} {995 996 997 998 999}
test {SUNION with two sets} {
lsort [r sunion set1 set2]
} [lsort -uniq "[r smembers set1] [r smembers set2]"]
test {SINTERSTORE with two sets} {
r sinterstore setres set1 set2
lsort [r smembers setres]
} {995 996 997 998 999}
test {SINTERSTORE with two sets, after a DEBUG RELOAD} {
r debug reload
r sinterstore setres set1 set2
lsort [r smembers setres]
} {995 996 997 998 999}
test {SUNIONSTORE with two sets} {
r sunionstore setres set1 set2
lsort [r smembers setres]
} [lsort -uniq "[r smembers set1] [r smembers set2]"]
# An empty union result must delete the pre-existing destination key.
test {SUNIONSTORE against non existing keys} {
r set setres xxx
list [r sunionstore setres foo111 bar222] [r exists setres]
} {0 0}
test {SINTER against three sets} {
r sadd set3 999
r sadd set3 995
r sadd set3 1000
r sadd set3 2000
lsort [r sinter set1 set2 set3]
} {995 999}
test {SINTERSTORE with three sets} {
r sinterstore setres set1 set2 set3
lsort [r smembers setres]
} {995 999}
test {SUNION with non existing keys} {
lsort [r sunion nokey1 set1 set2 nokey2]
} [lsort -uniq "[r smembers set1] [r smembers set2]"]
test {SDIFF with two sets} {
for {set i 5} {$i < 1000} {incr i} {
r sadd set4 $i
}
lsort [r sdiff set1 set4]
} {0 1 2 3 4}
test {SDIFF with three sets} {
r sadd set5 0
lsort [r sdiff set1 set4 set5]
} {1 2 3 4}
test {SDIFFSTORE with three sets} {
r sdiffstore sres set1 set4 set5
lsort [r smembers sres]
} {1 2 3 4}
test {SPOP basics} {
r del myset
r sadd myset 1
r sadd myset 2
r sadd myset 3
list [lsort [list [r spop myset] [r spop myset] [r spop myset]]] [r scard myset]
} {{1 2 3} 0}
# Note: 'myset' below names both the server key and a Tcl array used to
# collect the members returned by SRANDMEMBER.
test {SRANDMEMBER} {
r del myset
r sadd myset a
r sadd myset b
r sadd myset c
unset -nocomplain myset
array set myset {}
for {set i 0} {$i < 100} {incr i} {
set myset([r srandmember myset]) 1
}
lsort [array names myset]
} {a b c}
test {SMOVE basics} {
r sadd myset1 a
r sadd myset1 b
r sadd myset1 c
r sadd myset2 x
r sadd myset2 y
r sadd myset2 z
r smove myset1 myset2 a
list [lsort [r smembers myset2]] [lsort [r smembers myset1]]
} {{a x y z} {b c}}
test {SMOVE non existing key} {
list [r smove myset1 myset2 foo] [lsort [r smembers myset2]] [lsort [r smembers myset1]]
} {0 {a x y z} {b c}}
test {SMOVE non existing src set} {
list [r smove noset myset2 foo] [lsort [r smembers myset2]]
} {0 {a x y z}}
test {SMOVE non existing dst set} {
list [r smove myset2 myset3 y] [lsort [r smembers myset2]] [lsort [r smembers myset3]]
} {1 {a x z} y}
test {SMOVE wrong src key type} {
r set x 10
catch {r smove x myset2 foo} err
format $err
} {ERR*}
test {SMOVE wrong dst key type} {
r set x 10
catch {r smove myset2 x foo} err
format $err
} {ERR*}
}

384
test/unit/type/zset.tcl Normal file
View File

@ -0,0 +1,384 @@
start_server default.conf {} {
test {ZSET basic ZADD and score update} {
r zadd ztmp 10 x
r zadd ztmp 20 y
r zadd ztmp 30 z
set aux1 [r zrange ztmp 0 -1]
r zadd ztmp 1 y
set aux2 [r zrange ztmp 0 -1]
list $aux1 $aux2
} {{x y z} {y x z}}
test {ZCARD basics} {
r zcard ztmp
} {3}
test {ZCARD non existing key} {
r zcard ztmp-blabla
} {0}
test {ZRANK basics} {
r zadd zranktmp 10 x
r zadd zranktmp 20 y
r zadd zranktmp 30 z
list [r zrank zranktmp x] [r zrank zranktmp y] [r zrank zranktmp z]
} {0 1 2}
test {ZREVRANK basics} {
list [r zrevrank zranktmp x] [r zrevrank zranktmp y] [r zrevrank zranktmp z]
} {2 1 0}
test {ZRANK - after deletion} {
r zrem zranktmp y
list [r zrank zranktmp x] [r zrank zranktmp z]
} {0 1}
test {ZSCORE} {
set aux {}
set err {}
for {set i 0} {$i < 1000} {incr i} {
set score [expr rand()]
lappend aux $score
r zadd zscoretest $score $i
}
for {set i 0} {$i < 1000} {incr i} {
if {[r zscore zscoretest $i] != [lindex $aux $i]} {
set err "Expected score was [lindex $aux $i] but got [r zscore zscoretest $i] for element $i"
break
}
}
set _ $err
} {}
test {ZSCORE after a DEBUG RELOAD} {
set aux {}
set err {}
r del zscoretest
for {set i 0} {$i < 1000} {incr i} {
set score [expr rand()]
lappend aux $score
r zadd zscoretest $score $i
}
r debug reload
for {set i 0} {$i < 1000} {incr i} {
if {[r zscore zscoretest $i] != [lindex $aux $i]} {
set err "Expected score was [lindex $aux $i] but got [r zscore zscoretest $i] for element $i"
break
}
}
set _ $err
} {}
    # These rely on ztmp as left by the first ZADD test above
    # (scores: y=1, x=10, z=30).
    test {ZRANGE and ZREVRANGE basics} {
        list [r zrange ztmp 0 -1] [r zrevrange ztmp 0 -1] \
            [r zrange ztmp 1 -1] [r zrevrange ztmp 1 -1]
    } {{y x z} {z x y} {x z} {x y}}
    # WITHSCORES interleaves each member with its score in the reply.
    test {ZRANGE WITHSCORES} {
        r zrange ztmp 0 -1 withscores
    } {y 1 x 10 z 30}
test {ZSETs stress tester - sorting is working well?} {
set delta 0
for {set test 0} {$test < 2} {incr test} {
unset -nocomplain auxarray
array set auxarray {}
set auxlist {}
r del myzset
for {set i 0} {$i < 1000} {incr i} {
if {$test == 0} {
set score [expr rand()]
} else {
set score [expr int(rand()*10)]
}
set auxarray($i) $score
r zadd myzset $score $i
# Random update
if {[expr rand()] < .2} {
set j [expr int(rand()*1000)]
if {$test == 0} {
set score [expr rand()]
} else {
set score [expr int(rand()*10)]
}
set auxarray($j) $score
r zadd myzset $score $j
}
}
foreach {item score} [array get auxarray] {
lappend auxlist [list $score $item]
}
set sorted [lsort -command zlistAlikeSort $auxlist]
set auxlist {}
foreach x $sorted {
lappend auxlist [lindex $x 1]
}
set fromredis [r zrange myzset 0 -1]
set delta 0
for {set i 0} {$i < [llength $fromredis]} {incr i} {
if {[lindex $fromredis $i] != [lindex $auxlist $i]} {
incr delta
}
}
}
format $delta
} {0}
test {ZINCRBY - can create a new sorted set} {
r del zset
r zincrby zset 1 foo
list [r zrange zset 0 -1] [r zscore zset foo]
} {foo 1}
test {ZINCRBY - increment and decrement} {
r zincrby zset 2 foo
r zincrby zset 1 bar
set v1 [r zrange zset 0 -1]
r zincrby zset 10 bar
r zincrby zset -5 foo
r zincrby zset -5 bar
set v2 [r zrange zset 0 -1]
list $v1 $v2 [r zscore zset foo] [r zscore zset bar]
} {{bar foo} {foo bar} -2 6}
test {ZRANGEBYSCORE and ZCOUNT basics} {
r del zset
r zadd zset 1 a
r zadd zset 2 b
r zadd zset 3 c
r zadd zset 4 d
r zadd zset 5 e
list [r zrangebyscore zset 2 4] [r zrangebyscore zset (2 (4] \
[r zcount zset 2 4] [r zcount zset (2 (4]
} {{b c d} c 3 1}
test {ZRANGEBYSCORE withscores} {
r del zset
r zadd zset 1 a
r zadd zset 2 b
r zadd zset 3 c
r zadd zset 4 d
r zadd zset 5 e
r zrangebyscore zset 2 4 withscores
} {b 2 c 3 d 4}
    # Fuzzy test: build a 1000-element zset with random scores, then for 100
    # random [min, max] intervals cross-check ZRANGEBYSCORE against ZCOUNT
    # and against the scores of every returned element, for both inclusive
    # and exclusive ("(" prefixed) bounds.
    test {ZRANGEBYSCORE fuzzy test, 100 ranges in 1000 elements sorted set} {
        set err {}
        r del zset
        for {set i 0} {$i < 1000} {incr i} {
            r zadd zset [expr rand()] $i
        }
        for {set i 0} {$i < 100} {incr i} {
            # Pick a random interval, swapping so that min <= max.
            set min [expr rand()]
            set max [expr rand()]
            if {$min > $max} {
                set aux $min
                set min $max
                set max $aux
            }
            # Inclusive queries for the three sub-ranges of the score space.
            set low [r zrangebyscore zset -inf $min]
            set ok [r zrangebyscore zset $min $max]
            set high [r zrangebyscore zset $max +inf]
            # The same three queries with exclusive bounds.
            set lowx [r zrangebyscore zset -inf ($min]
            set okx [r zrangebyscore zset ($min ($max]
            set highx [r zrangebyscore zset ($max +inf]
            # ZCOUNT must equal the length of each ZRANGEBYSCORE reply.
            if {[r zcount zset -inf $min] != [llength $low]} {
                append err "Error, len does not match zcount\n"
            }
            if {[r zcount zset $min $max] != [llength $ok]} {
                append err "Error, len does not match zcount\n"
            }
            if {[r zcount zset $max +inf] != [llength $high]} {
                append err "Error, len does not match zcount\n"
            }
            if {[r zcount zset -inf ($min] != [llength $lowx]} {
                append err "Error, len does not match zcount\n"
            }
            if {[r zcount zset ($min ($max] != [llength $okx]} {
                append err "Error, len does not match zcount\n"
            }
            if {[r zcount zset ($max +inf] != [llength $highx]} {
                append err "Error, len does not match zcount\n"
            }
            # Every returned element's score must fall inside the queried
            # range (strictly inside for the exclusive variants).
            foreach x $low {
                set score [r zscore zset $x]
                if {$score > $min} {
                    append err "Error, score for $x is $score > $min\n"
                }
            }
            foreach x $lowx {
                set score [r zscore zset $x]
                if {$score >= $min} {
                    append err "Error, score for $x is $score >= $min\n"
                }
            }
            foreach x $ok {
                set score [r zscore zset $x]
                if {$score < $min || $score > $max} {
                    append err "Error, score for $x is $score outside $min-$max range\n"
                }
            }
            foreach x $okx {
                set score [r zscore zset $x]
                if {$score <= $min || $score >= $max} {
                    append err "Error, score for $x is $score outside $min-$max open range\n"
                }
            }
            foreach x $high {
                set score [r zscore zset $x]
                if {$score < $max} {
                    append err "Error, score for $x is $score < $max\n"
                }
            }
            foreach x $highx {
                set score [r zscore zset $x]
                if {$score <= $max} {
                    append err "Error, score for $x is $score <= $max\n"
                }
            }
        }
        set _ $err
    } {}
test {ZRANGEBYSCORE with LIMIT} {
r del zset
r zadd zset 1 a
r zadd zset 2 b
r zadd zset 3 c
r zadd zset 4 d
r zadd zset 5 e
list \
[r zrangebyscore zset 0 10 LIMIT 0 2] \
[r zrangebyscore zset 0 10 LIMIT 2 3] \
[r zrangebyscore zset 0 10 LIMIT 2 10] \
[r zrangebyscore zset 0 10 LIMIT 20 10]
} {{a b} {c d e} {c d e} {}}
test {ZRANGEBYSCORE with LIMIT and withscores} {
r del zset
r zadd zset 10 a
r zadd zset 20 b
r zadd zset 30 c
r zadd zset 40 d
r zadd zset 50 e
r zrangebyscore zset 20 50 LIMIT 2 3 withscores
} {d 40 e 50}
test {ZREMRANGEBYSCORE basics} {
r del zset
r zadd zset 1 a
r zadd zset 2 b
r zadd zset 3 c
r zadd zset 4 d
r zadd zset 5 e
list [r zremrangebyscore zset 2 4] [r zrange zset 0 -1]
} {3 {a e}}
test {ZREMRANGEBYSCORE from -inf to +inf} {
r del zset
r zadd zset 1 a
r zadd zset 2 b
r zadd zset 3 c
r zadd zset 4 d
r zadd zset 5 e
list [r zremrangebyscore zset -inf +inf] [r zrange zset 0 -1]
} {5 {}}
test {ZREMRANGEBYRANK basics} {
r del zset
r zadd zset 1 a
r zadd zset 2 b
r zadd zset 3 c
r zadd zset 4 d
r zadd zset 5 e
list [r zremrangebyrank zset 1 3] [r zrange zset 0 -1]
} {3 {a e}}
test {ZUNION against non-existing key doesn't set destination} {
r del zseta
list [r zunion dst_key 1 zseta] [r exists dst_key]
} {0 0}
test {ZUNION basics} {
r del zseta zsetb zsetc
r zadd zseta 1 a
r zadd zseta 2 b
r zadd zseta 3 c
r zadd zsetb 1 b
r zadd zsetb 2 c
r zadd zsetb 3 d
list [r zunion zsetc 2 zseta zsetb] [r zrange zsetc 0 -1 withscores]
} {4 {a 1 b 3 d 3 c 5}}
test {ZUNION with weights} {
list [r zunion zsetc 2 zseta zsetb weights 2 3] [r zrange zsetc 0 -1 withscores]
} {4 {a 2 b 7 d 9 c 12}}
test {ZUNION with AGGREGATE MIN} {
list [r zunion zsetc 2 zseta zsetb aggregate min] [r zrange zsetc 0 -1 withscores]
} {4 {a 1 b 1 c 2 d 3}}
test {ZUNION with AGGREGATE MAX} {
list [r zunion zsetc 2 zseta zsetb aggregate max] [r zrange zsetc 0 -1 withscores]
} {4 {a 1 b 2 c 3 d 3}}
test {ZINTER basics} {
list [r zinter zsetc 2 zseta zsetb] [r zrange zsetc 0 -1 withscores]
} {2 {b 3 c 5}}
test {ZINTER with weights} {
list [r zinter zsetc 2 zseta zsetb weights 2 3] [r zrange zsetc 0 -1 withscores]
} {2 {b 7 c 12}}
test {ZINTER with AGGREGATE MIN} {
list [r zinter zsetc 2 zseta zsetb aggregate min] [r zrange zsetc 0 -1 withscores]
} {2 {b 1 c 2}}
test {ZINTER with AGGREGATE MAX} {
list [r zinter zsetc 2 zseta zsetb aggregate max] [r zrange zsetc 0 -1 withscores]
} {2 {b 2 c 3}}
    # Interleave ~10000 random insertions with random deletions, then check
    # that the forward traversal (ZRANGE) and the backward traversal
    # (ZREVRANGE) are exact mirrors of each other: this exercises the
    # skiplist backward links.
    # NOTE(review): myzset is not deleted first, so elements left over from
    # the sorting stress test above remain in the set; the mirror check is
    # still valid for mixed content — confirm this is intentional.
    test {ZSETs skiplist implementation backlink consistency test} {
        set diff 0
        set elements 10000
        for {set j 0} {$j < $elements} {incr j} {
            r zadd myzset [expr rand()] "Element-$j"
            r zrem myzset "Element-[expr int(rand()*$elements)]"
        }
        set l1 [r zrange myzset 0 -1]
        set l2 [r zrevrange myzset 0 -1]
        for {set j 0} {$j < [llength $l1]} {incr j} {
            # Element j from the front must equal element j from the back.
            if {[lindex $l1 $j] ne [lindex $l2 end-$j]} {
                incr diff
            }
        }
        format $diff
    } {0}
    # Stress the rank-augmented skiplist: perform 10000 random add/remove
    # operations over elements 0..999, and after each one verify that the
    # element found at a random index by ZRANGE is mapped back to that same
    # index by ZRANK.
    test {ZSETs ZRANK augmented skip list stress testing} {
        set err {}
        r del myzset
        for {set k 0} {$k < 10000} {incr k} {
            set i [expr {$k%1000}]
            # ~20% removals, ~80% insertions/updates with a random score.
            if {[expr rand()] < .2} {
                r zrem myzset $i
            } else {
                set score [expr rand()]
                r zadd myzset $score $i
            }
            set card [r zcard myzset]
            if {$card > 0} {
                # NOTE(review): randomInt is a helper defined elsewhere in
                # the suite; presumably it returns an int in [0, card).
                set index [randomInt $card]
                set ele [lindex [r zrange myzset $index $index] 0]
                set rank [r zrank myzset $ele]
                if {$rank != $index} {
                    set err "$ele RANK is wrong! ($rank != $index)"
                    break
                }
            }
        }
        set _ $err
    } {}
}