Merge branch 'unstable' into fix_typo

Authored by dangerousor on 2024-03-25 08:40:50 +08:00; committed by GitHub.
commit d01f8478d6
1171 changed files with 105036 additions and 48469 deletions


@ -1 +1 @@
codespell==2.2.2
codespell==2.2.5


@ -17,7 +17,13 @@ jobs:
sudo apt-get install tcl8.6 tclx
./runtest --verbose --tags -slow --dump-logs
- name: module api test
run: ./runtest-moduleapi --verbose --dump-logs
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs
- name: validate commands.def up to date
run: |
touch src/commands/ping.json
make commands.def
dirty=$(git diff)
if [[ ! -z $dirty ]]; then echo $dirty; exit 1; fi
test-sanitizer-address:
runs-on: ubuntu-latest
@ -25,17 +31,20 @@ jobs:
- uses: actions/checkout@v3
- name: make
# build with TLS module just for compilation coverage
run: make SANITIZER=address REDIS_CFLAGS='-Werror' BUILD_TLS=module
run: make SANITIZER=address REDIS_CFLAGS='-Werror -DDEBUG_ASSERTIONS' BUILD_TLS=module
- name: testprep
run: sudo apt-get install tcl8.6 tclx -y
# Work around ASAN issue, see https://github.com/google/sanitizers/issues/1716
run: |
sudo apt-get install tcl8.6 tclx -y
sudo sysctl vm.mmap_rnd_bits=28
- name: test
run: ./runtest --verbose --tags -slow --dump-logs
- name: module api test
run: ./runtest-moduleapi --verbose --dump-logs
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs
build-debian-old:
runs-on: ubuntu-latest
container: debian:oldoldstable
container: debian:buster
steps:
- uses: actions/checkout@v3
- name: make
@ -75,3 +84,4 @@ jobs:
run: |
yum -y install gcc make
make REDIS_CFLAGS='-Werror'
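The "validate commands.def up to date" step added above can be reproduced locally. A minimal sketch, assuming a checkout of this repository with the same Makefile target; git diff --exit-code stands in for the workflow's dirty-output check:

touch src/commands/ping.json   # make the generator treat the JSON command specs as changed
make commands.def              # regenerate src/commands.def from the specs
git diff --exit-code           # non-zero exit (as in CI) if the checked-in commands.def is stale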


@ -22,12 +22,12 @@ jobs:
uses: actions/checkout@v3
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
- name: Autobuild
uses: github/codeql-action/autobuild@v2
uses: github/codeql-action/autobuild@v3
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
uses: github/codeql-action/analyze@v3

.github/workflows/coverity.yml (new file, 32 lines)

@ -0,0 +1,32 @@
# Creates and uploads a Coverity build on a schedule
name: Coverity Scan
on:
schedule:
# Run once daily, since below 500k LOC can have 21 builds per week, per https://scan.coverity.com/faq#frequency
- cron: '0 0 * * *'
# Support manual execution
workflow_dispatch:
jobs:
coverity:
if: github.repository == 'redis/redis'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@main
- name: Download and extract the Coverity Build Tool
run: |
wget -q https://scan.coverity.com/download/cxx/linux64 --post-data "token=${{ secrets.COVERITY_SCAN_TOKEN }}&project=redis-unstable" -O cov-analysis-linux64.tar.gz
mkdir cov-analysis-linux64
tar xzf cov-analysis-linux64.tar.gz --strip 1 -C cov-analysis-linux64
- name: Install Redis dependencies
run: sudo apt install -y gcc tcl8.6 tclx procps libssl-dev
- name: Build with cov-build
run: cov-analysis-linux64/bin/cov-build --dir cov-int make
- name: Upload the result
run: |
tar czvf cov-int.tgz cov-int
curl \
--form project=redis-unstable \
--form email=${{ secrets.COVERITY_SCAN_EMAIL }} \
--form token=${{ secrets.COVERITY_SCAN_TOKEN }} \
--form file=@cov-int.tgz \
https://scan.coverity.com/builds
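Because the workflow above also declares workflow_dispatch, it can be started outside the daily cron. A sketch, assuming the GitHub CLI is installed and you have permission on the redis/redis repository (the job is gated on that repository name):

gh workflow run coverity.yml   # manually trigger the 'Coverity Scan' workflow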


@ -11,7 +11,7 @@ on:
inputs:
skipjobs:
description: 'jobs to skip (delete the ones you wanna keep, do not leave empty)'
default: 'valgrind,sanitizer,tls,freebsd,macos,alpine,32bit,iothreads,ubuntu,centos,malloc'
default: 'valgrind,sanitizer,tls,freebsd,macos,alpine,32bit,iothreads,ubuntu,centos,malloc,specific,fortify,reply-schema'
skiptests:
description: 'tests to skip (delete the ones you wanna keep, do not leave empty)'
default: 'redis,modules,sentinel,cluster,unittest'
@ -43,7 +43,10 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipping: ${{github.event.inputs.skipjobs}} and ${{github.event.inputs.skiptests}}"
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
@ -57,7 +60,51 @@ jobs:
run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: sentinel tests
if: true && !contains(github.event.inputs.skiptests, 'sentinel')
run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}}
- name: cluster tests
if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}}
- name: unittest
if: true && !contains(github.event.inputs.skiptests, 'unittest')
run: ./src/redis-server test all --accurate
test-ubuntu-jemalloc-fortify:
runs-on: ubuntu-latest
if: |
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'fortify')
container: ubuntu:lunar
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
run: |
apt-get update && apt-get install -y make gcc-13
update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-13 100
make CC=gcc REDIS_CFLAGS='-Werror -DREDIS_TEST -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=3'
- name: testprep
run: apt-get install -y tcl8.6 tclx procps
- name: test
if: true && !contains(github.event.inputs.skiptests, 'redis')
run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: sentinel tests
if: true && !contains(github.event.inputs.skiptests, 'sentinel')
run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}}
@ -80,6 +127,10 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
@ -93,7 +144,7 @@ jobs:
run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: sentinel tests
if: true && !contains(github.event.inputs.skiptests, 'sentinel')
run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}}
@ -113,6 +164,10 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
@ -126,7 +181,7 @@ jobs:
run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: sentinel tests
if: true && !contains(github.event.inputs.skiptests, 'sentinel')
run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}}
@ -146,6 +201,10 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
@ -163,7 +222,7 @@ jobs:
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: |
make -C tests/modules 32bit # the script below doesn't have an argument, we must build manually ahead of time
./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: sentinel tests
if: true && !contains(github.event.inputs.skiptests, 'sentinel')
run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}}
@ -186,6 +245,10 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
@ -204,7 +267,7 @@ jobs:
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: |
./runtest-moduleapi --verbose --dump-logs --tls --dump-logs ${{github.event.inputs.test_args}}
CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs --tls --dump-logs ${{github.event.inputs.test_args}}
- name: sentinel tests
if: true && !contains(github.event.inputs.skiptests, 'sentinel')
run: |
@ -226,6 +289,10 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
@ -244,7 +311,7 @@ jobs:
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: |
./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: sentinel tests
if: true && !contains(github.event.inputs.skiptests, 'sentinel')
run: |
@ -266,6 +333,10 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
@ -282,6 +353,84 @@ jobs:
if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: ./runtest-cluster --config io-threads 4 --config io-threads-do-reads yes ${{github.event.inputs.cluster_test_args}}
test-ubuntu-reclaim-cache:
runs-on: ubuntu-latest
if: |
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'specific')
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
run: |
make REDIS_CFLAGS='-Werror'
- name: testprep
run: |
sudo apt-get install vmtouch
mkdir /tmp/master
mkdir /tmp/slave
- name: warm up
run: |
./src/redis-server --daemonize yes --logfile /dev/null
./src/redis-benchmark -n 1 > /dev/null
./src/redis-cli save | grep OK > /dev/null
vmtouch -v ./dump.rdb > /dev/null
- name: test
run: |
echo "test SAVE doesn't increase cache"
CACHE0=$(grep -w file /sys/fs/cgroup/memory.stat | awk '{print $2}')
echo "$CACHE0"
./src/redis-server --daemonize yes --logfile /dev/null --dir /tmp/master --port 8080 --repl-diskless-sync no --pidfile /tmp/master/redis.pid --rdbcompression no --enable-debug-command yes
./src/redis-cli -p 8080 debug populate 10000 k 102400
./src/redis-server --daemonize yes --logfile /dev/null --dir /tmp/slave --port 8081 --repl-diskless-load disabled --rdbcompression no
./src/redis-cli -p 8080 save > /dev/null
VMOUT=$(vmtouch -v /tmp/master/dump.rdb)
echo $VMOUT
grep -q " 0%" <<< $VMOUT
CACHE=$(grep -w file /sys/fs/cgroup/memory.stat | awk '{print $2}')
echo "$CACHE"
if [ "$(( $CACHE-$CACHE0 ))" -gt "8000000" ]; then exit 1; fi
echo "test replication doesn't increase cache"
./src/redis-cli -p 8081 REPLICAOF 127.0.0.1 8080 > /dev/null
while [ $(./src/redis-cli -p 8081 info replication | grep "master_link_status:down") ]; do sleep 1; done;
sleep 1 # wait for the completion of cache reclaim bio
VMOUT=$(vmtouch -v /tmp/master/dump.rdb)
echo $VMOUT
grep -q " 0%" <<< $VMOUT
VMOUT=$(vmtouch -v /tmp/slave/dump.rdb)
echo $VMOUT
grep -q " 0%" <<< $VMOUT
CACHE=$(grep -w file /sys/fs/cgroup/memory.stat | awk '{print $2}')
echo "$CACHE"
if [ "$(( $CACHE-$CACHE0 ))" -gt "8000000" ]; then exit 1; fi
echo "test reboot doesn't increase cache"
PID=$(cat /tmp/master/redis.pid)
kill -15 $PID
while [ -x /proc/${PID} ]; do sleep 1; done
./src/redis-server --daemonize yes --logfile /dev/null --dir /tmp/master --port 8080
while [ $(./src/redis-cli -p 8080 info persistence | grep "loading:1") ]; do sleep 1; done;
sleep 1 # wait for the completion of cache reclaim bio
VMOUT=$(vmtouch -v /tmp/master/dump.rdb)
echo $VMOUT
grep -q " 0%" <<< $VMOUT
CACHE=$(grep -w file /sys/fs/cgroup/memory.stat | awk '{print $2}')
echo "$CACHE"
if [ "$(( $CACHE-$CACHE0 ))" -gt "8000000" ]; then exit 1; fi
test-valgrind-test:
runs-on: ubuntu-latest
if: |
@ -294,6 +443,10 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
@ -320,6 +473,10 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
@ -332,11 +489,11 @@ jobs:
sudo apt-get install tcl8.6 tclx valgrind -y
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: ./runtest-moduleapi --valgrind --no-latency --verbose --clients 1 --timeout 2400 --dump-logs ${{github.event.inputs.test_args}}
run: CFLAGS='-Werror' ./runtest-moduleapi --valgrind --no-latency --verbose --clients 1 --timeout 2400 --dump-logs ${{github.event.inputs.test_args}}
- name: unittest
if: true && !contains(github.event.inputs.skiptests, 'unittest')
run: |
valgrind --track-origins=yes --suppressions=./src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full --log-file=err.txt ./src/redis-server test all
valgrind --track-origins=yes --suppressions=./src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full --log-file=err.txt ./src/redis-server test all --valgrind
if grep -q 0x err.txt; then cat err.txt; exit 1; fi
test-valgrind-no-malloc-usable-size-test:
@ -351,6 +508,10 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
@ -377,6 +538,10 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
@ -389,11 +554,11 @@ jobs:
sudo apt-get install tcl8.6 tclx valgrind -y
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: ./runtest-moduleapi --valgrind --no-latency --verbose --clients 1 --timeout 2400 --dump-logs ${{github.event.inputs.test_args}}
run: CFLAGS='-Werror' ./runtest-moduleapi --valgrind --no-latency --verbose --clients 1 --timeout 2400 --dump-logs ${{github.event.inputs.test_args}}
- name: unittest
if: true && !contains(github.event.inputs.skiptests, 'unittest')
run: |
valgrind --track-origins=yes --suppressions=./src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full --log-file=err.txt ./src/redis-server test all
valgrind --track-origins=yes --suppressions=./src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full --log-file=err.txt ./src/redis-server test all --valgrind
if grep -q 0x err.txt; then cat err.txt; exit 1; fi
test-sanitizer-address:
@ -413,22 +578,28 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
run: make SANITIZER=address REDIS_CFLAGS='-DREDIS_TEST -Werror'
run: make SANITIZER=address REDIS_CFLAGS='-DREDIS_TEST -Werror -DDEBUG_ASSERTIONS'
- name: testprep
# Work around ASAN issue, see https://github.com/google/sanitizers/issues/1716
run: |
sudo apt-get update
sudo apt-get install tcl8.6 tclx -y
sudo sysctl vm.mmap_rnd_bits=28
- name: test
if: true && !contains(github.event.inputs.skiptests, 'redis')
run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: sentinel tests
if: true && !contains(github.event.inputs.skiptests, 'sentinel')
run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}}
@ -456,6 +627,10 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
@ -471,7 +646,7 @@ jobs:
run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: sentinel tests
if: true && !contains(github.event.inputs.skiptests, 'sentinel')
run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}}
@ -495,6 +670,10 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
@ -510,7 +689,7 @@ jobs:
run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: sentinel tests
if: true && !contains(github.event.inputs.skiptests, 'sentinel')
run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}}
@ -531,6 +710,10 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
@ -551,7 +734,7 @@ jobs:
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: |
./runtest-moduleapi --verbose --dump-logs --tls-module --dump-logs ${{github.event.inputs.test_args}}
CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs --tls-module --dump-logs ${{github.event.inputs.test_args}}
- name: sentinel tests
if: true && !contains(github.event.inputs.skiptests, 'sentinel')
run: |
@ -574,6 +757,10 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
@ -594,7 +781,7 @@ jobs:
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: |
./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: sentinel tests
if: true && !contains(github.event.inputs.skiptests, 'sentinel')
run: |
@ -616,6 +803,10 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
@ -624,10 +815,10 @@ jobs:
run: make REDIS_CFLAGS='-Werror'
- name: test
if: true && !contains(github.event.inputs.skiptests, 'redis')
run: ./runtest --accurate --verbose --no-latency --dump-logs ${{github.event.inputs.test_args}}
run: ./runtest --accurate --verbose --clients 1 --no-latency --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: ./runtest-moduleapi --verbose --no-latency --dump-logs ${{github.event.inputs.test_args}}
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --clients 1 --no-latency --dump-logs ${{github.event.inputs.test_args}}
test-macos-latest-sentinel:
runs-on: macos-latest
@ -641,6 +832,10 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
@ -663,6 +858,10 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
@ -673,11 +872,40 @@ jobs:
if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}}
build-macos:
strategy:
matrix:
os: [macos-11, macos-13]
runs-on: ${{ matrix.os }}
if: |
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'macos')
timeout-minutes: 14400
steps:
- uses: maxim-lobanov/setup-xcode@v1
with:
xcode-version: latest
- name: prep
if: github.event_name == 'workflow_dispatch'
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
run: make REDIS_CFLAGS='-Werror -DREDIS_TEST'
test-freebsd:
runs-on: macos-12
if: |
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'freebsd') && !(contains(github.event.inputs.skiptests, 'redis') && contains(github.event.inputs.skiptests, 'modules'))
!contains(github.event.inputs.skipjobs, 'freebsd')
timeout-minutes: 14400
steps:
- name: prep
@ -690,70 +918,16 @@ jobs:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: test
uses: vmactions/freebsd-vm@v0.3.0
uses: cross-platform-actions/action@v0.22.0
with:
usesh: true
sync: rsync
copyback: false
prepare: pkg install -y bash gmake lang/tcl86 lang/tclx
run: >
gmake || exit 1 ;
if echo "${{github.event.inputs.skiptests}}" | grep -vq redis ; then ./runtest --verbose --timeout 2400 --no-latency --dump-logs ${{github.event.inputs.test_args}} || exit 1 ; fi ;
if echo "${{github.event.inputs.skiptests}}" | grep -vq modules ; then MAKE=gmake ./runtest-moduleapi --verbose --timeout 2400 --no-latency --dump-logs ${{github.event.inputs.test_args}} || exit 1 ; fi ;
test-freebsd-sentinel:
runs-on: macos-12
if: |
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'freebsd') && !contains(github.event.inputs.skiptests, 'sentinel')
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: test
uses: vmactions/freebsd-vm@v0.3.0
with:
usesh: true
sync: rsync
copyback: false
prepare: pkg install -y bash gmake lang/tcl86 lang/tclx
run: >
gmake || exit 1 ;
if echo "${{github.event.inputs.skiptests}}" | grep -vq sentinel ; then ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} || exit 1 ; fi ;
test-freebsd-cluster:
runs-on: macos-12
if: |
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'freebsd') && !contains(github.event.inputs.skiptests, 'cluster')
timeout-minutes: 14400
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: test
uses: vmactions/freebsd-vm@v0.3.0
with:
usesh: true
sync: rsync
copyback: false
prepare: pkg install -y bash gmake lang/tcl86 lang/tclx
run: >
gmake || exit 1 ;
if echo "${{github.event.inputs.skiptests}}" | grep -vq cluster ; then ./runtest-cluster ${{github.event.inputs.cluster_test_args}} || exit 1 ; fi ;
operating_system: freebsd
environment_variables: MAKE
version: 13.2
shell: bash
run: |
sudo pkg install -y bash gmake lang/tcl86 lang/tclx
gmake
./runtest --single unit/keyspace --single unit/auth --single unit/networking --single unit/protocol
test-alpine-jemalloc:
runs-on: ubuntu-latest
@ -767,6 +941,10 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
@ -782,7 +960,7 @@ jobs:
run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: sentinel tests
if: true && !contains(github.event.inputs.skiptests, 'sentinel')
run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}}
@ -802,6 +980,10 @@ jobs:
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
@ -817,10 +999,54 @@ jobs:
run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: sentinel tests
if: true && !contains(github.event.inputs.skiptests, 'sentinel')
run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}}
- name: cluster tests
if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}}
reply-schemas-validator:
runs-on: ubuntu-latest
timeout-minutes: 14400
if: |
(github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'redis/redis')) &&
!contains(github.event.inputs.skipjobs, 'reply-schema')
steps:
- name: prep
if: github.event_name == 'workflow_dispatch'
run: |
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV
echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV
echo "skipjobs: ${{github.event.inputs.skipjobs}}"
echo "skiptests: ${{github.event.inputs.skiptests}}"
echo "test_args: ${{github.event.inputs.test_args}}"
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}"
- uses: actions/checkout@v3
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- name: make
run: make REDIS_CFLAGS='-Werror -DLOG_REQ_RES'
- name: testprep
run: sudo apt-get install tcl8.6 tclx
- name: test
if: true && !contains(github.event.inputs.skiptests, 'redis')
run: ./runtest --log-req-res --no-latency --dont-clean --force-resp3 --tags -slow --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: module api test
if: true && !contains(github.event.inputs.skiptests, 'modules')
run: CFLAGS='-Werror' ./runtest-moduleapi --log-req-res --no-latency --dont-clean --force-resp3 --dont-pre-clean --verbose --dump-logs ${{github.event.inputs.test_args}}
- name: sentinel tests
if: true && !contains(github.event.inputs.skiptests, 'sentinel')
run: ./runtest-sentinel --log-req-res --dont-clean --force-resp3 ${{github.event.inputs.cluster_test_args}}
- name: cluster tests
if: true && !contains(github.event.inputs.skiptests, 'cluster')
run: ./runtest-cluster --log-req-res --dont-clean --force-resp3 ${{github.event.inputs.cluster_test_args}}
- name: Install Python dependencies
uses: py-actions/py-dependency-install@v4
with:
path: "./utils/req-res-validator/requirements.txt"
- name: validator
run: ./utils/req-res-log-validator.py --verbose --fail-missing-reply-schemas ${{ (!contains(github.event.inputs.skiptests, 'redis') && !contains(github.event.inputs.skiptests, 'module') && !contains(github.event.inputs.sentinel, 'redis') && !contains(github.event.inputs.skiptests, 'cluster')) && github.event.inputs.test_args == '' && github.event.inputs.cluster_test_args == '' && '--fail-commands-not-all-hit' || '' }}
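A rough local equivalent of the reply-schemas-validator steps above, using only the paths and flags that appear in this workflow (the skipjobs/skiptests plumbing and the conditional --fail-commands-not-all-hit flag are omitted):

pip install -r utils/req-res-validator/requirements.txt
make REDIS_CFLAGS='-Werror -DLOG_REQ_RES'
./runtest --log-req-res --no-latency --dont-clean --force-resp3 --tags -slow --verbose --dump-logs
./utils/req-res-log-validator.py --verbose --fail-missing-reply-schemas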


@ -23,6 +23,7 @@ jobs:
run: |
./runtest \
--host 127.0.0.1 --port 6379 \
--verbose \
--tags -slow
- name: Archive redis log
if: ${{ failure() }}
@ -49,6 +50,7 @@ jobs:
run: |
./runtest \
--host 127.0.0.1 --port 6379 \
--verbose \
--cluster-mode \
--tags -slow
- name: Archive redis log
@ -73,6 +75,7 @@ jobs:
run: |
./runtest \
--host 127.0.0.1 --port 6379 \
--verbose \
--tags "-slow -needs:debug"
- name: Archive redis log
if: ${{ failure() }}
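The hunks above only add --verbose to the external-server test runs; one of them can be reproduced locally against a self-started server. A sketch, assuming a locally built redis-server on the default port (--enable-debug-command is taken from the options used elsewhere in this commit, since the last hunk skips needs:debug tags otherwise):

./src/redis-server --port 6379 --daemonize yes --logfile /dev/null --enable-debug-command yes
./runtest --host 127.0.0.1 --port 6379 --verbose --tags -slow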


@ -0,0 +1,22 @@
name: Reply-schemas linter
on:
push:
paths:
- 'src/commands/*.json'
pull_request:
paths:
- 'src/commands/*.json'
jobs:
reply-schemas-linter:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Setup nodejs
uses: actions/setup-node@v4
- name: Install packages
run: npm install ajv
- name: linter
run: node ./utils/reply_schema_linter.js


@ -19,7 +19,7 @@ jobs:
uses: actions/checkout@v3
- name: pip cache
uses: actions/cache@v3
uses: actions/cache@v4
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}


@ -26,7 +26,7 @@ Examples of unacceptable behavior include:
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others private information, such as a physical or email
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
@ -89,7 +89,7 @@ Attribution
This Code of Conduct is adapted from the Contributor Covenant,
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by Mozillas code of conduct
Community Impact Guidelines were inspired by Mozilla's code of conduct
enforcement ladder.
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at


@ -1,20 +1,82 @@
Note: by contributing code to the Redis project in any form, including sending
a pull request via Github, a code fragment or patch via private email or
public discussion groups, you agree to release your code under the terms
of the BSD license that you can find in the COPYING file included in the Redis
source distribution. You will include BSD license in the COPYING file within
each source file that you contribute.
By contributing code to the Redis project in any form you agree to the Redis Software Grant and
Contributor License Agreement attached below. Only contributions made under the Redis Software Grant
and Contributor License Agreement may be accepted by Redis, and any contribution is subject to the
terms of the Redis dual-license under RSALv2/SSPLv1 as described in the LICENSE.txt file included in
the Redis source distribution.
# REDIS SOFTWARE GRANT AND CONTRIBUTOR LICENSE AGREEMENT
To specify the intellectual property license granted in any Contribution, Redis Ltd., ("**Redis**")
requires a Software Grant and Contributor License Agreement ("**Agreement**"). This Agreement is for
your protection as a contributor as well as the protection of Redis and its users; it does not
change your rights to use your own Contribution for any other purpose.
By making any Contribution, You accept and agree to the following terms and conditions for the
Contribution. Except for the license granted in this Agreement to Redis and the recipients of the
software distributed by Redis, You reserve all right, title, and interest in and to Your
Contribution.
1. **Definitions**
1.1. "**You**" (or "**Your**") means the copyright owner or legal entity authorized by the
copyright owner that is entering into this Agreement with Redis. For legal entities, the entity
making a Contribution and all other entities that Control, are Controlled by, or are under
common Control with that entity are considered to be a single contributor. For the purposes of
this definition, "**Control**" means (i) the power, direct or indirect, to cause the direction
or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty
percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
1.2. "**Contribution**" means the code, documentation, or any original work of authorship,
including any modifications or additions to an existing work described above.
2. "**Work**" means any software project stewarded by Redis.
3. **Grant of Copyright License**. Subject to the terms and conditions of this Agreement, You grant
to Redis and to the recipients of the software distributed by Redis a perpetual, worldwide,
non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare
derivative works of, publicly display, publicly perform, sublicense, and distribute Your
Contribution and such derivative works.
4. **Grant of Patent License**. Subject to the terms and conditions of this Agreement, You grant to
Redis and to the recipients of the software distributed by Redis a perpetual, worldwide,
non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent
license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable by You that are necessarily
infringed by Your Contribution alone or by a combination of Your Contribution with the Work to
which such Contribution was submitted. If any entity institutes patent litigation against You or
any other entity (including a cross-claim or counterclaim in a lawsuit) alleging that your
Contribution, or the Work to which you have contributed, constitutes a direct or contributory
patent infringement, then any patent licenses granted to the claimant entity under this Agreement
for that Contribution or Work terminate as of the date such litigation is filed.
5. **Representations and Warranties**. You represent and warrant that: (i) You are legally entitled
to grant the above licenses; and (ii) if You are an entity, each employee or agent designated by
You is authorized to submit the Contribution on behalf of You; and (iii) your Contribution is
Your original work, and that it will not infringe on any third party's intellectual property
right(s).
6. **Disclaimer**. You are not expected to provide support for Your Contribution, except to the
extent You desire to provide support. You may provide support for free, for a fee, or not at all.
Unless required by applicable law or agreed to in writing, You provide Your Contribution on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT,
MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE.
7. **Enforceability**. Nothing in this Agreement will be construed as creating any joint venture,
employment relationship, or partnership between You and Redis. If any provision of this Agreement
is held to be unenforceable, the remaining provisions of this Agreement will not be affected.
This represents the entire agreement between You and Redis relating to the Contribution.
# IMPORTANT: HOW TO USE REDIS GITHUB ISSUES
Github issues SHOULD ONLY BE USED to report bugs, and for DETAILED feature
requests. Everything else belongs to the Redis Google Group:
GitHub issues SHOULD ONLY BE USED to report bugs and for DETAILED feature
requests. Everything else should be asked on Discord:
https://groups.google.com/forum/m/#!forum/Redis-db
https://discord.com/invite/redis
PLEASE DO NOT POST GENERAL QUESTIONS that are not about bugs or suspected
bugs in the Github issues system. We'll be very happy to help you and provide
all the support in the mailing list.
bugs in the GitHub issues system. We'll be delighted to help you and provide
all the support on Discord.
There is also an active community of Redis users at Stack Overflow:
@ -33,24 +95,24 @@ straight away: if your feature is not a conceptual fit you'll lose a lot of
time writing the code without any reason. Start by posting in the mailing list
and creating an issue at Github with the description of, exactly, what you want
to accomplish and why. Use cases are important for features to be accepted.
Here you'll see if there is consensus about your idea.
Here you can see if there is consensus about your idea.
2. If in step 1 you get an acknowledgment from the project leaders, use the
following procedure to submit a patch:
a. Fork Redis on github ( https://docs.github.com/en/github/getting-started-with-github/fork-a-repo )
a. Fork Redis on GitHub ( https://docs.github.com/en/github/getting-started-with-github/fork-a-repo )
b. Create a topic branch (git checkout -b my_branch)
c. Push to your branch (git push origin my_branch)
d. Initiate a pull request on github ( https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request )
d. Initiate a pull request on GitHub ( https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request )
e. Done :)
3. Keep in mind that we are very overloaded, so issues and PRs sometimes wait
for a *very* long time. However this is not lack of interest, as the project
for a *very* long time. However this is not a lack of interest, as the project
gets more and more users, we find ourselves in a constant need to prioritize
certain issues/PRs over others. If you think your issue/PR is very important
try to popularize it, have other users commenting and sharing their point of
view and so forth. This helps.
view, and so forth. This helps.
4. For minor fixes just open a pull request on Github.
4. For minor fixes - open a pull request on GitHub.
Thanks!
Additional information on the RSALv2/SSPLv1 dual-license is also found in the LICENSE.txt file.

COPYING (deleted, 10 lines)

@ -1,10 +0,0 @@
Copyright (c) 2006-2020, Salvatore Sanfilippo
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Redis nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

LICENSE.txt (new file, 733 lines)

@ -0,0 +1,733 @@
Starting on March 20th, 2024, Redis follows a dual-licensing model with all Redis project code
contributions under version 7.4 and subsequent releases governed by the Redis Software Grant and
Contributor License Agreement. After this date, contributions are subject to the user's choice of
the Redis Source Available License v2 (RSALv2) or the Server Side Public License v1 (SSPLv1), as
follows:
1. Redis Source Available License 2.0 (RSALv2) Agreement
========================================================
Last Update: December 30, 2023
Acceptance
----------
This Agreement sets forth the terms and conditions on which the Licensor
makes available the Software. By installing, downloading, accessing,
Using, or distributing any of the Software, You agree to all of the
terms and conditions of this Agreement.
If You are receiving the Software on behalf of Your Company, You
represent and warrant that You have the authority to agree to this
Agreement on behalf of such entity.
The Licensor reserves the right to update this Agreement from time to
time.
The terms below have the meanings set forth below for purposes of this
Agreement:
Definitions
-----------
Agreement: this Redis Source Available License 2.0 Agreement.
Control: ownership, directly or indirectly, of substantially all the
assets of an entity, or the power to direct its management and policies
by vote, contract, or otherwise.
License: the License as described in the License paragraph below.
Licensor: the entity offering these terms, which includes Redis Ltd. on
behalf of itself and its subsidiaries and affiliates worldwide.
Modify, Modified, or Modification: copy from or adapt all or part of the
work in a fashion requiring copyright permission other than making an
exact copy. The resulting work is called a Modified version of the
earlier work.
Redis: the Redis software as described in redis.com redis.io.
Software: certain Software components designed to work with Redis and
provided to You under this Agreement.
Trademark: the trademarks, service marks, and any other similar rights.
Use: anything You do with the Software requiring one of Your Licenses.
You: the recipient of the Software, the individual or entity on whose
behalf You are agreeing to this Agreement.
Your Company: any legal entity, sole proprietorship, or other kind of
organization that You work for, plus all organizations that have control
over, are under the control of, or are under common control with that
organization.
Your Licenses: means all the Licenses granted to You for the Software
under this Agreement.
License
-------
The Licensor grants You a non-exclusive, royalty-free, worldwide,
non-sublicensable, non-transferable license to use, copy, distribute,
make available, and prepare derivative works of the Software, in each
case subject to the limitations and conditions below.
Limitations
-----------
You may not make the functionality of the Software or a Modified version
available to third parties as a service or distribute the Software or a
Modified version in a manner that makes the functionality of the
Software available to third parties.
Making the functionality of the Software or Modified version available
to third parties includes, without limitation, enabling third parties to
interact with the functionality of the Software or Modified version in
distributed form or remotely through a computer network, offering a
product or service, the value of which entirely or primarily derives
from the value of the Software or Modified version, or offering a
product or service that accomplishes for users the primary purpose of
the Software or Modified version.
You may not alter, remove, or obscure any licensing, copyright, or other
notices of the Licensor in the Software. Any use of the Licensor's
Trademarks is subject to applicable law.
Patents
-------
The Licensor grants You a License, under any patent claims the Licensor
can License, or becomes able to License, to make, have made, use, sell,
offer for sale, import and have imported the Software, in each case
subject to the limitations and conditions in this License. This License
does not cover any patent claims that You cause to be infringed by
Modifications or additions to the Software. If You or Your Company make
any written claim that the Software infringes or contributes to
infringement of any patent, your patent License for the Software granted
under this Agreement ends immediately. If Your Company makes such a
claim, your patent License ends immediately for work on behalf of Your
Company.
Notices
-------
You must ensure that anyone who gets a copy of any part of the Software
from You also gets a copy of the terms and conditions in this Agreement.
If You modify the Software, You must include in any Modified copies of
the Software prominent notices stating that You have Modified the
Software.
No Other Rights
---------------
The terms and conditions of this Agreement do not imply any Licenses
other than those expressly granted in this Agreement.
Termination
-----------
If You Use the Software in violation of this Agreement, such Use is not
Licensed, and Your Licenses will automatically terminate. If the
Licensor provides You with a notice of your violation, and You cease all
violations of this License no later than 30 days after You receive that
notice, Your Licenses will be reinstated retroactively. However, if You
violate this Agreement after such reinstatement, any additional
violation of this Agreement will cause your Licenses to terminate
automatically and permanently.
No Liability
------------
As far as the law allows, the Software comes as is, without any
warranty or condition, and the Licensor will not be liable to You for
any damages arising out of this Agreement or the Use or nature of the
Software, under any kind of legal claim.
Governing Law and Jurisdiction
------------------------------
If You are located in Asia, Pacific, Americas, or other jurisdictions
not listed below, the Agreement will be construed and enforced in all
respects in accordance with the laws of the State of California, U.S.A.,
without reference to its choice of law rules. The courts located in the
County of Santa Clara, California, have exclusive jurisdiction for all
purposes relating to this Agreement.
If You are located in Israel, the Agreement will be construed and
enforced in all respects in accordance with the laws of the State of
Israel without reference to its choice of law rules. The courts located
in the Central District of the State of Israel have exclusive
jurisdiction for all purposes relating to this Agreement.
If You are located in Europe, United Kingdom, Middle East or Africa, the
Agreement will be construed and enforced in all respects in accordance
with the laws of England and Wales without reference to its choice of
law rules. The competent courts located in London, England, have
exclusive jurisdiction for all purposes relating to this Agreement.
2. Server Side Public License (SSPL)
====================================
Server Side Public License
VERSION 1, OCTOBER 16, 2018
Copyright (c) 2018 MongoDB, Inc.
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to Server Side Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work in
a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based on
the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through a
computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices" to the
extent that it includes a convenient and prominently visible feature that
(1) displays an appropriate copyright notice, and (2) tells the user that
there is no warranty for the work (except to the extent that warranties
are provided), that licensees may convey the work under this License, and
how to view a copy of this License. If the interface presents a list of
user commands or options, such as a menu, a prominent item in the list
meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work for
making modifications to it. "Object code" means any non-source form of a
work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that is
widely used among developers working in that language. The "System
Libraries" of an executable work include anything, other than the work as
a whole, that (a) is included in the normal form of packaging a Major
Component, but which is not part of that Major Component, and (b) serves
only to enable use of the work with that Major Component, or to implement
a Standard Interface for which an implementation is available to the
public in source code form. A "Major Component", in this context, means a
major essential component (kernel, window system, and so on) of the
specific operating system (if any) on which the executable work runs, or
a compiler used to produce the work, or an object code interpreter used
to run it.
The "Corresponding Source" for a work in object code form means all the
source code needed to generate, install, and (for an executable work) run
the object code and to modify the work, including scripts to control
those activities. However, it does not include the work's System
Libraries, or general-purpose tools or generally available free programs
which are used unmodified in performing those activities but which are
not part of the work. For example, Corresponding Source includes
interface definition files associated with source files for the work, and
the source code for shared libraries and dynamically linked subprograms
that the work is specifically designed to require, such as by intimate
data communication or control flow between those subprograms and other
parts of the work.
The Corresponding Source need not include anything that users can
regenerate automatically from other parts of the Corresponding Source.
The Corresponding Source for a work in source code form is that same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program, subject to section 13. The
output from running a covered work is covered by this License only if the
output, given its content, constitutes a covered work. This License
acknowledges your rights of fair use or other equivalent, as provided by
copyright law. Subject to section 13, you may make, run and propagate
covered works that you do not convey, without conditions so long as your
license otherwise remains in force. You may convey covered works to
others for the sole purpose of having them make modifications exclusively
for you, or provide you with facilities for running those works, provided
that you comply with the terms of this License in conveying all
material for which you do not control copyright. Those thus making or
running the covered works for you must do so exclusively on your
behalf, under your direction and control, on terms that prohibit them
from making any copies of your copyrighted material outside their
relationship with you.
Conveying under any other circumstances is permitted solely under the
conditions stated below. Sublicensing is not allowed; section 10 makes it
unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article 11
of the WIPO copyright treaty adopted on 20 December 1996, or similar laws
prohibiting or restricting circumvention of such measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention is
effected by exercising rights under this License with respect to the
covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's users,
your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice; keep
intact all notices stating that this License and any non-permissive terms
added in accord with section 7 apply to the code; keep intact all notices
of the absence of any warranty; and give all recipients a copy of this
License along with the Program. You may charge any price or no price for
each copy that you convey, and you may offer support or warranty
protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the terms
of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified it,
and giving a relevant date.
b) The work must carry prominent notices stating that it is released
under this License and any conditions added under section 7. This
requirement modifies the requirement in section 4 to "keep intact all
notices".
c) You must license the entire work, as a whole, under this License to
anyone who comes into possession of a copy. This License will therefore
apply, along with any applicable section 7 additional terms, to the
whole of the work, and all its parts, regardless of how they are
packaged. This License gives no permission to license the work in any
other way, but it does not invalidate such permission if you have
separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your work
need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work, and
which are not combined with it such as to form a larger program, in or on
a volume of a storage or distribution medium, is called an "aggregate" if
the compilation and its resulting copyright are not used to limit the
access or legal rights of the compilation's users beyond what the
individual works permit. Inclusion of a covered work in an aggregate does
not cause this License to apply to the other parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms of
sections 4 and 5, provided that you also convey the machine-readable
Corresponding Source under the terms of this License, in one of these
ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium customarily
used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a written
offer, valid for at least three years and valid for as long as you
offer spare parts or customer support for that product model, to give
anyone who possesses the object code either (1) a copy of the
Corresponding Source for all the software in the product that is
covered by this License, on a durable physical medium customarily used
for software interchange, for a price no more than your reasonable cost
of physically performing this conveying of source, or (2) access to
copy the Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This alternative is
allowed only occasionally and noncommercially, and only if you received
the object code with such an offer, in accord with subsection 6b.
d) Convey the object code by offering access from a designated place
(gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to copy
the object code is a network server, the Corresponding Source may be on
a different server (operated by you or a third party) that supports
equivalent copying facilities, provided you maintain clear directions
next to the object code saying where to find the Corresponding Source.
Regardless of what server hosts the Corresponding Source, you remain
obligated to ensure that it is available for as long as needed to
satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided you
inform other peers where the object code and Corresponding Source of
the work are being offered to the general public at no charge under
subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be included
in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as part
of a transaction in which the right of possession and use of the User
Product is transferred to the recipient in perpetuity or for a fixed term
(regardless of how the transaction is characterized), the Corresponding
Source conveyed under this section must be accompanied by the
Installation Information. But this requirement does not apply if neither
you nor any third party retains the ability to install modified object
code on the User Product (for example, the work has been installed in
ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access
to a network may be denied when the modification itself materially
and adversely affects the operation of the network or violates the
rules and protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided, in
accord with this section must be in a format that is publicly documented
(and with an implementation available to the public in source code form),
and must require no special password or key for unpacking, reading or
copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall be
treated as though they were included in this License, to the extent that
they are valid under applicable law. If additional permissions apply only
to part of the Program, that part may be used separately under those
permissions, but the entire Program remains governed by this License
without regard to the additional permissions. When you convey a copy of
a covered work, you may at your option remove any additional permissions
from that copy, or from any part of it. (Additional permissions may be
written to require their own removal in certain cases when you modify the
work.) You may place additional permissions on material, added by you to
a covered work, for which you have or can give appropriate copyright
permission.
Notwithstanding any other provision of this License, for material you add
to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some trade
names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that material
by anyone who conveys the material (or modified versions of it) with
contractual assumptions of liability to the recipient, for any
liability that these contractual assumptions directly impose on those
licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further restriction,
you may remove that term. If a license document contains a further
restriction but permits relicensing or conveying under this License, you
may add to a covered work material governed by the terms of that license
document, provided that the further restriction does not survive such
relicensing or conveying.
If you add terms to a covered work in accord with this section, you must
place, in the relevant source files, a statement of the additional terms
that apply to those files, or a notice indicating where to find the
applicable terms. Additional terms, permissive or non-permissive, may be
stated in the form of a separately written license, or stated as
exceptions; the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or modify
it is void, and will automatically terminate your rights under this
License (including any patent licenses granted under the third paragraph
of section 11).
However, if you cease all violation of this License, then your license
from a particular copyright holder is reinstated (a) provisionally,
unless and until the copyright holder explicitly and finally terminates
your license, and (b) permanently, if the copyright holder fails to
notify you of the violation by some reasonable means prior to 60 days
after the cessation.
Moreover, your license from a particular copyright holder is reinstated
permanently if the copyright holder notifies you of the violation by some
reasonable means, this is the first time you have received notice of
violation of this License (for any work) from that copyright holder, and
you cure the violation prior to 30 days after your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or run a
copy of the Program. Ancillary propagation of a covered work occurring
solely as a consequence of using peer-to-peer transmission to receive a
copy likewise does not require acceptance. However, nothing other than
this License grants you permission to propagate or modify any covered
work. These actions infringe copyright if you do not accept this License.
Therefore, by modifying or propagating a covered work, you indicate your
acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically receives
a license from the original licensors, to run, modify and propagate that
work, subject to this License. You are not responsible for enforcing
compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered work
results from an entity transaction, each party to that transaction who
receives a copy of the work also receives whatever licenses to the work
the party's predecessor in interest had or could give under the previous
paragraph, plus a right to possession of the Corresponding Source of the
work from the predecessor in interest, if the predecessor has it or can
get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the rights
granted or affirmed under this License. For example, you may not impose a
license fee, royalty, or other charge for exercise of rights granted
under this License, and you may not initiate litigation (including a
cross-claim or counterclaim in a lawsuit) alleging that any patent claim
is infringed by making, using, selling, offering for sale, or importing
the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The work
thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims owned or
controlled by the contributor, whether already acquired or hereafter
acquired, that would be infringed by some manner, permitted by this
License, of making, using, or selling its contributor version, but do not
include claims that would be infringed only as a consequence of further
modification of the contributor version. For purposes of this definition,
"control" includes the right to grant patent sublicenses in a manner
consistent with the requirements of this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to make,
use, sell, offer for sale, import and otherwise run, modify and propagate
the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a party
means to make such an agreement or commitment not to enforce a patent
against the party.
If you convey a covered work, knowingly relying on a patent license, and
the Corresponding Source of the work is not available for anyone to copy,
free of charge and under the terms of this License, through a publicly
available network server or other readily accessible means, then you must
either (1) cause the Corresponding Source to be so available, or (2)
arrange to deprive yourself of the benefit of the patent license for this
particular work, or (3) arrange, in a manner consistent with the
requirements of this License, to extend the patent license to downstream
recipients. "Knowingly relying" means you have actual knowledge that, but
for the patent license, your conveying the covered work in a country, or
your recipient's use of the covered work in a country, would infringe
one or more identifiable patents in that country that you have reason
to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties receiving
the covered work authorizing them to use, propagate, modify or convey a
specific copy of the covered work, then the patent license you grant is
automatically extended to all recipients of the covered work and works
based on it.
A patent license is "discriminatory" if it does not include within the
scope of its coverage, prohibits the exercise of, or is conditioned on
the non-exercise of one or more of the rights that are specifically
granted under this License. You may not convey a covered work if you are
a party to an arrangement with a third party that is in the business of
distributing software, under which you make payment to the third party
based on the extent of your activity of conveying the work, and under
which the third party grants, to any of the parties who would receive the
covered work from you, a discriminatory patent license (a) in connection
with copies of the covered work conveyed by you (or copies made from
those copies), or (b) primarily for and in connection with specific
products or compilations that contain the covered work, unless you
entered into that arrangement, or that patent license was granted, prior
to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting any
implied license or other defenses to infringement that may otherwise be
available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot use,
propagate or convey a covered work so as to satisfy simultaneously your
obligations under this License and any other pertinent obligations, then
as a consequence you may not use, propagate or convey it at all. For
example, if you agree to terms that obligate you to collect a royalty for
further conveying from those to whom you convey the Program, the only way
you could satisfy both those terms and this License would be to refrain
entirely from conveying the Program.
13. Offering the Program as a Service.
If you make the functionality of the Program or a modified version
available to third parties as a service, you must make the Service Source
Code available via network download to everyone at no charge, under the
terms of this License. Making the functionality of the Program or
modified version available to third parties as a service includes,
without limitation, enabling third parties to interact with the
functionality of the Program or modified version remotely through a
computer network, offering a service the value of which entirely or
primarily derives from the value of the Program or modified version, or
offering a service that accomplishes for users the primary purpose of the
Program or modified version.
"Service Source Code" means the Corresponding Source for the Program or
the modified version, and the Corresponding Source for all programs that
you use to make the Program or modified version available as a service,
including, without limitation, management software, user interfaces,
application program interfaces, automation software, monitoring software,
backup software, storage software and hosting software, all such that a
user could run an instance of the service using the Service Source Code
you make available.
14. Revised Versions of this License.
MongoDB, Inc. may publish revised and/or new versions of the Server Side
Public License from time to time. Such new versions will be similar in
spirit to the present version, but may differ in detail to address new
problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies that a certain numbered version of the Server Side Public
License "or any later version" applies to it, you have the option of
following the terms and conditions either of that numbered version or of
any later version published by MongoDB, Inc. If the Program does not
specify a version number of the Server Side Public License, you may
choose any version ever published by MongoDB, Inc.
If the Program specifies that a proxy can decide which future versions of
the Server Side Public License can be used, that proxy's public statement
of acceptance of a version permanently authorizes you to choose that
version for the Program.
Later license versions may give you additional or different permissions.
However, no additional obligations are imposed on any author or copyright
holder as a result of your choosing to follow a later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING
ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF
THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO
LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU
OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided above
cannot be given local legal effect according to their terms, reviewing
courts shall apply local law that most closely approximates an absolute
waiver of all civil liability in connection with the Program, unless a
warranty or assumption of liability accompanies a copy of the Program in
return for a fee.
END OF TERMS AND CONDITIONS

View File

@ -217,19 +217,29 @@ You'll be able to stop and start Redis using the script named
Code contributions
-----------------
Note: By contributing code to the Redis project in any form, including sending
a pull request via Github, a code fragment or patch via private email or
public discussion groups, you agree to release your code under the terms
of the BSD license that you can find in the [COPYING][1] file included in the Redis
source distribution.
By contributing code to the Redis project in any form, including sending a pull request via GitHub,
a code fragment or patch via private email or public discussion groups, you agree to release your
code under the terms of the [Redis Software Grant and Contributor License Agreement][1]. Redis software
contains contributions to the original Redis core project, which are owned by their contributors and
licensed under the 3BSD license. Any copy of that license in this repository applies only to those
contributions. Redis releases all Redis project versions from 7.4.x and thereafter under the
RSALv2/SSPL dual-license as described in the [LICENSE.txt][2] file included in the Redis source distribution.
Please see the [CONTRIBUTING.md][2] file in this source distribution for more
information. For security bugs and vulnerabilities, please see [SECURITY.md][3].
Please see the [CONTRIBUTING.md][1] file in this source distribution for more information. For
security bugs and vulnerabilities, please see [SECURITY.md][3].
[1]: https://github.com/redis/redis/blob/unstable/COPYING
[2]: https://github.com/redis/redis/blob/unstable/CONTRIBUTING.md
[1]: https://github.com/redis/redis/blob/unstable/CONTRIBUTING.md
[2]: https://github.com/redis/redis/blob/unstable/LICENSE.txt
[3]: https://github.com/redis/redis/blob/unstable/SECURITY.md
Redis Trademarks
----------------
The purpose of a trademark is to identify the goods and services of a person or company without
causing confusion. As the registered owner of its name and logo, Redis accepts certain limited uses
of its trademarks but it has requirements that must be followed as described in its Trademark
Guidelines available at: https://redis.com/legal/trademark-guidelines/.
Redis internals
===
@ -420,7 +430,7 @@ implementations are the following:
* `lookupKeyRead()` and `lookupKeyWrite()` are used in order to get a pointer to the value associated to a given key, or `NULL` if the key does not exist.
* `dbAdd()` and its higher level counterpart `setKey()` create a new key in a Redis database.
* `dbDelete()` removes a key and its associated value.
* `emptyDb()` removes an entire single database or all the databases defined.
* `emptyData()` removes an entire single database or all the databases defined.
The rest of the file implements the generic commands exposed to the client.
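To make the shape of this API concrete, here is a toy, self-contained sketch of a key space with the same lookup/add/delete/empty operations described in the list above. It is purely illustrative: the names and signatures are simplified stand-ins, not the real prototypes from `db.c` (the real helpers operate on `redisDb` and `robj` values and also handle expiration, keyspace notifications, dirty counters, and so on).

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DB_SLOTS 64

typedef struct entry {
    char *key;
    char *val;
    struct entry *next;
} entry;

typedef struct toyDb {
    entry *slots[DB_SLOTS];
} toyDb;

static unsigned hashKey(const char *k) {
    unsigned h = 5381;
    while (*k) h = h * 33u + (unsigned char)*k++;
    return h % DB_SLOTS;
}

/* Analogue of lookupKeyRead()/lookupKeyWrite(): return the value for
 * the key, or NULL if it does not exist. */
static char *lookupKey(toyDb *db, const char *key) {
    for (entry *e = db->slots[hashKey(key)]; e; e = e->next)
        if (strcmp(e->key, key) == 0) return e->val;
    return NULL;
}

/* Analogue of dbAdd(): create a new key (caller ensures it is absent). */
static void dbAddToy(toyDb *db, const char *key, const char *val) {
    unsigned h = hashKey(key);
    entry *e = malloc(sizeof(*e));
    e->key = strdup(key);
    e->val = strdup(val);
    e->next = db->slots[h];
    db->slots[h] = e;
}

/* Analogue of dbDelete(): remove a key and its value, return 1 if found. */
static int dbDeleteToy(toyDb *db, const char *key) {
    for (entry **p = &db->slots[hashKey(key)]; *p; p = &(*p)->next) {
        if (strcmp((*p)->key, key) == 0) {
            entry *e = *p;
            *p = e->next;
            free(e->key); free(e->val); free(e);
            return 1;
        }
    }
    return 0;
}

/* Analogue of emptyData(): drop every key in the database. */
static void emptyDataToy(toyDb *db) {
    for (int i = 0; i < DB_SLOTS; i++)
        while (db->slots[i]) dbDeleteToy(db, db->slots[i]->key);
}

int main(void) {
    toyDb db = {{0}};
    dbAddToy(&db, "greeting", "hello");
    printf("greeting -> %s\n", lookupKey(&db, "greeting"));            /* hello */
    printf("deleted: %d\n", dbDeleteToy(&db, "greeting"));             /* 1 */
    printf("exists: %s\n", lookupKey(&db, "greeting") ? "yes" : "no"); /* no */
    emptyDataToy(&db);
    return 0;
}
```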

30
REDISCONTRIBUTIONS.txt Normal file
View File

@ -0,0 +1,30 @@
Copyright (c) 2006-Present, Redis Ltd. and Contributors
All rights reserved.
Note: Continued Applicability of the BSD-3-Clause License
Despite the shift to the dual-licensing model with Redis version 7.4 (RSALv2 or SSPLv1), portions of
Redis remain available subject to the BSD-3-Clause License (BSD). See below for the full BSD
license:
Redistribution and use in source and binary forms, with or without modification, are permitted
provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions
and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
and the following disclaimer in the documentation and/or other materials provided with the
distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse
or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -11,17 +11,17 @@ unless this is not possible or feasible with a reasonable effort.
| Version | Supported |
| ------- | ------------------ |
| 7.2.x | :white_check_mark: |
| 7.0.x | :white_check_mark: |
| 6.2.x | :white_check_mark: |
| 6.0.x | :white_check_mark: |
| < 6.0 | :x: |
| < 6.2 | :x: |
## Reporting a Vulnerability
If you believe you’ve discovered a serious vulnerability, please contact the
If you believe you've discovered a serious vulnerability, please contact the
Redis core team at redis@redis.io. We will evaluate your report and if
necessary issue a fix and an advisory. If the issue was previously undisclosed,
we’ll also mention your name in the credits.
we'll also mention your name in the credits.
## Responsible Disclosure
@ -36,7 +36,7 @@ embargo on public disclosure.
Vendors on the list are individuals or organizations that maintain Redis
distributions or provide Redis as a service, who have third party users who
will benefit from the vendor’s ability to prepare for a new version or deploy a
will benefit from the vendor's ability to prepare for a new version or deploy a
fix early.
If you believe you should be on the list, please contact us and we will

14
deps/Makefile vendored
View File

@ -3,6 +3,7 @@
uname_S:= $(shell sh -c 'uname -s 2>/dev/null || echo not')
LUA_DEBUG?=no
LUA_COVERAGE?=no
CCCOLOR="\033[34m"
LINKCOLOR="\033[34;1m"
@ -85,6 +86,11 @@ ifeq ($(LUA_DEBUG),yes)
else
LUA_CFLAGS+= -O2
endif
ifeq ($(LUA_COVERAGE),yes)
LUA_CFLAGS += -fprofile-arcs -ftest-coverage
LUA_LDFLAGS += -fprofile-arcs -ftest-coverage
endif
# lua's Makefile defines AR="ar rcu", which is unusual, and makes it more
# challenging to cross-compile lua (and redis). These defines make it easier
# to fit redis into cross-compilation environments, which typically set AR.
@ -97,8 +103,8 @@ lua: .make-prerequisites
.PHONY: lua
JEMALLOC_CFLAGS= -std=gnu99 -Wall -pipe -g3 -O3 -funroll-loops $(CFLAGS)
JEMALLOC_LDFLAGS= $(LDFLAGS)
JEMALLOC_CFLAGS=$(CFLAGS)
JEMALLOC_LDFLAGS=$(LDFLAGS)
ifneq ($(DEB_HOST_GNU_TYPE),)
JEMALLOC_CONFIGURE_OPTS += --host=$(DEB_HOST_GNU_TYPE)
@ -106,7 +112,7 @@ endif
jemalloc: .make-prerequisites
@printf '%b %b\n' $(MAKECOLOR)MAKE$(ENDCOLOR) $(BINCOLOR)$@$(ENDCOLOR)
cd jemalloc && ./configure --with-version=5.2.1-0-g0 --with-lg-quantum=3 --with-jemalloc-prefix=je_ CFLAGS="$(JEMALLOC_CFLAGS)" LDFLAGS="$(JEMALLOC_LDFLAGS)" $(JEMALLOC_CONFIGURE_OPTS)
cd jemalloc && $(MAKE) CFLAGS="$(JEMALLOC_CFLAGS)" LDFLAGS="$(JEMALLOC_LDFLAGS)" lib/libjemalloc.a
cd jemalloc && ./configure --disable-cxx --with-version=5.3.0-0-g0 --with-lg-quantum=3 --disable-cache-oblivious --with-jemalloc-prefix=je_ CFLAGS="$(JEMALLOC_CFLAGS)" LDFLAGS="$(JEMALLOC_LDFLAGS)" $(JEMALLOC_CONFIGURE_OPTS)
cd jemalloc && $(MAKE) lib/libjemalloc.a
.PHONY: jemalloc

9
deps/README.md vendored
View File

@ -61,12 +61,11 @@ cd deps/jemalloc
Hiredis
---
Hiredis uses the SDS string library, that must be the same version used inside Redis itself. Hiredis is also very critical for Sentinel. Historically Redis often used forked versions of hiredis in a way or the other. In order to upgrade it is advised to take a lot of care:
Hiredis is used by Sentinel, `redis-cli` and `redis-benchmark`. Like Redis, it uses the SDS string library, but not necessarily the same version. In order to avoid conflicts, this version has all SDS identifiers prefixed by `hi`.
1. Check with diff if hiredis API changed and what impact it could have in Redis.
2. Make sure that the SDS library inside Hiredis and inside Redis are compatible.
3. After the upgrade, run the Redis Sentinel test.
4. Check manually that redis-cli and redis-benchmark behave as expected, since we have no tests for CLI utilities currently.
1. `git subtree pull --prefix deps/hiredis https://github.com/redis/hiredis.git <version-tag> --squash`<br>
This should hopefully merge the local changes into the new version.
2. Conflicts will arise (due to our changes); you'll need to resolve them and commit.
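For context on what this dependency provides, below is a minimal sketch of the synchronous hiredis client API that `redis-cli`, `redis-benchmark` and Sentinel build on. It is an illustration only: it assumes a Redis server listening on 127.0.0.1:6379 and a system-installed header path, and trims most error handling.

```c
#include <stdio.h>
#include <hiredis/hiredis.h>   /* adjust the include path when building against deps/hiredis */

int main(void) {
    /* Connect to a local server; on failure c is NULL or c->err is set. */
    redisContext *c = redisConnect("127.0.0.1", 6379);
    if (c == NULL || c->err) {
        fprintf(stderr, "connect failed: %s\n", c ? c->errstr : "OOM");
        return 1;
    }

    /* Issue a command; the reply is a tree of redisReply objects. */
    redisReply *reply = redisCommand(c, "SET %s %s", "greeting", "hello");
    if (reply) {
        printf("SET -> %s\n", reply->str);   /* "OK" */
        freeReplyObject(reply);
    }

    reply = redisCommand(c, "GET %s", "greeting");
    if (reply) {
        if (reply->type == REDIS_REPLY_STRING)
            printf("GET -> %s\n", reply->str);   /* "hello" */
        freeReplyObject(reply);
    }

    redisFree(c);
    return 0;
}
```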
Linenoise
---

View File

@ -0,0 +1,49 @@
name-template: '$NEXT_MAJOR_VERSION'
tag-template: 'v$NEXT_MAJOR_VERSION'
autolabeler:
- label: 'maintenance'
files:
- '*.md'
- '.github/*'
- label: 'bug'
branch:
- '/bug-.+'
- label: 'maintenance'
branch:
- '/maintenance-.+'
- label: 'feature'
branch:
- '/feature-.+'
categories:
- title: 'Breaking Changes'
labels:
- 'breakingchange'
- title: '🧪 Experimental Features'
labels:
- 'experimental'
- title: '🚀 New Features'
labels:
- 'feature'
- 'enhancement'
- title: '🐛 Bug Fixes'
labels:
- 'fix'
- 'bugfix'
- 'bug'
- 'BUG'
- title: '🧰 Maintenance'
label: 'maintenance'
change-template: '- $TITLE @$AUTHOR (#$NUMBER)'
exclude-labels:
- 'skip-changelog'
template: |
## Changes
$CHANGES
## Contributors
We'd like to thank all the contributors who worked on this release!
$CONTRIBUTORS

View File

@ -6,24 +6,21 @@ jobs:
name: Ubuntu
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- uses: actions/checkout@v3
- name: Install dependencies
run: |
sudo add-apt-repository -y ppa:chris-lea/redis-server
curl -fsSL https://packages.redis.io/gpg | sudo gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/redis.list
sudo apt-get update
sudo apt-get install -y redis-server valgrind libevent-dev
- name: Build using cmake
env:
env:
EXTRA_CMAKE_OPTS: -DENABLE_EXAMPLES:BOOL=ON -DENABLE_SSL:BOOL=ON -DENABLE_SSL_TESTS:BOOL=ON -DENABLE_ASYNC_TESTS:BOOL=ON
CFLAGS: -Werror
CXXFLAGS: -Werror
run: mkdir build-ubuntu && cd build-ubuntu && cmake ..
run: mkdir build && cd build && cmake .. && make
- name: Build using makefile
run: USE_SSL=1 TEST_ASYNC=1 make
@ -45,11 +42,7 @@ jobs:
runs-on: ubuntu-latest
container: centos:7
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- uses: actions/checkout@v3
- name: Install dependencies
run: |
@ -58,11 +51,11 @@ jobs:
yum -y install gcc gcc-c++ make openssl openssl-devel cmake3 valgrind libevent-devel
- name: Build using cmake
env:
env:
EXTRA_CMAKE_OPTS: -DENABLE_EXAMPLES:BOOL=ON -DENABLE_SSL:BOOL=ON -DENABLE_SSL_TESTS:BOOL=ON -DENABLE_ASYNC_TESTS:BOOL=ON
CFLAGS: -Werror
CXXFLAGS: -Werror
run: mkdir build-centos7 && cd build-centos7 && cmake3 ..
run: mkdir build && cd build && cmake3 .. && make
- name: Build using Makefile
run: USE_SSL=1 TEST_ASYNC=1 make
@ -85,25 +78,22 @@ jobs:
runs-on: ubuntu-latest
container: rockylinux:8
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- uses: actions/checkout@v3
- name: Install dependencies
run: |
dnf -y upgrade --refresh
dnf -y install https://rpms.remirepo.net/enterprise/remi-release-8.rpm
dnf -y module install redis:remi-6.0
dnf -y group install "Development Tools"
dnf -y install openssl-devel cmake valgrind libevent-devel
- name: Build using cmake
env:
env:
EXTRA_CMAKE_OPTS: -DENABLE_EXAMPLES:BOOL=ON -DENABLE_SSL:BOOL=ON -DENABLE_SSL_TESTS:BOOL=ON -DENABLE_ASYNC_TESTS:BOOL=ON
CFLAGS: -Werror
CXXFLAGS: -Werror
run: mkdir build-centos8 && cd build-centos8 && cmake ..
run: mkdir build && cd build && cmake .. && make
- name: Build using Makefile
run: USE_SSL=1 TEST_ASYNC=1 make
@ -122,17 +112,13 @@ jobs:
run: $GITHUB_WORKSPACE/test.sh
freebsd:
runs-on: macos-10.15
runs-on: macos-12
name: FreeBSD
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- uses: actions/checkout@v3
- name: Build in FreeBSD
uses: vmactions/freebsd-vm@v0.1.5
uses: vmactions/freebsd-vm@v0
with:
prepare: pkg install -y gmake cmake
run: |
@ -143,15 +129,12 @@ jobs:
name: macOS
runs-on: macos-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- uses: actions/checkout@v3
- name: Install dependencies
run: |
brew install openssl redis
brew install openssl redis@7.0
brew link redis@7.0 --force
- name: Build hiredis
run: USE_SSL=1 make
@ -165,11 +148,7 @@ jobs:
name: Windows
runs-on: windows-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
repository: ${{ env.GITHUB_REPOSITORY }}
ref: ${{ env.GITHUB_HEAD_REF }}
- uses: actions/checkout@v3
- name: Install dependencies
run: |
@ -186,20 +165,13 @@ jobs:
run: |
./build/hiredis-test.exe
- name: Setup cygwin
uses: egor-tensin/setup-cygwin@v3
- name: Install Cygwin Action
uses: cygwin/cygwin-install-action@v2
with:
platform: x64
packages: make git gcc-core
- name: Build in cygwin
env:
HIREDIS_PATH: ${{ github.workspace }}
run: |
build_hiredis() {
cd $(cygpath -u $HIREDIS_PATH)
git clean -xfd
make
}
build_hiredis
shell: C:\tools\cygwin\bin\bash.exe --login --norc -eo pipefail -o igncr '{0}'
make clean && make

View File

@ -0,0 +1,19 @@
name: Release Drafter
on:
push:
# branches to consider in the event; optional, defaults to all
branches:
- master
jobs:
update_release_draft:
runs-on: ubuntu-latest
steps:
# Drafts your next Release notes as Pull Requests are merged into "master"
- uses: release-drafter/release-drafter@v5
with:
# (Optional) specify config name to use, relative to .github/. Default: release-drafter.yml
config-name: release-drafter-config.yml
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

100
deps/hiredis/.github/workflows/test.yml vendored Normal file
View File

@ -0,0 +1,100 @@
name: C/C++ CI
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
full-build:
name: Build all, plus default examples, run tests against redis
runs-on: ubuntu-latest
env:
# the docker image used by the test.sh
REDIS_DOCKER: redis:alpine
steps:
- name: Install prerequisites
run: sudo apt-get update && sudo apt-get install -y libev-dev libevent-dev libglib2.0-dev libssl-dev valgrind
- uses: actions/checkout@v3
- name: Run make
run: make all examples
- name: Run unittests
run: make check
- name: Run tests with valgrind
env:
TEST_PREFIX: valgrind --error-exitcode=100
SKIPS_ARG: --skip-throughput
run: make check
build-32-bit:
name: Build and test minimal 32 bit linux
runs-on: ubuntu-latest
steps:
- name: Install prerequisites
run: sudo apt-get update && sudo apt-get install gcc-multilib
- uses: actions/checkout@v3
- name: Run make
run: make all
env:
PLATFORM_FLAGS: -m32
- name: Run unittests
env:
REDIS_DOCKER: redis:alpine
run: make check
build-arm:
name: Cross-compile and test arm linux with Qemu
runs-on: ubuntu-latest
strategy:
matrix:
include:
- name: arm
toolset: arm-linux-gnueabi
emulator: qemu-arm
- name: aarch64
toolset: aarch64-linux-gnu
emulator: qemu-aarch64
steps:
- name: Install qemu
if: matrix.emulator
run: sudo apt-get update && sudo apt-get install -y qemu-user
- name: Install platform toolset
if: matrix.toolset
run: sudo apt-get install -y gcc-${{matrix.toolset}}
- uses: actions/checkout@v3
- name: Run make
run: make all
env:
CC: ${{matrix.toolset}}-gcc
AR: ${{matrix.toolset}}-ar
- name: Run unittests
env:
REDIS_DOCKER: redis:alpine
TEST_PREFIX: ${{matrix.emulator}} -L /usr/${{matrix.toolset}}/
run: make check
build-windows:
name: Build and test on windows 64 bit Intel
runs-on: windows-latest
steps:
- uses: microsoft/setup-msbuild@v1.0.2
- uses: actions/checkout@v3
- name: Run CMake (shared lib)
run: cmake -Wno-dev CMakeLists.txt
- name: Build hiredis (shared lib)
run: MSBuild hiredis.vcxproj /p:Configuration=Debug
- name: Run CMake (static lib)
run: cmake -Wno-dev CMakeLists.txt -DBUILD_SHARED_LIBS=OFF
- name: Build hiredis (static lib)
run: MSBuild hiredis.vcxproj /p:Configuration=Debug
- name: Build hiredis-test
run: MSBuild hiredis-test.vcxproj /p:Configuration=Debug
# use memurai, redis compatible server, since it is easy to install. Can't
# install official redis containers on the windows runner
- name: Install Memurai redis server
run: choco install -y memurai-developer.install
- name: Run tests
run: Debug\hiredis-test.exe

View File

@ -1,3 +1,219 @@
## [1.2.0](https://github.com/redis/hiredis/tree/v1.2.0) - (2023-06-04)
Announcing Hiredis v1.2.0 with new adapters and a great many bug fixes.
## 🚀 New Features
- Add sdevent adapter @Oipo (#1144)
- Allow specifying the keepalive interval @michael-grunder (#1168)
- Add RedisModule adapter @tezc (#1182)
- Helper for setting TCP_USER_TIMEOUT socket option @zuiderkwast (#1188)
## 🐛 Bug Fixes
- Fix a typo in b6a052f. @yossigo (#1190)
- Fix wincrypt symbols conflict @hudayou (#1151)
- Don't attempt to set a timeout if we are in an error state. @michael-grunder (#1180)
- Accept -nan per the RESP3 spec recommendation. @michael-grunder (#1178)
- Fix colliding option values @zuiderkwast (#1172)
- Ensure functionality without `_MSC_VER` definition @windyakin (#1194)
## 🧰 Maintenance
- Add a test for the TCP_USER_TIMEOUT option. @michael-grunder (#1192)
- Add -Werror as a default. @yossigo (#1193)
- CI: Update homebrew Redis version. @yossigo (#1191)
- Fix typo in makefile. @michael-grunder (#1179)
- Write a version file for the CMake package @Neverlord (#1165)
- CMakeLists.txt: respect BUILD_SHARED_LIBS @ffontaine (#1147)
- Cmake static or shared @autoantwort (#1160)
- fix typo @tillkruss (#1153)
- Add a test ensuring we don't clobber connection error. @michael-grunder (#1181)
- Search for openssl on macOS @michael-grunder (#1169)
## Contributors
We'd like to thank all the contributors who worked on this release!
<a href="https://github.com/neverlord"><img src="https://github.com/neverlord.png" width="32" height="32"></a>
<a href="https://github.com/Oipo"><img src="https://github.com/Oipo.png" width="32" height="32"></a>
<a href="https://github.com/autoantwort"><img src="https://github.com/autoantwort.png" width="32" height="32"></a>
<a href="https://github.com/ffontaine"><img src="https://github.com/ffontaine.png" width="32" height="32"></a>
<a href="https://github.com/hudayou"><img src="https://github.com/hudayou.png" width="32" height="32"></a>
<a href="https://github.com/michael-grunder"><img src="https://github.com/michael-grunder.png" width="32" height="32"></a>
<a href="https://github.com/postgraph"><img src="https://github.com/postgraph.png" width="32" height="32"></a>
<a href="https://github.com/tezc"><img src="https://github.com/tezc.png" width="32" height="32"></a>
<a href="https://github.com/tillkruss"><img src="https://github.com/tillkruss.png" width="32" height="32"></a>
<a href="https://github.com/vityafx"><img src="https://github.com/vityafx.png" width="32" height="32"></a>
<a href="https://github.com/windyakin"><img src="https://github.com/windyakin.png" width="32" height="32"></a>
<a href="https://github.com/yossigo"><img src="https://github.com/yossigo.png" width="32" height="32"></a>
<a href="https://github.com/zuiderkwast"><img src="https://github.com/zuiderkwast.png" width="32" height="32"></a>
## [1.1.0](https://github.com/redis/hiredis/tree/v1.1.0) - (2022-11-15)
Announcing Hiredis v1.1.0 GA with better SSL convenience, new async adapters and a great many bug fixes.
**NOTE**: Hiredis can now return `nan` in addition to `-inf` and `inf` when returning a `REDIS_REPLY_DOUBLE`.
## 🐛 Bug Fixes
- Add support for nan in RESP3 double [@filipecosta90](https://github.com/filipecosta90)
([\#1133](https://github.com/redis/hiredis/pull/1133))
## 🧰 Maintenance
- Add an example that calls redisCommandArgv [@michael-grunder](https://github.com/michael-grunder)
([\#1140](https://github.com/redis/hiredis/pull/1140))
- fix flag reference [@pata00](https://github.com/pata00) ([\#1136](https://github.com/redis/hiredis/pull/1136))
- Make freeing a NULL redisAsyncContext a no op. [@michael-grunder](https://github.com/michael-grunder)
([\#1135](https://github.com/redis/hiredis/pull/1135))
- CI updates [@bjosv](https://github.com/bjosv) ([\#1139](https://github.com/redis/hiredis/pull/1139))
## Contributors
We'd like to thank all the contributors who worked on this release!
<a href="https://github.com/bjsov"><img src="https://github.com/bjosv.png" width="32" height="32"></a>
<a href="https://github.com/filipecosta90"><img src="https://github.com/filipecosta90.png" width="32" height="32"></a>
<a href="https://github.com/michael-grunder"><img src="https://github.com/michael-grunder.png" width="32" height="32"></a>
<a href="https://github.com/pata00"><img src="https://github.com/pata00.png" width="32" height="32"></a>
## [1.1.0-rc1](https://github.com/redis/hiredis/tree/v1.1.0-rc1) - (2022-11-06)
Announcing Hiredis v1.1.0-rc1, with better SSL convenience, new async adapters, and a great many bug fixes.
## 🚀 New Features
- Add possibility to prefer IPv6, IPv4 or unspecified [@zuiderkwast](https://github.com/zuiderkwast)
([\#1096](https://github.com/redis/hiredis/pull/1096))
- Add adapters/libhv [@ithewei](https://github.com/ithewei) ([\#904](https://github.com/redis/hiredis/pull/904))
- Add timeout support to libhv adapter. [@michael-grunder](https://github.com/michael-grunder) ([\#1109](https://github.com/redis/hiredis/pull/1109))
- set default SSL verification path [@adobeturchenko](https://github.com/adobeturchenko) ([\#928](https://github.com/redis/hiredis/pull/928))
- Introduce .close method for redisContextFuncs [@pizhenwei](https://github.com/pizhenwei) ([\#1094](https://github.com/redis/hiredis/pull/1094))
- Make it possible to set SSL verify mode [@stanhu](https://github.com/stanhu) ([\#1085](https://github.com/redis/hiredis/pull/1085))
- Polling adapter and example [@kristjanvalur](https://github.com/kristjanvalur) ([\#932](https://github.com/redis/hiredis/pull/932))
- Unsubscribe handling in async [@bjosv](https://github.com/bjosv) ([\#1047](https://github.com/redis/hiredis/pull/1047))
- Add timeout support for libuv adapter [@MichaelSuen-thePointer](https://github.com/MichaelSuen-thePointer) ([\#1016](https://github.com/redis/hiredis/pull/1016))
## 🐛 Bug Fixes
- Update for MinGW cross compile [@bit0fun](https://github.com/bit0fun) ([\#1127](https://github.com/redis/hiredis/pull/1127))
- fixed CPP build error with adapters/libhv.h [@mtdxc](https://github.com/mtdxc) ([\#1125](https://github.com/redis/hiredis/pull/1125))
- Fix protocol error
[@michael-grunder](https://github.com/michael-grunder),
[@mtuleika-appcast](https://github.com/mtuleika-appcast) ([\#1106](https://github.com/redis/hiredis/pull/1106))
- Use a windows specific keepalive function. [@michael-grunder](https://github.com/michael-grunder) ([\#1104](https://github.com/redis/hiredis/pull/1104))
- Fix CMake config path on Linux. [@xkszltl](https://github.com/xkszltl) ([\#989](https://github.com/redis/hiredis/pull/989))
- Fix potential fault at createDoubleObject [@afcidk](https://github.com/afcidk) ([\#964](https://github.com/redis/hiredis/pull/964))
- Fix some undefined behavior [@jengab](https://github.com/jengab) ([\#1091](https://github.com/redis/hiredis/pull/1091))
- Copy OOM errors to redisAsyncContext when finding subscribe callback [@bjosv](https://github.com/bjosv) ([\#1090](https://github.com/redis/hiredis/pull/1090))
- Maintain backward compatibility with our onConnect callback. [@michael-grunder](https://github.com/michael-grunder) ([\#1087](https://github.com/redis/hiredis/pull/1087))
- Fix PUSH handler tests for Redis >= 7.0.5 [@michael-grunder](https://github.com/michael-grunder) ([\#1121](https://github.com/redis/hiredis/pull/1121))
- fix heap-buffer-overflow [@zhangtaoXT5](https://github.com/zhangtaoXT5) ([\#957](https://github.com/redis/hiredis/pull/957))
- Fix heap-buffer-overflow issue in redisvFormatCommand [@bjosv](https://github.com/bjosv) ([\#1097](https://github.com/redis/hiredis/pull/1097))
- Polling adapter requires sockcompat.h [@michael-grunder](https://github.com/michael-grunder) ([\#1095](https://github.com/redis/hiredis/pull/1095))
- Illumos test fixes, error message difference for bad hostname test. [@devnexen](https://github.com/devnexen) ([\#901](https://github.com/redis/hiredis/pull/901))
- Remove semicolon after do-while in \_EL\_CLEANUP [@sundb](https://github.com/sundb) ([\#905](https://github.com/redis/hiredis/pull/905))
- Stability: Support calling redisAsyncCommand and redisAsyncDisconnect from the onConnected callback [@kristjanvalur](https://github.com/kristjanvalur)
([\#931](https://github.com/redis/hiredis/pull/931))
- Fix async connect on Windows [@kristjanvalur](https://github.com/kristjanvalur) ([\#1073](https://github.com/redis/hiredis/pull/1073))
- Fix tests so they work for Redis 7.0 [@michael-grunder](https://github.com/michael-grunder) ([\#1072](https://github.com/redis/hiredis/pull/1072))
- Fix warnings on Win64 [@orgads](https://github.com/orgads) ([\#1058](https://github.com/redis/hiredis/pull/1058))
- Handle push notifications before or after reply. [@yossigo](https://github.com/yossigo) ([\#1062](https://github.com/redis/hiredis/pull/1062))
- Update hiredis sds with improvements found in redis [@bjosv](https://github.com/bjosv) ([\#1045](https://github.com/redis/hiredis/pull/1045))
- Avoid incorrect call to the previous reply's callback [@bjosv](https://github.com/bjosv) ([\#1040](https://github.com/redis/hiredis/pull/1040))
- fix building on AIX and SunOS [\#1031](https://github.com/redis/hiredis/pull/1031) ([@scddev](https://github.com/scddev))
- Allow sending commands after sending an unsubscribe [@bjosv](https://github.com/bjosv) ([\#1036](https://github.com/redis/hiredis/pull/1036))
- Correction for command timeout during pubsub [@bjosv](https://github.com/bjosv) ([\#1038](https://github.com/redis/hiredis/pull/1038))
- Fix adapters/libevent.h compilation for 64-bit Windows [@pbtummillo](https://github.com/pbtummillo) ([\#937](https://github.com/redis/hiredis/pull/937))
- Fix integer overflow when format command larger than 4GB [@sundb](https://github.com/sundb) ([\#1030](https://github.com/redis/hiredis/pull/1030))
- Handle array response during subscribe in RESP3 [@bjosv](https://github.com/bjosv) ([\#1014](https://github.com/redis/hiredis/pull/1014))
- Support PING while subscribing (RESP2) [@bjosv](https://github.com/bjosv) ([\#1027](https://github.com/redis/hiredis/pull/1027))
## 🧰 Maintenance
- CI fixes in preparation of release [@michael-grunder](https://github.com/michael-grunder) ([\#1130](https://github.com/redis/hiredis/pull/1130))
- Add do while(0) protection for macros [@afcidk](https://github.com/afcidk) ([\#959](https://github.com/redis/hiredis/pull/959))
- Fixup of PR734: Coverage of hiredis.c [@bjosv](https://github.com/bjosv) ([\#1124](https://github.com/redis/hiredis/pull/1124))
- CMake corrections for building on Windows [@bjosv](https://github.com/bjosv) ([\#1122](https://github.com/redis/hiredis/pull/1122))
- Install on windows fixes [@bjosv](https://github.com/bjosv) ([\#1117](https://github.com/redis/hiredis/pull/1117))
- Add libhv example to our standard Makefile [@michael-grunder](https://github.com/michael-grunder) ([\#1108](https://github.com/redis/hiredis/pull/1108))
- Additional include directory given by pkg-config [@bjosv](https://github.com/bjosv) ([\#1118](https://github.com/redis/hiredis/pull/1118))
- Use __attribute__ when building with Clang on Windows [@bjosv](https://github.com/bjosv) ([\#1115](https://github.com/redis/hiredis/pull/1115))
- Minor refactor [@michael-grunder](https://github.com/michael-grunder) ([\#1110](https://github.com/redis/hiredis/pull/1110))
- Fix pkgconfig result for hiredis_ssl [@bjosv](https://github.com/bjosv) ([\#1107](https://github.com/redis/hiredis/pull/1107))
- Update documentation to explain redisConnectWithOptions. [@michael-grunder](https://github.com/michael-grunder) ([\#1099](https://github.com/redis/hiredis/pull/1099))
- uvadapter: reduce number of uv_poll_start calls [@noxiouz](https://github.com/noxiouz) ([\#1098](https://github.com/redis/hiredis/pull/1098))
- Regression test for off-by-one parsing error [@bugwz](https://github.com/bugwz) ([\#1092](https://github.com/redis/hiredis/pull/1092))
- CMake: remove dict.c form hiredis_sources [@Lipraxde](https://github.com/Lipraxde) ([\#1055](https://github.com/redis/hiredis/pull/1055))
- Do store command timeout in the context for redisSetTimeout [@catterer](https://github.com/catterer) ([\#593](https://github.com/redis/hiredis/pull/593), [\#1093](https://github.com/redis/hiredis/pull/1093))
- Add GitHub Actions CI workflow for hiredis: Arm, Arm64, 386, windows. [@kristjanvalur](https://github.com/kristjanvalur) ([\#943](https://github.com/redis/hiredis/pull/943))
- CI: bump macOS runner version [@SukkaW](https://github.com/SukkaW) ([\#1079](https://github.com/redis/hiredis/pull/1079))
- Support for generating release notes [@chayim](https://github.com/chayim) ([\#1083](https://github.com/redis/hiredis/pull/1083))
- Improve example for SSL initialization in README.md [@stanhu](https://github.com/stanhu) ([\#1084](https://github.com/redis/hiredis/pull/1084))
- Fix README typos [@bjosv](https://github.com/bjosv) ([\#1080](https://github.com/redis/hiredis/pull/1080))
- fix cmake version [@smmir-cent](https://github.com/smmir-cent) ([\#1050](https://github.com/redis/hiredis/pull/1050))
- Use the same name for static and shared libraries [@orgads](https://github.com/orgads) ([\#1057](https://github.com/redis/hiredis/pull/1057))
- Embed debug information in windows static .lib file [@kristjanvalur](https://github.com/kristjanvalur) ([\#1054](https://github.com/redis/hiredis/pull/1054))
- Improved async documentation [@kristjanvalur](https://github.com/kristjanvalur) ([\#1074](https://github.com/redis/hiredis/pull/1074))
- Use official repository for redis package. [@yossigo](https://github.com/yossigo) ([\#1061](https://github.com/redis/hiredis/pull/1061))
- Whitelist hiredis repo path in cygwin [@michael-grunder](https://github.com/michael-grunder) ([\#1063](https://github.com/redis/hiredis/pull/1063))
- CentOS 8 is EOL, switch to RockyLinux [@michael-grunder](https://github.com/michael-grunder) ([\#1046](https://github.com/redis/hiredis/pull/1046))
- CMakeLists.txt: allow building without a C++ compiler [@ffontaine](https://github.com/ffontaine) ([\#872](https://github.com/redis/hiredis/pull/872))
- Makefile: move SSL options into a block and refine rules [@pizhenwei](https://github.com/pizhenwei) ([\#997](https://github.com/redis/hiredis/pull/997))
- Update CMakeLists.txt for more portability [@EricDeng1001](https://github.com/EricDeng1001) ([\#1005](https://github.com/redis/hiredis/pull/1005))
- FreeBSD build fixes + CI [@michael-grunder](https://github.com/michael-grunder) ([\#1026](https://github.com/redis/hiredis/pull/1026))
- Add asynchronous test for pubsub using RESP3 [@bjosv](https://github.com/bjosv) ([\#1012](https://github.com/redis/hiredis/pull/1012))
- Trigger CI failure when Valgrind issues are found [@bjosv](https://github.com/bjosv) ([\#1011](https://github.com/redis/hiredis/pull/1011))
- Move to using make directly in Cygwin [@michael-grunder](https://github.com/michael-grunder) ([\#1020](https://github.com/redis/hiredis/pull/1020))
- Add asynchronous API tests [@bjosv](https://github.com/bjosv) ([\#1010](https://github.com/redis/hiredis/pull/1010))
- Correcting the build target `coverage` for enabled SSL [@bjosv](https://github.com/bjosv) ([\#1009](https://github.com/redis/hiredis/pull/1009))
- GH Actions: Run SSL tests during CI [@bjosv](https://github.com/bjosv) ([\#1008](https://github.com/redis/hiredis/pull/1008))
- GH: Actions - Add valgrind and CMake [@michael-grunder](https://github.com/michael-grunder) ([\#1004](https://github.com/redis/hiredis/pull/1004))
- Add Centos8 tests in GH Actions [@michael-grunder](https://github.com/michael-grunder) ([\#1001](https://github.com/redis/hiredis/pull/1001))
- We should run actions on PRs [@michael-grunder](https://github.com/michael-grunder) ([\#1000](https://github.com/redis/hiredis/pull/1000))
- Add Cygwin test in GitHub actions [@michael-grunder](https://github.com/michael-grunder) ([\#999](https://github.com/redis/hiredis/pull/999))
- Add Windows tests in GitHub actions [@michael-grunder](https://github.com/michael-grunder) ([\#996](https://github.com/redis/hiredis/pull/996))
- Switch to GitHub actions [@michael-grunder](https://github.com/michael-grunder) ([\#995](https://github.com/redis/hiredis/pull/995))
- Minor refactor of CVE-2021-32765 fix. [@michael-grunder](https://github.com/michael-grunder) ([\#993](https://github.com/redis/hiredis/pull/993))
- Remove extra comma from CMake var. [@xkszltl](https://github.com/xkszltl) ([\#988](https://github.com/redis/hiredis/pull/988))
- Add REDIS\_OPT\_PREFER\_UNSPEC [@michael-grunder](https://github.com/michael-grunder) ([\#1101](https://github.com/redis/hiredis/pull/1101))
## Contributors
We'd like to thank all the contributors who worked on this release!
<a href="https://github.com/EricDeng1001"><img src="https://github.com/EricDeng1001.png" width="32" height="32"></a>
<a href="https://github.com/Lipraxde"><img src="https://github.com/Lipraxde.png" width="32" height="32"></a>
<a href="https://github.com/MichaelSuen-thePointer"><img src="https://github.com/MichaelSuen-thePointer.png" width="32" height="32"></a>
<a href="https://github.com/SukkaW"><img src="https://github.com/SukkaW.png" width="32" height="32"></a>
<a href="https://github.com/adobeturchenko"><img src="https://github.com/adobeturchenko.png" width="32" height="32"></a>
<a href="https://github.com/afcidk"><img src="https://github.com/afcidk.png" width="32" height="32"></a>
<a href="https://github.com/bit0fun"><img src="https://github.com/bit0fun.png" width="32" height="32"></a>
<a href="https://github.com/bjosv"><img src="https://github.com/bjosv.png" width="32" height="32"></a>
<a href="https://github.com/bugwz"><img src="https://github.com/bugwz.png" width="32" height="32"></a>
<a href="https://github.com/catterer"><img src="https://github.com/catterer.png" width="32" height="32"></a>
<a href="https://github.com/chayim"><img src="https://github.com/chayim.png" width="32" height="32"></a>
<a href="https://github.com/devnexen"><img src="https://github.com/devnexen.png" width="32" height="32"></a>
<a href="https://github.com/ffontaine"><img src="https://github.com/ffontaine.png" width="32" height="32"></a>
<a href="https://github.com/ithewei"><img src="https://github.com/ithewei.png" width="32" height="32"></a>
<a href="https://github.com/jengab"><img src="https://github.com/jengab.png" width="32" height="32"></a>
<a href="https://github.com/kristjanvalur"><img src="https://github.com/kristjanvalur.png" width="32" height="32"></a>
<a href="https://github.com/michael-grunder"><img src="https://github.com/michael-grunder.png" width="32" height="32"></a>
<a href="https://github.com/noxiouz"><img src="https://github.com/noxiouz.png" width="32" height="32"></a>
<a href="https://github.com/mtdxc"><img src="https://github.com/mtdxc.png" width="32" height="32"></a>
<a href="https://github.com/orgads"><img src="https://github.com/orgads.png" width="32" height="32"></a>
<a href="https://github.com/pbtummillo"><img src="https://github.com/pbtummillo.png" width="32" height="32"></a>
<a href="https://github.com/pizhenwei"><img src="https://github.com/pizhenwei.png" width="32" height="32"></a>
<a href="https://github.com/scddev"><img src="https://github.com/scddev.png" width="32" height="32"></a>
<a href="https://github.com/smmir-cent"><img src="https://github.com/smmir-cent.png" width="32" height="32"></a>
<a href="https://github.com/stanhu"><img src="https://github.com/stanhu.png" width="32" height="32"></a>
<a href="https://github.com/sundb"><img src="https://github.com/sundb.png" width="32" height="32"></a>
<a href="https://github.com/vturchenko"><img src="https://github.com/vturchenko.png" width="32" height="32"></a>
<a href="https://github.com/xkszltl"><img src="https://github.com/xkszltl.png" width="32" height="32"></a>
<a href="https://github.com/yossigo"><img src="https://github.com/yossigo.png" width="32" height="32"></a>
<a href="https://github.com/zhangtaoXT5"><img src="https://github.com/zhangtaoXT5.png" width="32" height="32"></a>
<a href="https://github.com/zuiderkwast"><img src="https://github.com/zuiderkwast.png" width="32" height="32"></a>
## [1.0.2](https://github.com/redis/hiredis/tree/v1.0.2) - (2021-10-07)
Announcing Hiredis v1.0.2, which fixes CVE-2021-32765 but returns the SONAME to the correct value of `1.0.0`.
@ -1,8 +1,10 @@
CMAKE_MINIMUM_REQUIRED(VERSION 3.4.0)
CMAKE_MINIMUM_REQUIRED(VERSION 3.0.0)
OPTION(BUILD_SHARED_LIBS "Build shared libraries" ON)
OPTION(ENABLE_SSL "Build hiredis_ssl for SSL support" OFF)
OPTION(DISABLE_TESTS "If tests should be compiled or not" OFF)
OPTION(ENABLE_SSL_TESTS "Should we test SSL connections" OFF)
OPTION(ENABLE_EXAMPLES "Enable building hiredis examples" OFF)
OPTION(ENABLE_ASYNC_TESTS "Should we run all asynchronous API tests" OFF)
MACRO(getVersionBit name)
@ -24,15 +26,11 @@ INCLUDE(GNUInstallDirs)
# Hiredis requires C99
SET(CMAKE_C_STANDARD 99)
SET(CMAKE_POSITION_INDEPENDENT_CODE ON)
SET(CMAKE_DEBUG_POSTFIX d)
SET(ENABLE_EXAMPLES OFF CACHE BOOL "Enable building hiredis examples")
SET(hiredis_sources
alloc.c
async.c
dict.c
hiredis.c
net.c
read.c
@ -42,34 +40,30 @@ SET(hiredis_sources
SET(hiredis_sources ${hiredis_sources})
IF(WIN32)
ADD_COMPILE_DEFINITIONS(_CRT_SECURE_NO_WARNINGS WIN32_LEAN_AND_MEAN)
ADD_DEFINITIONS(-D_CRT_SECURE_NO_WARNINGS -DWIN32_LEAN_AND_MEAN)
ENDIF()
ADD_LIBRARY(hiredis SHARED ${hiredis_sources})
ADD_LIBRARY(hiredis_static STATIC ${hiredis_sources})
ADD_LIBRARY(hiredis ${hiredis_sources})
ADD_LIBRARY(hiredis::hiredis ALIAS hiredis)
ADD_LIBRARY(hiredis::hiredis_static ALIAS hiredis_static)
set(hiredis_export_name hiredis CACHE STRING "Name of the exported target")
set_target_properties(hiredis PROPERTIES EXPORT_NAME ${hiredis_export_name})
SET_TARGET_PROPERTIES(hiredis
PROPERTIES WINDOWS_EXPORT_ALL_SYMBOLS TRUE
VERSION "${HIREDIS_SONAME}")
SET_TARGET_PROPERTIES(hiredis_static
PROPERTIES COMPILE_PDB_NAME hiredis_static)
SET_TARGET_PROPERTIES(hiredis_static
PROPERTIES COMPILE_PDB_NAME_DEBUG hiredis_static${CMAKE_DEBUG_POSTFIX})
IF(WIN32 OR MINGW)
IF(MSVC)
SET_TARGET_PROPERTIES(hiredis
PROPERTIES COMPILE_FLAGS /Z7)
ENDIF()
IF(WIN32)
TARGET_LINK_LIBRARIES(hiredis PUBLIC ws2_32 crypt32)
TARGET_LINK_LIBRARIES(hiredis_static PUBLIC ws2_32 crypt32)
ELSEIF(CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
TARGET_LINK_LIBRARIES(hiredis PUBLIC m)
TARGET_LINK_LIBRARIES(hiredis_static PUBLIC m)
ELSEIF(CMAKE_SYSTEM_NAME MATCHES "SunOS")
TARGET_LINK_LIBRARIES(hiredis PUBLIC socket)
TARGET_LINK_LIBRARIES(hiredis_static PUBLIC socket)
ENDIF()
TARGET_INCLUDE_DIRECTORIES(hiredis PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>)
TARGET_INCLUDE_DIRECTORIES(hiredis_static PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>)
CONFIGURE_FILE(hiredis.pc.in hiredis.pc @ONLY)
@ -99,26 +93,23 @@ set(CPACK_RPM_PACKAGE_AUTOREQPROV ON)
include(CPack)
INSTALL(TARGETS hiredis hiredis_static
INSTALL(TARGETS hiredis
EXPORT hiredis-targets
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
if (MSVC)
if (MSVC AND BUILD_SHARED_LIBS)
INSTALL(FILES $<TARGET_PDB_FILE:hiredis>
DESTINATION ${CMAKE_INSTALL_BINDIR}
CONFIGURATIONS Debug RelWithDebInfo)
INSTALL(FILES $<TARGET_FILE_DIR:hiredis_static>/$<TARGET_FILE_BASE_NAME:hiredis_static>.pdb
DESTINATION ${CMAKE_INSTALL_LIBDIR}
CONFIGURATIONS Debug RelWithDebInfo)
endif()
# For NuGet packages
INSTALL(FILES hiredis.targets
DESTINATION build/native)
INSTALL(FILES hiredis.h read.h sds.h async.h alloc.h
INSTALL(FILES hiredis.h read.h sds.h async.h alloc.h sockcompat.h
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/hiredis)
INSTALL(DIRECTORY adapters
@ -131,9 +122,15 @@ export(EXPORT hiredis-targets
FILE "${CMAKE_CURRENT_BINARY_DIR}/hiredis-targets.cmake"
NAMESPACE hiredis::)
SET(CMAKE_CONF_INSTALL_DIR share/hiredis)
if(WIN32)
SET(CMAKE_CONF_INSTALL_DIR share/hiredis)
else()
SET(CMAKE_CONF_INSTALL_DIR ${CMAKE_INSTALL_LIBDIR}/cmake/hiredis)
endif()
SET(INCLUDE_INSTALL_DIR include)
include(CMakePackageConfigHelpers)
write_basic_package_version_file("${CMAKE_CURRENT_BINARY_DIR}/hiredis-config-version.cmake"
COMPATIBILITY SameMajorVersion)
configure_package_config_file(hiredis-config.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/hiredis-config.cmake
INSTALL_DESTINATION ${CMAKE_CONF_INSTALL_DIR}
PATH_VARS INCLUDE_INSTALL_DIR)
@ -144,6 +141,7 @@ INSTALL(EXPORT hiredis-targets
DESTINATION ${CMAKE_CONF_INSTALL_DIR})
INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/hiredis-config.cmake
${CMAKE_CURRENT_BINARY_DIR}/hiredis-config-version.cmake
DESTINATION ${CMAKE_CONF_INSTALL_DIR})
@ -156,12 +154,10 @@ IF(ENABLE_SSL)
FIND_PACKAGE(OpenSSL REQUIRED)
SET(hiredis_ssl_sources
ssl.c)
ADD_LIBRARY(hiredis_ssl SHARED
${hiredis_ssl_sources})
ADD_LIBRARY(hiredis_ssl_static STATIC
${hiredis_ssl_sources})
ADD_LIBRARY(hiredis_ssl ${hiredis_ssl_sources})
ADD_LIBRARY(hiredis::hiredis_ssl ALIAS hiredis_ssl)
IF (APPLE)
IF (APPLE AND BUILD_SHARED_LIBS)
SET_PROPERTY(TARGET hiredis_ssl PROPERTY LINK_FLAGS "-Wl,-undefined -Wl,dynamic_lookup")
ENDIF()
@ -169,34 +165,26 @@ IF(ENABLE_SSL)
PROPERTIES
WINDOWS_EXPORT_ALL_SYMBOLS TRUE
VERSION "${HIREDIS_SONAME}")
SET_TARGET_PROPERTIES(hiredis_ssl_static
PROPERTIES COMPILE_PDB_NAME hiredis_ssl_static)
SET_TARGET_PROPERTIES(hiredis_ssl_static
PROPERTIES COMPILE_PDB_NAME_DEBUG hiredis_ssl_static${CMAKE_DEBUG_POSTFIX})
TARGET_INCLUDE_DIRECTORIES(hiredis_ssl PRIVATE "${OPENSSL_INCLUDE_DIR}")
TARGET_INCLUDE_DIRECTORIES(hiredis_ssl_static PRIVATE "${OPENSSL_INCLUDE_DIR}")
TARGET_LINK_LIBRARIES(hiredis_ssl PRIVATE ${OPENSSL_LIBRARIES})
IF (WIN32 OR MINGW)
IF(MSVC)
SET_TARGET_PROPERTIES(hiredis_ssl
PROPERTIES COMPILE_FLAGS /Z7)
ENDIF()
TARGET_LINK_LIBRARIES(hiredis_ssl PRIVATE OpenSSL::SSL)
IF(WIN32)
TARGET_LINK_LIBRARIES(hiredis_ssl PRIVATE hiredis)
TARGET_LINK_LIBRARIES(hiredis_ssl_static PUBLIC hiredis_static)
ENDIF()
CONFIGURE_FILE(hiredis_ssl.pc.in hiredis_ssl.pc @ONLY)
INSTALL(TARGETS hiredis_ssl hiredis_ssl_static
INSTALL(TARGETS hiredis_ssl
EXPORT hiredis_ssl-targets
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
if (MSVC)
if (MSVC AND BUILD_SHARED_LIBS)
INSTALL(FILES $<TARGET_PDB_FILE:hiredis_ssl>
DESTINATION ${CMAKE_INSTALL_BINDIR}
CONFIGURATIONS Debug RelWithDebInfo)
INSTALL(FILES $<TARGET_FILE_DIR:hiredis_ssl_static>/$<TARGET_FILE_BASE_NAME:hiredis_ssl_static>.pdb
DESTINATION ${CMAKE_INSTALL_LIBDIR}
CONFIGURATIONS Debug RelWithDebInfo)
endif()
INSTALL(FILES hiredis_ssl.h
@ -209,7 +197,11 @@ IF(ENABLE_SSL)
FILE "${CMAKE_CURRENT_BINARY_DIR}/hiredis_ssl-targets.cmake"
NAMESPACE hiredis::)
SET(CMAKE_CONF_INSTALL_DIR share/hiredis_ssl)
if(WIN32)
SET(CMAKE_CONF_INSTALL_DIR share/hiredis_ssl)
else()
SET(CMAKE_CONF_INSTALL_DIR ${CMAKE_INSTALL_LIBDIR}/cmake/hiredis_ssl)
endif()
configure_package_config_file(hiredis_ssl-config.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/hiredis_ssl-config.cmake
INSTALL_DESTINATION ${CMAKE_CONF_INSTALL_DIR}
PATH_VARS INCLUDE_INSTALL_DIR)
@ -241,5 +233,5 @@ ENDIF()
# Add examples
IF(ENABLE_EXAMPLES)
ADD_SUBDIRECTORY(examples)
ADD_SUBDIRECTORY(examples)
ENDIF(ENABLE_EXAMPLES)
deps/hiredis/Makefile
@ -4,7 +4,7 @@
# This file is released under the BSD license, see the COPYING file
OBJ=alloc.o net.o hiredis.o sds.o async.o read.o sockcompat.o
EXAMPLES=hiredis-example hiredis-example-libevent hiredis-example-libev hiredis-example-glib hiredis-example-push
EXAMPLES=hiredis-example hiredis-example-libevent hiredis-example-libev hiredis-example-glib hiredis-example-push hiredis-example-poll
TESTS=hiredis-test
LIBNAME=libhiredis
PKGCONFNAME=hiredis.pc
@ -39,9 +39,9 @@ export REDIS_TEST_CONFIG
CC:=$(shell sh -c 'type $${CC%% *} >/dev/null 2>/dev/null && echo $(CC) || echo gcc')
CXX:=$(shell sh -c 'type $${CXX%% *} >/dev/null 2>/dev/null && echo $(CXX) || echo g++')
OPTIMIZATION?=-O3
WARNINGS=-Wall -W -Wstrict-prototypes -Wwrite-strings -Wno-missing-field-initializers
WARNINGS=-Wall -Wextra -Werror -Wstrict-prototypes -Wwrite-strings -Wno-missing-field-initializers
DEBUG_FLAGS?= -g -ggdb
REAL_CFLAGS=$(OPTIMIZATION) -fPIC $(CPPFLAGS) $(CFLAGS) $(WARNINGS) $(DEBUG_FLAGS)
REAL_CFLAGS=$(OPTIMIZATION) -fPIC $(CPPFLAGS) $(CFLAGS) $(WARNINGS) $(DEBUG_FLAGS) $(PLATFORM_FLAGS)
REAL_LDFLAGS=$(LDFLAGS)
DYLIBSUFFIX=so
@ -50,7 +50,7 @@ DYLIB_MINOR_NAME=$(LIBNAME).$(DYLIBSUFFIX).$(HIREDIS_SONAME)
DYLIB_MAJOR_NAME=$(LIBNAME).$(DYLIBSUFFIX).$(HIREDIS_MAJOR)
DYLIBNAME=$(LIBNAME).$(DYLIBSUFFIX)
DYLIB_MAKE_CMD=$(CC) -shared -Wl,-soname,$(DYLIB_MINOR_NAME)
DYLIB_MAKE_CMD=$(CC) $(PLATFORM_FLAGS) -shared -Wl,-soname,$(DYLIB_MINOR_NAME)
STLIBNAME=$(LIBNAME).$(STLIBSUFFIX)
STLIB_MAKE_CMD=$(AR) rcs
@ -63,7 +63,7 @@ SSL_DYLIB_MINOR_NAME=$(SSL_LIBNAME).$(DYLIBSUFFIX).$(HIREDIS_SONAME)
SSL_DYLIB_MAJOR_NAME=$(SSL_LIBNAME).$(DYLIBSUFFIX).$(HIREDIS_MAJOR)
SSL_DYLIBNAME=$(SSL_LIBNAME).$(DYLIBSUFFIX)
SSL_STLIBNAME=$(SSL_LIBNAME).$(STLIBSUFFIX)
SSL_DYLIB_MAKE_CMD=$(CC) -shared -Wl,-soname,$(SSL_DYLIB_MINOR_NAME)
SSL_DYLIB_MAKE_CMD=$(CC) $(PLATFORM_FLAGS) -shared -Wl,-soname,$(SSL_DYLIB_MINOR_NAME)
USE_SSL?=0
ifeq ($(USE_SSL),1)
@ -92,18 +92,25 @@ ifeq ($(TEST_ASYNC),1)
endif
ifeq ($(USE_SSL),1)
ifeq ($(uname_S),Linux)
ifdef OPENSSL_PREFIX
CFLAGS+=-I$(OPENSSL_PREFIX)/include
SSL_LDFLAGS+=-L$(OPENSSL_PREFIX)/lib -lssl -lcrypto
else
SSL_LDFLAGS=-lssl -lcrypto
ifndef OPENSSL_PREFIX
ifeq ($(uname_S),Darwin)
SEARCH_PATH1=/opt/homebrew/opt/openssl
SEARCH_PATH2=/usr/local/opt/openssl
ifneq ($(wildcard $(SEARCH_PATH1)),)
OPENSSL_PREFIX=$(SEARCH_PATH1)
else ifneq ($(wildcard $(SEARCH_PATH2)),)
OPENSSL_PREFIX=$(SEARCH_PATH2)
endif
endif
else
OPENSSL_PREFIX?=/usr/local/opt/openssl
CFLAGS+=-I$(OPENSSL_PREFIX)/include
SSL_LDFLAGS+=-L$(OPENSSL_PREFIX)/lib -lssl -lcrypto
endif
ifdef OPENSSL_PREFIX
CFLAGS+=-I$(OPENSSL_PREFIX)/include
SSL_LDFLAGS+=-L$(OPENSSL_PREFIX)/lib
endif
SSL_LDFLAGS+=-lssl -lcrypto
endif
ifeq ($(uname_S),FreeBSD)
@ -180,6 +187,9 @@ hiredis-example-libevent-ssl: examples/example-libevent-ssl.c adapters/libevent.
hiredis-example-libev: examples/example-libev.c adapters/libev.h $(STLIBNAME)
$(CC) -o examples/$@ $(REAL_CFLAGS) -I. $< -lev $(STLIBNAME) $(REAL_LDFLAGS)
hiredis-example-libhv: examples/example-libhv.c adapters/libhv.h $(STLIBNAME)
$(CC) -o examples/$@ $(REAL_CFLAGS) -I. $< -lhv $(STLIBNAME) $(REAL_LDFLAGS)
hiredis-example-glib: examples/example-glib.c adapters/glib.h $(STLIBNAME)
$(CC) -o examples/$@ $(REAL_CFLAGS) -I. $< $(shell pkg-config --cflags --libs glib-2.0) $(STLIBNAME) $(REAL_LDFLAGS)
@ -192,6 +202,9 @@ hiredis-example-macosx: examples/example-macosx.c adapters/macosx.h $(STLIBNAME)
hiredis-example-ssl: examples/example-ssl.c $(STLIBNAME) $(SSL_STLIBNAME)
$(CC) -o examples/$@ $(REAL_CFLAGS) -I. $< $(STLIBNAME) $(SSL_STLIBNAME) $(REAL_LDFLAGS) $(SSL_LDFLAGS)
hiredis-example-poll: examples/example-poll.c adapters/poll.h $(STLIBNAME)
$(CC) -o examples/$@ $(REAL_CFLAGS) -I. $< $(STLIBNAME) $(REAL_LDFLAGS)
ifndef AE_DIR
hiredis-example-ae:
@echo "Please specify AE_DIR (e.g. <redis repository>/src)"
@ -269,20 +282,22 @@ $(PKGCONFNAME): hiredis.h
@echo prefix=$(PREFIX) > $@
@echo exec_prefix=\$${prefix} >> $@
@echo libdir=$(PREFIX)/$(LIBRARY_PATH) >> $@
@echo includedir=$(PREFIX)/$(INCLUDE_PATH) >> $@
@echo includedir=$(PREFIX)/include >> $@
@echo pkgincludedir=$(PREFIX)/$(INCLUDE_PATH) >> $@
@echo >> $@
@echo Name: hiredis >> $@
@echo Description: Minimalistic C client library for Redis. >> $@
@echo Version: $(HIREDIS_MAJOR).$(HIREDIS_MINOR).$(HIREDIS_PATCH) >> $@
@echo Libs: -L\$${libdir} -lhiredis >> $@
@echo Cflags: -I\$${includedir} -D_FILE_OFFSET_BITS=64 >> $@
@echo Cflags: -I\$${pkgincludedir} -I\$${includedir} -D_FILE_OFFSET_BITS=64 >> $@
$(SSL_PKGCONFNAME): hiredis_ssl.h
@echo "Generating $@ for pkgconfig..."
@echo prefix=$(PREFIX) > $@
@echo exec_prefix=\$${prefix} >> $@
@echo libdir=$(PREFIX)/$(LIBRARY_PATH) >> $@
@echo includedir=$(PREFIX)/$(INCLUDE_PATH) >> $@
@echo includedir=$(PREFIX)/include >> $@
@echo pkgincludedir=$(PREFIX)/$(INCLUDE_PATH) >> $@
@echo >> $@
@echo Name: hiredis_ssl >> $@
@echo Description: SSL Support for hiredis. >> $@
@ -293,10 +308,10 @@ $(SSL_PKGCONFNAME): hiredis_ssl.h
install: $(DYLIBNAME) $(STLIBNAME) $(PKGCONFNAME) $(SSL_INSTALL)
mkdir -p $(INSTALL_INCLUDE_PATH) $(INSTALL_INCLUDE_PATH)/adapters $(INSTALL_LIBRARY_PATH)
$(INSTALL) hiredis.h async.h read.h sds.h alloc.h $(INSTALL_INCLUDE_PATH)
$(INSTALL) hiredis.h async.h read.h sds.h alloc.h sockcompat.h $(INSTALL_INCLUDE_PATH)
$(INSTALL) adapters/*.h $(INSTALL_INCLUDE_PATH)/adapters
$(INSTALL) $(DYLIBNAME) $(INSTALL_LIBRARY_PATH)/$(DYLIB_MINOR_NAME)
cd $(INSTALL_LIBRARY_PATH) && ln -sf $(DYLIB_MINOR_NAME) $(DYLIBNAME)
cd $(INSTALL_LIBRARY_PATH) && ln -sf $(DYLIB_MINOR_NAME) $(DYLIBNAME) && ln -sf $(DYLIB_MINOR_NAME) $(DYLIB_MAJOR_NAME)
$(INSTALL) $(STLIBNAME) $(INSTALL_LIBRARY_PATH)
mkdir -p $(INSTALL_PKGCONF_PATH)
$(INSTALL) $(PKGCONFNAME) $(INSTALL_PKGCONF_PATH)
@ -305,7 +320,7 @@ install-ssl: $(SSL_DYLIBNAME) $(SSL_STLIBNAME) $(SSL_PKGCONFNAME)
mkdir -p $(INSTALL_INCLUDE_PATH) $(INSTALL_LIBRARY_PATH)
$(INSTALL) hiredis_ssl.h $(INSTALL_INCLUDE_PATH)
$(INSTALL) $(SSL_DYLIBNAME) $(INSTALL_LIBRARY_PATH)/$(SSL_DYLIB_MINOR_NAME)
cd $(INSTALL_LIBRARY_PATH) && ln -sf $(SSL_DYLIB_MINOR_NAME) $(SSL_DYLIBNAME)
cd $(INSTALL_LIBRARY_PATH) && ln -sf $(SSL_DYLIB_MINOR_NAME) $(SSL_DYLIBNAME) && ln -sf $(SSL_DYLIB_MINOR_NAME) $(SSL_DYLIB_MAJOR_NAME)
$(INSTALL) $(SSL_STLIBNAME) $(INSTALL_LIBRARY_PATH)
mkdir -p $(INSTALL_PKGCONF_PATH)
$(INSTALL) $(SSL_PKGCONFNAME) $(INSTALL_PKGCONF_PATH)
@ -330,7 +345,8 @@ coverage: gcov
make check
mkdir -p tmp/lcov
lcov -d . -c --exclude '/usr*' -o tmp/lcov/hiredis.info
genhtml --legend -o tmp/lcov/report tmp/lcov/hiredis.info
lcov -q -l tmp/lcov/hiredis.info
genhtml --legend -q -o tmp/lcov/report tmp/lcov/hiredis.info
noopt:
$(MAKE) OPTIMIZATION=""
deps/hiredis/README.md
@ -23,6 +23,13 @@ Redis version >= 1.2.0.
The library comes with multiple APIs. There is the
*synchronous API*, the *asynchronous API* and the *reply parsing API*.
## Upgrading to `1.1.0`
Almost all users will simply need to recompile their applications against the newer version of hiredis.
**NOTE**: Hiredis can now return `nan` in addition to `-inf` and `inf` in a `REDIS_REPLY_DOUBLE`.
Applications that deal with `RESP3` doubles should make sure to account for this.
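A defensive reply handler might check for `nan` explicitly. The sketch below is illustrative and not part of the upstream README; only `reply->dval` and the reply type constants come from hiredis itself:
```c
#include <math.h>   /* isnan, isinf */
#include <stdio.h>
#include <hiredis/hiredis.h>
/* Print a RESP3 double reply, treating nan and +/-inf as special cases. */
void printDoubleReply(const redisReply *reply) {
    if (reply == NULL || reply->type != REDIS_REPLY_DOUBLE) return;
    if (isnan(reply->dval))
        printf("got nan\n");
    else if (isinf(reply->dval))
        printf("got %sinf\n", reply->dval < 0 ? "-" : "");
    else
        printf("got %f\n", reply->dval);
}
```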
## Upgrading to `1.0.2`
<span style="color:red">NOTE: v1.0.1 erroneously bumped SONAME, which is why it is skipped here.</span>
@ -82,6 +89,7 @@ an error state. The field `errstr` will contain a string with a description of
the error. More information on errors can be found in the **Errors** section.
After trying to connect to Redis using `redisConnect` you should
check the `err` field to see if establishing the connection was successful:
```c
redisContext *c = redisConnect("127.0.0.1", 6379);
if (c == NULL || c->err) {
@ -94,8 +102,74 @@ if (c == NULL || c->err) {
}
```
One can also use `redisConnectWithOptions` which takes a `redisOptions` argument
that can be configured with endpoint information as well as many different flags
to change how the `redisContext` will be configured.
```c
redisOptions opt = {0};
/* One can set the endpoint with one of our helper macros */
if (tcp) {
REDIS_OPTIONS_SET_TCP(&opt, "localhost", 6379);
} else {
REDIS_OPTIONS_SET_UNIX(&opt, "/tmp/redis.sock");
}
/* And privdata can be specified with another helper */
REDIS_OPTIONS_SET_PRIVDATA(&opt, myPrivData, myPrivDataDtor);
/* Finally various options may be set via the `options` member, as described below */
opt->options |= REDIS_OPT_PREFER_IPV4;
```
If a connection is lost, `int redisReconnect(redisContext *c)` can be used to restore the connection using the same endpoint and options as the given context.
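For instance, an application might retry a command once after a successful reconnect. This is a sketch of one possible policy, not an API recommendation from the README; it only relies on `redisCommand`, `redisReconnect`, and the standard `REDIS_ERR_IO`/`REDIS_ERR_EOF` error codes:
```c
#include <hiredis/hiredis.h>
/* Run a command; on an I/O or EOF error, reconnect once and retry. */
redisReply *commandWithRetry(redisContext *c, const char *cmd) {
    redisReply *reply = redisCommand(c, cmd);
    if (reply == NULL && (c->err == REDIS_ERR_IO || c->err == REDIS_ERR_EOF) &&
        redisReconnect(c) == REDIS_OK) {
        reply = redisCommand(c, cmd);
    }
    return reply;
}
```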
### Configurable redisOptions flags
There are several flags you may set in the `redisOptions` struct to change default behavior. You can specify the flags via the `redisOptions->options` member.
| Flag | Description |
| --- | --- |
| REDIS\_OPT\_NONBLOCK | Tells hiredis to make a non-blocking connection. |
| REDIS\_OPT\_REUSEADDR | Tells hiredis to set the [SO_REUSEADDR](https://man7.org/linux/man-pages/man7/socket.7.html) socket option |
| REDIS\_OPT\_PREFER\_IPV4<br>REDIS\_OPT\_PREFER_IPV6<br>REDIS\_OPT\_PREFER\_IP\_UNSPEC | Informs hiredis to either prefer IPv4 or IPv6 when invoking [getaddrinfo](https://man7.org/linux/man-pages/man3/gai_strerror.3.html). `REDIS_OPT_PREFER_IP_UNSPEC` will cause hiredis to specify `AF_UNSPEC` in the getaddrinfo call, which means both IPv4 and IPv6 addresses will be searched simultaneously.<br>Hiredis prefers IPv4 by default. |
| REDIS\_OPT\_NO\_PUSH\_AUTOFREE | Tells hiredis to not install the default RESP3 PUSH handler (which just intercepts and frees the replies). This is useful in situations where you want to process these messages in-band. |
| REDIS\_OPT\_NOAUTOFREEREPLIES | **ASYNC**: tells hiredis not to automatically invoke `freeReplyObject` after executing the reply callback. |
| REDIS\_OPT\_NOAUTOFREE | **ASYNC**: Tells hiredis not to automatically free the `redisAsyncContext` on connection/communication failure, but only if the user makes an explicit call to `redisAsyncDisconnect` or `redisAsyncFree` |
*Note: A `redisContext` is not thread-safe.*
### Other configuration using socket options
The following socket options are applied directly to the underlying socket.
The values are not stored in the `redisContext`, so they are not automatically applied when reconnecting using `redisReconnect()`.
These functions return `REDIS_OK` on success.
On failure, `REDIS_ERR` is returned and the underlying connection is closed.
To configure these for an asynchronous context (see *Asynchronous API* below), use `ac->c` to get the `redisContext` out of a `redisAsyncContext`.
```C
int redisEnableKeepAlive(redisContext *c);
int redisEnableKeepAliveWithInterval(redisContext *c, int interval);
```
Enables TCP keepalive by setting the following socket options (with some variations depending on OS):
* `SO_KEEPALIVE`;
* `TCP_KEEPALIVE` or `TCP_KEEPIDLE`, value configurable using the `interval` parameter, default 15 seconds;
* `TCP_KEEPINTVL` set to 1/3 of `interval`;
* `TCP_KEEPCNT` set to 3.
```C
int redisSetTcpUserTimeout(redisContext *c, unsigned int timeout);
```
Set the `TCP_USER_TIMEOUT` Linux-specific socket option which is as described in the `tcp` man page:
> When the value is greater than 0, it specifies the maximum amount of time in milliseconds that transmitted data may remain unacknowledged before TCP will forcibly close the corresponding connection and return ETIMEDOUT to the application.
> If the option value is specified as 0, TCP will use the system default.
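Combining the two, a freshly created blocking context could be tuned right after connecting. A minimal sketch; the 15-second keepalive interval and 10-second user timeout are arbitrary example values:
```c
#include <hiredis/hiredis.h>
/* Returns REDIS_OK on success; on failure the connection is already closed. */
int tuneSocket(redisContext *c) {
    if (redisEnableKeepAliveWithInterval(c, 15) != REDIS_OK)
        return REDIS_ERR;
    if (redisSetTcpUserTimeout(c, 10000) != REDIS_OK)   /* Linux only, milliseconds */
        return REDIS_ERR;
    return REDIS_OK;
}
```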
### Sending commands
There are several ways to issue commands to Redis. The first that will be introduced is
@ -250,7 +324,7 @@ following two execution paths:
* Read from the socket until a single reply could be parsed
The function `redisGetReply` is exported as part of the Hiredis API and can be used when a reply
is expected on the socket. To pipeline commands, the only things that needs to be done is
is expected on the socket. To pipeline commands, the only thing that needs to be done is
filling up the output buffer. For this cause, two commands can be used that are identical
to the `redisCommand` family, apart from not returning a reply:
```c
@ -320,23 +394,48 @@ Redis. It returns a pointer to the newly created `redisAsyncContext` struct. The
should be checked after creation to see if there were errors creating the connection.
Because the connection that will be created is non-blocking, the kernel is not able to
instantly return whether the specified host and port are able to accept a connection.
In case of error, it is the caller's responsibility to free the context using `redisAsyncFree()`.
*Note: A `redisAsyncContext` is not thread-safe.*
An application function creating a connection might look like this:
```c
redisAsyncContext *c = redisAsyncConnect("127.0.0.1", 6379);
if (c->err) {
printf("Error: %s\n", c->errstr);
// handle error
void appConnect(myAppData *appData)
{
redisAsyncContext *c = redisAsyncConnect("127.0.0.1", 6379);
if (c->err) {
printf("Error: %s\n", c->errstr);
// handle error
redisAsyncFree(c);
c = NULL;
} else {
appData->context = c;
appData->connecting = 1;
c->data = appData; /* store application pointer for the callbacks */
redisAsyncSetConnectCallback(c, appOnConnect);
redisAsyncSetDisconnectCallback(c, appOnDisconnect);
}
}
```
The asynchronous context can hold a disconnect callback function that is called when the
connection is disconnected (either because of an error or per user request). This function should
The asynchronous context _should_ hold a *connect* callback function that is called when the connection
attempt completes, either successfully or with an error.
It _can_ also hold a *disconnect* callback function that is called when the
connection is disconnected (either because of an error or per user request). Both callbacks should
have the following prototype:
```c
void(const redisAsyncContext *c, int status);
```
On a *connect*, the `status` argument is set to `REDIS_OK` if the connection attempt succeeded. In this
case, the context is ready to accept commands. If it is called with `REDIS_ERR` then the
connection attempt failed. The `err` field in the context can be accessed to find out the cause of the error.
After a failed connection attempt, the context object is automatically freed by the library after calling
the connect callback. This may be a good point to create a new context and retry the connection.
On a disconnect, the `status` argument is set to `REDIS_OK` when disconnection was initiated by the
user, or `REDIS_ERR` when the disconnection was caused by an error. When it is `REDIS_ERR`, the `err`
field in the context can be accessed to find out the cause of the error.
@ -344,12 +443,46 @@ field in the context can be accessed to find out the cause of the error.
The context object is always freed after the disconnect callback fired. When a reconnect is needed,
the disconnect callback is a good point to do so.
Setting the disconnect callback can only be done once per context. For subsequent calls it will
return `REDIS_ERR`. The function to set the disconnect callback has the following prototype:
Setting the connect or disconnect callbacks can only be done once per context. For subsequent calls the
API will return `REDIS_ERR`. The functions to set the callbacks have the following prototypes:
```c
/* Alternatively you can use redisAsyncSetConnectCallbackNC which will be passed a non-const
redisAsyncContext* on invocation (e.g. allowing writes to the privdata member). */
int redisAsyncSetConnectCallback(redisAsyncContext *ac, redisConnectCallback *fn);
int redisAsyncSetDisconnectCallback(redisAsyncContext *ac, redisDisconnectCallback *fn);
```
`ac->data` may be used to pass user data to this callback, the same can be done for redisConnectCallback.
`ac->data` may be used to pass user data to both callbacks. A typical implementation
might look something like this:
```c
void appOnConnect(redisAsyncContext *c, int status)
{
myAppData *appData = (myAppData*)c->data; /* get my application specific context*/
appData->connecting = 0;
if (status == REDIS_OK) {
appData->connected = 1;
} else {
appData->connected = 0;
appData->err = c->err;
appData->context = NULL; /* avoid stale pointer when callback returns */
}
appAttemptReconnect();
}
void appOnDisconnect(redisAsyncContext *c, int status)
{
myAppData *appData = (myAppData*)c->data; /* get my application specific context*/
appData->connected = 0;
appData->err = c->err;
appData->context = NULL; /* avoid stale pointer when callback returns */
if (status == REDIS_OK) {
appNotifyDisconnectCompleted(appData);
} else {
appNotifyUnexpectedDisconnect(appData);
appAttemptReconnect();
}
}
```
### Sending commands and their callbacks
In an asynchronous context, commands are automatically pipelined due to the nature of an event loop.
@ -382,6 +515,14 @@ valid for the duration of the callback.
All pending callbacks are called with a `NULL` reply when the context encountered an error.
For every command issued, with the exception of **SUBSCRIBE** and **PSUBSCRIBE**, the callback is
called exactly once. Even if the context object is disconnected or deleted, every pending callback
will be called with a `NULL` reply.
For **SUBSCRIBE** and **PSUBSCRIBE**, the callbacks may be called repeatedly until an `unsubscribe`
message arrives. This will be the last invocation of the callback. In case of error, the callbacks
may receive a final `NULL` reply instead.
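For illustration, a subscribe callback might look like the sketch below. The callback name, channel handling, and printing are made up; the three-element `message` layout is the standard Redis pub/sub push format:
```c
#include <stdio.h>
#include <hiredis/async.h>
/* Invoked for the subscribe confirmation, every message, and the unsubscribe. */
void onMessage(redisAsyncContext *ac, void *r, void *privdata) {
    redisReply *reply = r;
    (void)ac; (void)privdata;
    if (reply == NULL) return;   /* context error: this is the final invocation */
    if (reply->type == REDIS_REPLY_ARRAY && reply->elements == 3 &&
        reply->element[2]->type == REDIS_REPLY_STRING) {
        printf("%s: %s\n", reply->element[1]->str, reply->element[2]->str);
    }
}
```
The subscription itself would be issued with something like `redisAsyncCommand(ac, onMessage, NULL, "SUBSCRIBE mychannel")` once the context is attached to an event loop.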
### Disconnecting
An asynchronous connection can be terminated using:
@ -394,6 +535,15 @@ have been written to the socket, their respective replies have been read and the
callbacks have been executed. After this, the disconnection callback is executed with the
`REDIS_OK` status and the context object is freed.
The connection can be forcefully disconnected using
```c
void redisAsyncFree(redisAsyncContext *ac);
```
In this case, nothing more is written to the socket, all pending callbacks are called with a `NULL`
reply and the disconnection callback is called with `REDIS_OK`, after which the context object
is freed.
### Hooking it up to event library *X*
There are a few hooks that need to be set on the context object after it is created.
@ -497,8 +647,8 @@ unaffected so no additional dependencies are introduced.
First, you'll need to make sure you include the SSL header file:
```c
#include "hiredis.h"
#include "hiredis_ssl.h"
#include <hiredis/hiredis.h>
#include <hiredis/hiredis_ssl.h>
```
You will also need to link against `libhiredis_ssl`, **in addition** to
@ -529,7 +679,7 @@ redisSSLContext *ssl_context;
/* An error variable to indicate what went wrong, if the context fails to
* initialize.
*/
redisSSLContextError ssl_error;
redisSSLContextError ssl_error = REDIS_SSL_CTX_NONE;
/* Initialize global OpenSSL state.
*
@ -547,11 +697,11 @@ ssl_context = redisCreateSSLContext(
"redis.mydomain.com", /* Server name to request (SNI), optional */
&ssl_error);
if(ssl_context == NULL || ssl_error != 0) {
if(ssl_context == NULL || ssl_error != REDIS_SSL_CTX_NONE) {
/* Handle error and abort... */
/* e.g.
printf("SSL error: %s\n",
(ssl_error != 0) ?
/* e.g.
printf("SSL error: %s\n",
(ssl_error != REDIS_SSL_CTX_NONE) ?
redisSSLContextGetError(ssl_error) : "Unknown error");
// Abort
*/
@ -633,7 +783,7 @@ If you have a unique use-case where you don't want hiredis to automatically inte
redisSetPushCallback(context, NULL);
```
_Note: With no handler configured, calls to `redisCommand` may generate more than one reply, so this strategy is only applicable when there's some kind of blocking`redisGetReply()` loop (e.g. `MONITOR` or `SUBSCRIBE` workloads)._
_Note: With no handler configured, calls to `redisCommand` may generate more than one reply, so this strategy is only applicable when there's some kind of blocking `redisGetReply()` loop (e.g. `MONITOR` or `SUBSCRIBE` workloads)._
## Allocator injection
deps/hiredis/adapters/libhv.h
@ -0,0 +1,123 @@
#ifndef __HIREDIS_LIBHV_H__
#define __HIREDIS_LIBHV_H__
#include <hv/hloop.h>
#include "../hiredis.h"
#include "../async.h"
typedef struct redisLibhvEvents {
hio_t *io;
htimer_t *timer;
} redisLibhvEvents;
static void redisLibhvHandleEvents(hio_t* io) {
redisAsyncContext* context = (redisAsyncContext*)hevent_userdata(io);
int events = hio_events(io);
int revents = hio_revents(io);
if (context && (events & HV_READ) && (revents & HV_READ)) {
redisAsyncHandleRead(context);
}
if (context && (events & HV_WRITE) && (revents & HV_WRITE)) {
redisAsyncHandleWrite(context);
}
}
static void redisLibhvAddRead(void *privdata) {
redisLibhvEvents* events = (redisLibhvEvents*)privdata;
hio_add(events->io, redisLibhvHandleEvents, HV_READ);
}
static void redisLibhvDelRead(void *privdata) {
redisLibhvEvents* events = (redisLibhvEvents*)privdata;
hio_del(events->io, HV_READ);
}
static void redisLibhvAddWrite(void *privdata) {
redisLibhvEvents* events = (redisLibhvEvents*)privdata;
hio_add(events->io, redisLibhvHandleEvents, HV_WRITE);
}
static void redisLibhvDelWrite(void *privdata) {
redisLibhvEvents* events = (redisLibhvEvents*)privdata;
hio_del(events->io, HV_WRITE);
}
static void redisLibhvCleanup(void *privdata) {
redisLibhvEvents* events = (redisLibhvEvents*)privdata;
if (events->timer)
htimer_del(events->timer);
hio_close(events->io);
hevent_set_userdata(events->io, NULL);
hi_free(events);
}
static void redisLibhvTimeout(htimer_t* timer) {
hio_t* io = (hio_t*)hevent_userdata(timer);
redisAsyncHandleTimeout((redisAsyncContext*)hevent_userdata(io));
}
static void redisLibhvSetTimeout(void *privdata, struct timeval tv) {
redisLibhvEvents* events;
uint32_t millis;
hloop_t* loop;
events = (redisLibhvEvents*)privdata;
millis = tv.tv_sec * 1000 + tv.tv_usec / 1000;
if (millis == 0) {
/* Libhv disallows zero'd timers so treat this as a delete or NO OP */
if (events->timer) {
htimer_del(events->timer);
events->timer = NULL;
}
} else if (events->timer == NULL) {
/* Add new timer */
loop = hevent_loop(events->io);
events->timer = htimer_add(loop, redisLibhvTimeout, millis, 1);
hevent_set_userdata(events->timer, events->io);
} else {
/* Update existing timer */
htimer_reset(events->timer, millis);
}
}
static int redisLibhvAttach(redisAsyncContext* ac, hloop_t* loop) {
redisContext *c = &(ac->c);
redisLibhvEvents *events;
hio_t* io = NULL;
if (ac->ev.data != NULL) {
return REDIS_ERR;
}
/* Create container struct to keep track of our io and any timer */
events = (redisLibhvEvents*)hi_malloc(sizeof(*events));
if (events == NULL) {
return REDIS_ERR;
}
io = hio_get(loop, c->fd);
if (io == NULL) {
hi_free(events);
return REDIS_ERR;
}
hevent_set_userdata(io, ac);
events->io = io;
events->timer = NULL;
ac->ev.addRead = redisLibhvAddRead;
ac->ev.delRead = redisLibhvDelRead;
ac->ev.addWrite = redisLibhvAddWrite;
ac->ev.delWrite = redisLibhvDelWrite;
ac->ev.cleanup = redisLibhvCleanup;
ac->ev.scheduleTimer = redisLibhvSetTimeout;
ac->ev.data = events;
return REDIS_OK;
}
#endif
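A minimal driver for this adapter might look as follows. This is a sketch: `hloop_new`, `hloop_run`, and `hloop_free` are standard libhv entry points, the include path assumes an installed hiredis, and error handling is abbreviated:
```c
#include <hiredis/async.h>
#include <hiredis/adapters/libhv.h>
int main(void) {
    hloop_t *loop = hloop_new(0);
    redisAsyncContext *ac = redisAsyncConnect("127.0.0.1", 6379);
    if (ac->err || redisLibhvAttach(ac, loop) != REDIS_OK) {
        /* inspect ac->errstr, then redisAsyncFree(ac) and hloop_free(&loop) */
        return 1;
    }
    /* issue redisAsyncCommand() calls, then hand control to libhv */
    hloop_run(loop);
    hloop_free(&loop);
    return 0;
}
```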
deps/hiredis/adapters/libsdevent.h
@ -0,0 +1,177 @@
#ifndef HIREDIS_LIBSDEVENT_H
#define HIREDIS_LIBSDEVENT_H
#include <systemd/sd-event.h>
#include "../hiredis.h"
#include "../async.h"
#define REDIS_LIBSDEVENT_DELETED 0x01
#define REDIS_LIBSDEVENT_ENTERED 0x02
typedef struct redisLibsdeventEvents {
redisAsyncContext *context;
struct sd_event *event;
struct sd_event_source *fdSource;
struct sd_event_source *timerSource;
int fd;
short flags;
short state;
} redisLibsdeventEvents;
static void redisLibsdeventDestroy(redisLibsdeventEvents *e) {
if (e->fdSource) {
e->fdSource = sd_event_source_disable_unref(e->fdSource);
}
if (e->timerSource) {
e->timerSource = sd_event_source_disable_unref(e->timerSource);
}
sd_event_unref(e->event);
hi_free(e);
}
static int redisLibsdeventTimeoutHandler(sd_event_source *s, uint64_t usec, void *userdata) {
((void)s);
((void)usec);
redisLibsdeventEvents *e = (redisLibsdeventEvents*)userdata;
redisAsyncHandleTimeout(e->context);
return 0;
}
static int redisLibsdeventHandler(sd_event_source *s, int fd, uint32_t event, void *userdata) {
((void)s);
((void)fd);
redisLibsdeventEvents *e = (redisLibsdeventEvents*)userdata;
e->state |= REDIS_LIBSDEVENT_ENTERED;
#define CHECK_DELETED() if (e->state & REDIS_LIBSDEVENT_DELETED) {\
redisLibsdeventDestroy(e);\
return 0; \
}
if ((event & EPOLLIN) && e->context && (e->state & REDIS_LIBSDEVENT_DELETED) == 0) {
redisAsyncHandleRead(e->context);
CHECK_DELETED();
}
if ((event & EPOLLOUT) && e->context && (e->state & REDIS_LIBSDEVENT_DELETED) == 0) {
redisAsyncHandleWrite(e->context);
CHECK_DELETED();
}
e->state &= ~REDIS_LIBSDEVENT_ENTERED;
#undef CHECK_DELETED
return 0;
}
static void redisLibsdeventAddRead(void *userdata) {
redisLibsdeventEvents *e = (redisLibsdeventEvents*)userdata;
if (e->flags & EPOLLIN) {
return;
}
e->flags |= EPOLLIN;
if (e->flags & EPOLLOUT) {
sd_event_source_set_io_events(e->fdSource, e->flags);
} else {
sd_event_add_io(e->event, &e->fdSource, e->fd, e->flags, redisLibsdeventHandler, e);
}
}
static void redisLibsdeventDelRead(void *userdata) {
redisLibsdeventEvents *e = (redisLibsdeventEvents*)userdata;
e->flags &= ~EPOLLIN;
if (e->flags) {
sd_event_source_set_io_events(e->fdSource, e->flags);
} else {
e->fdSource = sd_event_source_disable_unref(e->fdSource);
}
}
static void redisLibsdeventAddWrite(void *userdata) {
redisLibsdeventEvents *e = (redisLibsdeventEvents*)userdata;
if (e->flags & EPOLLOUT) {
return;
}
e->flags |= EPOLLOUT;
if (e->flags & EPOLLIN) {
sd_event_source_set_io_events(e->fdSource, e->flags);
} else {
sd_event_add_io(e->event, &e->fdSource, e->fd, e->flags, redisLibsdeventHandler, e);
}
}
static void redisLibsdeventDelWrite(void *userdata) {
redisLibsdeventEvents *e = (redisLibsdeventEvents*)userdata;
e->flags &= ~EPOLLOUT;
if (e->flags) {
sd_event_source_set_io_events(e->fdSource, e->flags);
} else {
e->fdSource = sd_event_source_disable_unref(e->fdSource);
}
}
static void redisLibsdeventCleanup(void *userdata) {
redisLibsdeventEvents *e = (redisLibsdeventEvents*)userdata;
if (!e) {
return;
}
if (e->state & REDIS_LIBSDEVENT_ENTERED) {
e->state |= REDIS_LIBSDEVENT_DELETED;
} else {
redisLibsdeventDestroy(e);
}
}
static void redisLibsdeventSetTimeout(void *userdata, struct timeval tv) {
redisLibsdeventEvents *e = (redisLibsdeventEvents *)userdata;
uint64_t usec = tv.tv_sec * 1000000 + tv.tv_usec;
if (!e->timerSource) {
sd_event_add_time_relative(e->event, &e->timerSource, CLOCK_MONOTONIC, usec, 1, redisLibsdeventTimeoutHandler, e);
} else {
sd_event_source_set_time_relative(e->timerSource, usec);
}
}
static int redisLibsdeventAttach(redisAsyncContext *ac, struct sd_event *event) {
redisContext *c = &(ac->c);
redisLibsdeventEvents *e;
/* Nothing should be attached when something is already attached */
if (ac->ev.data != NULL)
return REDIS_ERR;
/* Create container for context and r/w events */
e = (redisLibsdeventEvents*)hi_calloc(1, sizeof(*e));
if (e == NULL)
return REDIS_ERR;
/* Initialize and increase event refcount */
e->context = ac;
e->event = event;
e->fd = c->fd;
sd_event_ref(event);
/* Register functions to start/stop listening for events */
ac->ev.addRead = redisLibsdeventAddRead;
ac->ev.delRead = redisLibsdeventDelRead;
ac->ev.addWrite = redisLibsdeventAddWrite;
ac->ev.delWrite = redisLibsdeventDelWrite;
ac->ev.cleanup = redisLibsdeventCleanup;
ac->ev.scheduleTimer = redisLibsdeventSetTimeout;
ac->ev.data = e;
return REDIS_OK;
}
#endif
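Usage follows the same pattern as the other adapters. The sketch below assumes the standard sd-event calls `sd_event_default`, `sd_event_loop`, and `sd_event_unref`, and abbreviates error handling:
```c
#include <hiredis/async.h>
#include <hiredis/adapters/libsdevent.h>
int main(void) {
    struct sd_event *event = NULL;
    if (sd_event_default(&event) < 0)
        return 1;
    redisAsyncContext *ac = redisAsyncConnect("127.0.0.1", 6379);
    if (ac->err || redisLibsdeventAttach(ac, event) != REDIS_OK) {
        /* inspect ac->errstr, then redisAsyncFree(ac) and sd_event_unref(event) */
        return 1;
    }
    /* queue commands with redisAsyncCommand(), then run the loop */
    sd_event_loop(event);
    sd_event_unref(event);
    return 0;
}
```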
@ -30,6 +30,10 @@ static void redisLibuvPoll(uv_poll_t* handle, int status, int events) {
static void redisLibuvAddRead(void *privdata) {
redisLibuvEvents* p = (redisLibuvEvents*)privdata;
if (p->events & UV_READABLE) {
return;
}
p->events |= UV_READABLE;
uv_poll_start(&p->handle, p->events, redisLibuvPoll);
@ -52,6 +56,10 @@ static void redisLibuvDelRead(void *privdata) {
static void redisLibuvAddWrite(void *privdata) {
redisLibuvEvents* p = (redisLibuvEvents*)privdata;
if (p->events & UV_WRITABLE) {
return;
}
p->events |= UV_WRITABLE;
uv_poll_start(&p->handle, p->events, redisLibuvPoll);
deps/hiredis/adapters/poll.h
@ -0,0 +1,197 @@
#ifndef HIREDIS_POLL_H
#define HIREDIS_POLL_H
#include "../async.h"
#include "../sockcompat.h"
#include <string.h> // for memset
#include <errno.h>
/* Values to return from redisPollTick */
#define REDIS_POLL_HANDLED_READ 1
#define REDIS_POLL_HANDLED_WRITE 2
#define REDIS_POLL_HANDLED_TIMEOUT 4
/* An adapter to allow manual polling of the async context by checking the state
* of the underlying file descriptor. Useful in cases where there is no formal
* IO event loop but regular ticking can be used, such as in game engines. */
typedef struct redisPollEvents {
redisAsyncContext *context;
redisFD fd;
char reading, writing;
char in_tick;
char deleted;
double deadline;
} redisPollEvents;
static double redisPollTimevalToDouble(struct timeval *tv) {
if (tv == NULL)
return 0.0;
return tv->tv_sec + tv->tv_usec / 1000000.00;
}
static double redisPollGetNow(void) {
#ifndef _MSC_VER
struct timeval tv;
gettimeofday(&tv,NULL);
return redisPollTimevalToDouble(&tv);
#else
FILETIME ft;
ULARGE_INTEGER li;
GetSystemTimeAsFileTime(&ft);
li.HighPart = ft.dwHighDateTime;
li.LowPart = ft.dwLowDateTime;
return (double)li.QuadPart * 1e-7;
#endif
}
/* Poll for io, handling any pending callbacks. The timeout argument can be
* positive to wait for a maximum given time for IO, zero to poll, or negative
* to wait forever */
static int redisPollTick(redisAsyncContext *ac, double timeout) {
int reading, writing;
struct pollfd pfd;
int handled;
int ns;
int itimeout;
redisPollEvents *e = (redisPollEvents*)ac->ev.data;
if (!e)
return 0;
/* local flags, won't get changed during callbacks */
reading = e->reading;
writing = e->writing;
if (!reading && !writing)
return 0;
pfd.fd = e->fd;
pfd.events = 0;
if (reading)
pfd.events = POLLIN;
if (writing)
pfd.events |= POLLOUT;
if (timeout >= 0.0) {
itimeout = (int)(timeout * 1000.0);
} else {
itimeout = -1;
}
ns = poll(&pfd, 1, itimeout);
if (ns < 0) {
/* ignore the EINTR error */
if (errno != EINTR)
return ns;
ns = 0;
}
handled = 0;
e->in_tick = 1;
if (ns) {
if (reading && (pfd.revents & POLLIN)) {
redisAsyncHandleRead(ac);
handled |= REDIS_POLL_HANDLED_READ;
}
/* on Windows, connection failure is indicated with the Exception fdset.
* handle it the same as writable. */
if (writing && (pfd.revents & (POLLOUT | POLLERR))) {
/* context Read callback may have caused context to be deleted, e.g.
by doing an redisAsyncDisconnect() */
if (!e->deleted) {
redisAsyncHandleWrite(ac);
handled |= REDIS_POLL_HANDLED_WRITE;
}
}
}
/* perform timeouts */
if (!e->deleted && e->deadline != 0.0) {
double now = redisPollGetNow();
if (now >= e->deadline) {
/* deadline has passed. disable timeout and perform callback */
e->deadline = 0.0;
redisAsyncHandleTimeout(ac);
handled |= REDIS_POLL_HANDLED_TIMEOUT;
}
}
/* do a delayed cleanup if required */
if (e->deleted)
hi_free(e);
else
e->in_tick = 0;
return handled;
}
static void redisPollAddRead(void *data) {
redisPollEvents *e = (redisPollEvents*)data;
e->reading = 1;
}
static void redisPollDelRead(void *data) {
redisPollEvents *e = (redisPollEvents*)data;
e->reading = 0;
}
static void redisPollAddWrite(void *data) {
redisPollEvents *e = (redisPollEvents*)data;
e->writing = 1;
}
static void redisPollDelWrite(void *data) {
redisPollEvents *e = (redisPollEvents*)data;
e->writing = 0;
}
static void redisPollCleanup(void *data) {
redisPollEvents *e = (redisPollEvents*)data;
/* if we are currently processing a tick, postpone deletion */
if (e->in_tick)
e->deleted = 1;
else
hi_free(e);
}
static void redisPollScheduleTimer(void *data, struct timeval tv)
{
redisPollEvents *e = (redisPollEvents*)data;
double now = redisPollGetNow();
e->deadline = now + redisPollTimevalToDouble(&tv);
}
static int redisPollAttach(redisAsyncContext *ac) {
redisContext *c = &(ac->c);
redisPollEvents *e;
/* Nothing should be attached when something is already attached */
if (ac->ev.data != NULL)
return REDIS_ERR;
/* Create container for context and r/w events */
e = (redisPollEvents*)hi_malloc(sizeof(*e));
if (e == NULL)
return REDIS_ERR;
memset(e, 0, sizeof(*e));
e->context = ac;
e->fd = c->fd;
e->reading = e->writing = 0;
e->in_tick = e->deleted = 0;
e->deadline = 0.0;
/* Register functions to start/stop listening for events */
ac->ev.addRead = redisPollAddRead;
ac->ev.delRead = redisPollDelRead;
ac->ev.addWrite = redisPollAddWrite;
ac->ev.delWrite = redisPollDelWrite;
ac->ev.scheduleTimer = redisPollScheduleTimer;
ac->ev.cleanup = redisPollCleanup;
ac->ev.data = e;
return REDIS_OK;
}
#endif /* HIREDIS_POLL_H */
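Because there is no event loop here, the application drives the context by calling `redisPollTick()` regularly. A sketch of a frame-style loop; the `runLoop` wrapper, the `running` flag, and the per-frame work are placeholders:
```c
#include <hiredis/async.h>
#include <hiredis/adapters/poll.h>
void runLoop(void) {
    redisAsyncContext *ac = redisAsyncConnect("127.0.0.1", 6379);
    if (ac->err || redisPollAttach(ac) != REDIS_OK) {
        redisAsyncFree(ac);
        return;
    }
    int running = 1;
    while (running) {
        /* ... per-frame application work, possibly issuing redisAsyncCommand() ... */
        redisPollTick(ac, 0.0);   /* 0.0: poll without blocking */
    }
    redisAsyncFree(ac);
}
```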
deps/hiredis/adapters/redismoduleapi.h
@ -0,0 +1,144 @@
#ifndef __HIREDIS_REDISMODULEAPI_H__
#define __HIREDIS_REDISMODULEAPI_H__
#include "redismodule.h"
#include "../async.h"
#include "../hiredis.h"
#include <sys/types.h>
typedef struct redisModuleEvents {
redisAsyncContext *context;
RedisModuleCtx *module_ctx;
int fd;
int reading, writing;
int timer_active;
RedisModuleTimerID timer_id;
} redisModuleEvents;
static inline void redisModuleReadEvent(int fd, void *privdata, int mask) {
(void) fd;
(void) mask;
redisModuleEvents *e = (redisModuleEvents*)privdata;
redisAsyncHandleRead(e->context);
}
static inline void redisModuleWriteEvent(int fd, void *privdata, int mask) {
(void) fd;
(void) mask;
redisModuleEvents *e = (redisModuleEvents*)privdata;
redisAsyncHandleWrite(e->context);
}
static inline void redisModuleAddRead(void *privdata) {
redisModuleEvents *e = (redisModuleEvents*)privdata;
if (!e->reading) {
e->reading = 1;
RedisModule_EventLoopAdd(e->fd, REDISMODULE_EVENTLOOP_READABLE, redisModuleReadEvent, e);
}
}
static inline void redisModuleDelRead(void *privdata) {
redisModuleEvents *e = (redisModuleEvents*)privdata;
if (e->reading) {
e->reading = 0;
RedisModule_EventLoopDel(e->fd, REDISMODULE_EVENTLOOP_READABLE);
}
}
static inline void redisModuleAddWrite(void *privdata) {
redisModuleEvents *e = (redisModuleEvents*)privdata;
if (!e->writing) {
e->writing = 1;
RedisModule_EventLoopAdd(e->fd, REDISMODULE_EVENTLOOP_WRITABLE, redisModuleWriteEvent, e);
}
}
static inline void redisModuleDelWrite(void *privdata) {
redisModuleEvents *e = (redisModuleEvents*)privdata;
if (e->writing) {
e->writing = 0;
RedisModule_EventLoopDel(e->fd, REDISMODULE_EVENTLOOP_WRITABLE);
}
}
static inline void redisModuleStopTimer(void *privdata) {
redisModuleEvents *e = (redisModuleEvents*)privdata;
if (e->timer_active) {
RedisModule_StopTimer(e->module_ctx, e->timer_id, NULL);
}
e->timer_active = 0;
}
static inline void redisModuleCleanup(void *privdata) {
redisModuleEvents *e = (redisModuleEvents*)privdata;
redisModuleDelRead(privdata);
redisModuleDelWrite(privdata);
redisModuleStopTimer(privdata);
hi_free(e);
}
static inline void redisModuleTimeout(RedisModuleCtx *ctx, void *privdata) {
(void) ctx;
redisModuleEvents *e = (redisModuleEvents*)privdata;
e->timer_active = 0;
redisAsyncHandleTimeout(e->context);
}
static inline void redisModuleSetTimeout(void *privdata, struct timeval tv) {
redisModuleEvents* e = (redisModuleEvents*)privdata;
redisModuleStopTimer(privdata);
mstime_t millis = tv.tv_sec * 1000 + tv.tv_usec / 1000.0;
e->timer_id = RedisModule_CreateTimer(e->module_ctx, millis, redisModuleTimeout, e);
e->timer_active = 1;
}
/* Check if Redis version is compatible with the adapter. */
static inline int redisModuleCompatibilityCheck(void) {
if (!RedisModule_EventLoopAdd ||
!RedisModule_EventLoopDel ||
!RedisModule_CreateTimer ||
!RedisModule_StopTimer) {
return REDIS_ERR;
}
return REDIS_OK;
}
static inline int redisModuleAttach(redisAsyncContext *ac, RedisModuleCtx *module_ctx) {
redisContext *c = &(ac->c);
redisModuleEvents *e;
/* Nothing should be attached when something is already attached */
if (ac->ev.data != NULL)
return REDIS_ERR;
/* Create container for context and r/w events */
e = (redisModuleEvents*)hi_malloc(sizeof(*e));
if (e == NULL)
return REDIS_ERR;
e->context = ac;
e->module_ctx = module_ctx;
e->fd = c->fd;
e->reading = e->writing = 0;
e->timer_active = 0;
/* Register functions to start/stop listening for events */
ac->ev.addRead = redisModuleAddRead;
ac->ev.delRead = redisModuleDelRead;
ac->ev.addWrite = redisModuleAddWrite;
ac->ev.delWrite = redisModuleDelWrite;
ac->ev.cleanup = redisModuleCleanup;
ac->ev.scheduleTimer = redisModuleSetTimeout;
ac->ev.data = e;
return REDIS_OK;
}
#endif
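Inside a module, a connection helper might first verify that the required module APIs are present and then attach. This sketch omits module boilerplate (`RedisModule_Init`, command registration) and assumes `ctx` is the `RedisModuleCtx` handed to the module by Redis:
```c
#include "redismodule.h"
#include <hiredis/async.h>
#include <hiredis/adapters/redismoduleapi.h>
/* Create an async connection driven by Redis' own event loop. */
redisAsyncContext *moduleConnect(RedisModuleCtx *ctx) {
    if (redisModuleCompatibilityCheck() != REDIS_OK) {
        RedisModule_Log(ctx, "warning", "event loop API not available");
        return NULL;
    }
    redisAsyncContext *ac = redisAsyncConnect("127.0.0.1", 6379);
    if (ac->err || redisModuleAttach(ac, ctx) != REDIS_OK) {
        redisAsyncFree(ac);
        return NULL;
    }
    return ac;
}
```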
deps/hiredis/async.c
@ -140,6 +140,7 @@ static redisAsyncContext *redisAsyncInitialize(redisContext *c) {
ac->ev.scheduleTimer = NULL;
ac->onConnect = NULL;
ac->onConnectNC = NULL;
ac->onDisconnect = NULL;
ac->replies.head = NULL;
@ -148,6 +149,7 @@ static redisAsyncContext *redisAsyncInitialize(redisContext *c) {
ac->sub.replies.tail = NULL;
ac->sub.channels = channels;
ac->sub.patterns = patterns;
ac->sub.pending_unsubs = 0;
return ac;
oom:
@ -225,17 +227,34 @@ redisAsyncContext *redisAsyncConnectUnix(const char *path) {
return redisAsyncConnectWithOptions(&options);
}
int redisAsyncSetConnectCallback(redisAsyncContext *ac, redisConnectCallback *fn) {
if (ac->onConnect == NULL) {
ac->onConnect = fn;
static int
redisAsyncSetConnectCallbackImpl(redisAsyncContext *ac, redisConnectCallback *fn,
redisConnectCallbackNC *fn_nc)
{
/* If either are already set, this is an error */
if (ac->onConnect || ac->onConnectNC)
return REDIS_ERR;
/* The common way to detect an established connection is to wait for
* the first write event to be fired. This assumes the related event
* library functions are already set. */
_EL_ADD_WRITE(ac);
return REDIS_OK;
if (fn) {
ac->onConnect = fn;
} else if (fn_nc) {
ac->onConnectNC = fn_nc;
}
return REDIS_ERR;
/* The common way to detect an established connection is to wait for
* the first write event to be fired. This assumes the related event
* library functions are already set. */
_EL_ADD_WRITE(ac);
return REDIS_OK;
}
int redisAsyncSetConnectCallback(redisAsyncContext *ac, redisConnectCallback *fn) {
return redisAsyncSetConnectCallbackImpl(ac, fn, NULL);
}
int redisAsyncSetConnectCallbackNC(redisAsyncContext *ac, redisConnectCallbackNC *fn) {
return redisAsyncSetConnectCallbackImpl(ac, NULL, fn);
}
int redisAsyncSetDisconnectCallback(redisAsyncContext *ac, redisDisconnectCallback *fn) {
@ -302,6 +321,43 @@ static void __redisRunPushCallback(redisAsyncContext *ac, redisReply *reply) {
}
}
static void __redisRunConnectCallback(redisAsyncContext *ac, int status)
{
if (ac->onConnect == NULL && ac->onConnectNC == NULL)
return;
if (!(ac->c.flags & REDIS_IN_CALLBACK)) {
ac->c.flags |= REDIS_IN_CALLBACK;
if (ac->onConnect) {
ac->onConnect(ac, status);
} else {
ac->onConnectNC(ac, status);
}
ac->c.flags &= ~REDIS_IN_CALLBACK;
} else {
/* already in callback */
if (ac->onConnect) {
ac->onConnect(ac, status);
} else {
ac->onConnectNC(ac, status);
}
}
}
static void __redisRunDisconnectCallback(redisAsyncContext *ac, int status)
{
if (ac->onDisconnect) {
if (!(ac->c.flags & REDIS_IN_CALLBACK)) {
ac->c.flags |= REDIS_IN_CALLBACK;
ac->onDisconnect(ac, status);
ac->c.flags &= ~REDIS_IN_CALLBACK;
} else {
/* already in callback */
ac->onDisconnect(ac, status);
}
}
}
/* Helper function to free the context. */
static void __redisAsyncFree(redisAsyncContext *ac) {
redisContext *c = &(ac->c);
@ -337,12 +393,11 @@ static void __redisAsyncFree(redisAsyncContext *ac) {
/* Execute disconnect callback. When redisAsyncFree() initiated destroying
* this context, the status will always be REDIS_OK. */
if (ac->onDisconnect && (c->flags & REDIS_CONNECTED)) {
if (c->flags & REDIS_FREEING) {
ac->onDisconnect(ac,REDIS_OK);
} else {
ac->onDisconnect(ac,(ac->err == 0) ? REDIS_OK : REDIS_ERR);
}
if (c->flags & REDIS_CONNECTED) {
int status = ac->err == 0 ? REDIS_OK : REDIS_ERR;
if (c->flags & REDIS_FREEING)
status = REDIS_OK;
__redisRunDisconnectCallback(ac, status);
}
if (ac->dataCleanup) {
@ -358,7 +413,11 @@ static void __redisAsyncFree(redisAsyncContext *ac) {
* free'ing. To do so, a flag is set on the context which is picked up by
* redisProcessCallbacks(). Otherwise, the context is immediately free'd. */
void redisAsyncFree(redisAsyncContext *ac) {
if (ac == NULL)
return;
redisContext *c = &(ac->c);
c->flags |= REDIS_FREEING;
if (!(c->flags & REDIS_IN_CALLBACK))
__redisAsyncFree(ac);
@ -411,11 +470,11 @@ void redisAsyncDisconnect(redisAsyncContext *ac) {
static int __redisGetSubscribeCallback(redisAsyncContext *ac, redisReply *reply, redisCallback *dstcb) {
redisContext *c = &(ac->c);
dict *callbacks;
redisCallback *cb;
redisCallback *cb = NULL;
dictEntry *de;
int pvariant;
char *stype;
hisds sname;
hisds sname = NULL;
/* Match reply with the expected format of a pushed message.
* The type and number of elements (3 to 4) are specified at:
@ -432,42 +491,43 @@ static int __redisGetSubscribeCallback(redisAsyncContext *ac, redisReply *reply,
callbacks = ac->sub.channels;
/* Locate the right callback */
assert(reply->element[1]->type == REDIS_REPLY_STRING);
sname = hi_sdsnewlen(reply->element[1]->str,reply->element[1]->len);
if (sname == NULL)
goto oom;
if (reply->element[1]->type == REDIS_REPLY_STRING) {
sname = hi_sdsnewlen(reply->element[1]->str,reply->element[1]->len);
if (sname == NULL) goto oom;
de = dictFind(callbacks,sname);
if (de != NULL) {
cb = dictGetEntryVal(de);
/* If this is an subscribe reply decrease pending counter. */
if (strcasecmp(stype+pvariant,"subscribe") == 0) {
cb->pending_subs -= 1;
if ((de = dictFind(callbacks,sname)) != NULL) {
cb = dictGetEntryVal(de);
memcpy(dstcb,cb,sizeof(*dstcb));
}
}
memcpy(dstcb,cb,sizeof(*dstcb));
/* If this is an subscribe reply decrease pending counter. */
if (strcasecmp(stype+pvariant,"subscribe") == 0) {
assert(cb != NULL);
cb->pending_subs -= 1;
/* If this is an unsubscribe message, remove it. */
if (strcasecmp(stype+pvariant,"unsubscribe") == 0) {
if (cb->pending_subs == 0)
dictDelete(callbacks,sname);
} else if (strcasecmp(stype+pvariant,"unsubscribe") == 0) {
if (cb == NULL)
ac->sub.pending_unsubs -= 1;
else if (cb->pending_subs == 0)
dictDelete(callbacks,sname);
/* If this was the last unsubscribe message, revert to
* non-subscribe mode. */
assert(reply->element[2]->type == REDIS_REPLY_INTEGER);
/* If this was the last unsubscribe message, revert to
* non-subscribe mode. */
assert(reply->element[2]->type == REDIS_REPLY_INTEGER);
/* Unset subscribed flag only when no pipelined pending subscribe. */
if (reply->element[2]->integer == 0
&& dictSize(ac->sub.channels) == 0
&& dictSize(ac->sub.patterns) == 0) {
c->flags &= ~REDIS_SUBSCRIBED;
/* Unset subscribed flag only when no pipelined pending subscribe
* or pending unsubscribe replies. */
if (reply->element[2]->integer == 0
&& dictSize(ac->sub.channels) == 0
&& dictSize(ac->sub.patterns) == 0
&& ac->sub.pending_unsubs == 0) {
c->flags &= ~REDIS_SUBSCRIBED;
/* Move ongoing regular command callbacks. */
redisCallback cb;
while (__redisShiftCallback(&ac->sub.replies,&cb) == REDIS_OK) {
__redisPushCallback(&ac->replies,&cb);
}
/* Move ongoing regular command callbacks. */
redisCallback cb;
while (__redisShiftCallback(&ac->sub.replies,&cb) == REDIS_OK) {
__redisPushCallback(&ac->replies,&cb);
}
}
}
@ -479,6 +539,7 @@ static int __redisGetSubscribeCallback(redisAsyncContext *ac, redisReply *reply,
return REDIS_OK;
oom:
__redisSetError(&(ac->c), REDIS_ERR_OOM, "Out of memory");
__redisAsyncCopyError(ac);
return REDIS_ERR;
}
@ -540,7 +601,7 @@ void redisProcessCallbacks(redisAsyncContext *ac) {
/* Even if the context is subscribed, pending regular
* callbacks will get a reply before pub/sub messages arrive. */
redisCallback cb = {NULL, NULL, 0, NULL};
redisCallback cb = {NULL, NULL, 0, 0, NULL};
if (__redisShiftCallback(&ac->replies,&cb) != REDIS_OK) {
/*
* A spontaneous reply in a not-subscribed context can be the error
@ -601,7 +662,7 @@ void redisProcessCallbacks(redisAsyncContext *ac) {
}
static void __redisAsyncHandleConnectFailure(redisAsyncContext *ac) {
if (ac->onConnect) ac->onConnect(ac, REDIS_ERR);
__redisRunConnectCallback(ac, REDIS_ERR);
__redisAsyncDisconnect(ac);
}
@ -626,8 +687,19 @@ static int __redisAsyncHandleConnect(redisAsyncContext *ac) {
return REDIS_ERR;
}
if (ac->onConnect) ac->onConnect(ac, REDIS_OK);
/* flag us as fully connected, but allow the callback
* to disconnect. For that reason, permit the function
* to delete the context here after the callback returns.
*/
c->flags |= REDIS_CONNECTED;
__redisRunConnectCallback(ac, REDIS_OK);
if ((ac->c.flags & REDIS_DISCONNECTING)) {
redisAsyncDisconnect(ac);
return REDIS_ERR;
} else if ((ac->c.flags & REDIS_FREEING)) {
redisAsyncFree(ac);
return REDIS_ERR;
}
return REDIS_OK;
} else {
return REDIS_OK;
@ -651,6 +723,8 @@ void redisAsyncRead(redisAsyncContext *ac) {
*/
void redisAsyncHandleRead(redisAsyncContext *ac) {
redisContext *c = &(ac->c);
/* must not be called from a callback */
assert(!(c->flags & REDIS_IN_CALLBACK));
if (!(c->flags & REDIS_CONNECTED)) {
/* Abort connect was not successful. */
@ -684,6 +758,8 @@ void redisAsyncWrite(redisAsyncContext *ac) {
void redisAsyncHandleWrite(redisAsyncContext *ac) {
redisContext *c = &(ac->c);
/* must not be called from a callback */
assert(!(c->flags & REDIS_IN_CALLBACK));
if (!(c->flags & REDIS_CONNECTED)) {
/* Abort connect was not successful. */
@ -700,6 +776,8 @@ void redisAsyncHandleWrite(redisAsyncContext *ac) {
void redisAsyncHandleTimeout(redisAsyncContext *ac) {
redisContext *c = &(ac->c);
redisCallback cb;
/* must not be called from a callback */
assert(!(c->flags & REDIS_IN_CALLBACK));
if ((c->flags & REDIS_CONNECTED)) {
if (ac->replies.head == NULL && ac->sub.replies.head == NULL) {
@ -719,8 +797,8 @@ void redisAsyncHandleTimeout(redisAsyncContext *ac) {
__redisAsyncCopyError(ac);
}
if (!(c->flags & REDIS_CONNECTED) && ac->onConnect) {
ac->onConnect(ac, REDIS_ERR);
if (!(c->flags & REDIS_CONNECTED)) {
__redisRunConnectCallback(ac, REDIS_ERR);
}
while (__redisShiftCallback(&ac->replies, &cb) == REDIS_OK) {
@ -757,6 +835,7 @@ static int __redisAsyncCommand(redisAsyncContext *ac, redisCallbackFn *fn, void
redisContext *c = &(ac->c);
redisCallback cb;
struct dict *cbdict;
dictIterator it;
dictEntry *de;
redisCallback *existcb;
int pvariant, hasnext;
@ -773,6 +852,7 @@ static int __redisAsyncCommand(redisAsyncContext *ac, redisCallbackFn *fn, void
cb.fn = fn;
cb.privdata = privdata;
cb.pending_subs = 1;
cb.unsubscribe_sent = 0;
/* Find out which command will be appended. */
p = nextArgument(cmd,&cstr,&clen);
@ -812,6 +892,51 @@ static int __redisAsyncCommand(redisAsyncContext *ac, redisCallbackFn *fn, void
* subscribed to one or more channels or patterns. */
if (!(c->flags & REDIS_SUBSCRIBED)) return REDIS_ERR;
if (pvariant)
cbdict = ac->sub.patterns;
else
cbdict = ac->sub.channels;
if (hasnext) {
/* Send an unsubscribe with specific channels/patterns.
* Bookkeeping the number of expected replies */
while ((p = nextArgument(p,&astr,&alen)) != NULL) {
sname = hi_sdsnewlen(astr,alen);
if (sname == NULL)
goto oom;
de = dictFind(cbdict,sname);
if (de != NULL) {
existcb = dictGetEntryVal(de);
if (existcb->unsubscribe_sent == 0)
existcb->unsubscribe_sent = 1;
else
/* Already sent, reply to be ignored */
ac->sub.pending_unsubs += 1;
} else {
/* Not subscribed to, reply to be ignored */
ac->sub.pending_unsubs += 1;
}
hi_sdsfree(sname);
}
} else {
/* Send an unsubscribe without specific channels/patterns.
* Bookkeeping the number of expected replies */
int no_subs = 1;
dictInitIterator(&it,cbdict);
while ((de = dictNext(&it)) != NULL) {
existcb = dictGetEntryVal(de);
if (existcb->unsubscribe_sent == 0) {
existcb->unsubscribe_sent = 1;
no_subs = 0;
}
}
/* Unsubscribing from all channels/patterns when none are
* subscribed to results in a single reply to be ignored. */
if (no_subs == 1)
ac->sub.pending_unsubs += 1;
}
/* (P)UNSUBSCRIBE does not have its own response: every channel or
* pattern that is unsubscribed will receive a message. This means we
* should not append a callback function for this command. */
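The pending_unsubs bookkeeping added above is what makes mixed unsubscribe requests safe. A minimal sketch of the call pattern it covers, mirroring the updated pubsub test further down (ac is an attached redisAsyncContext, subscribeCallback is a hypothetical handler):
/* Sketch only, not part of the diff: "X" was never subscribed and "A" is
 * named twice, so the surplus replies are absorbed via pending_unsubs. */
redisAsyncCommand(ac, subscribeCallback, NULL, "SUBSCRIBE A B");
redisAsyncCommand(ac, NULL, NULL, "UNSUBSCRIBE B X A A");
/* A bare PUNSUBSCRIBE with no patterns subscribed yields one reply to ignore. */
redisAsyncCommand(ac, NULL, NULL, "PUNSUBSCRIBE");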


@ -46,6 +46,7 @@ typedef struct redisCallback {
struct redisCallback *next; /* simple singly linked list */
redisCallbackFn *fn;
int pending_subs;
int unsubscribe_sent;
void *privdata;
} redisCallback;
@ -57,6 +58,7 @@ typedef struct redisCallbackList {
/* Connection callback prototypes */
typedef void (redisDisconnectCallback)(const struct redisAsyncContext*, int status);
typedef void (redisConnectCallback)(const struct redisAsyncContext*, int status);
typedef void (redisConnectCallbackNC)(struct redisAsyncContext *, int status);
typedef void(redisTimerCallback)(void *timer, void *privdata);
/* Context for an async connection to Redis */
@ -92,6 +94,7 @@ typedef struct redisAsyncContext {
/* Called when the first write event was received. */
redisConnectCallback *onConnect;
redisConnectCallbackNC *onConnectNC;
/* Regular command callbacks */
redisCallbackList replies;
@ -105,6 +108,7 @@ typedef struct redisAsyncContext {
redisCallbackList replies;
struct dict *channels;
struct dict *patterns;
int pending_unsubs;
} sub;
/* Any configured RESP3 PUSH handler */
@ -119,6 +123,7 @@ redisAsyncContext *redisAsyncConnectBindWithReuse(const char *ip, int port,
const char *source_addr);
redisAsyncContext *redisAsyncConnectUnix(const char *path);
int redisAsyncSetConnectCallback(redisAsyncContext *ac, redisConnectCallback *fn);
int redisAsyncSetConnectCallbackNC(redisAsyncContext *ac, redisConnectCallbackNC *fn);
int redisAsyncSetDisconnectCallback(redisAsyncContext *ac, redisDisconnectCallback *fn);
redisAsyncPushFn *redisAsyncSetPushCallback(redisAsyncContext *ac, redisAsyncPushFn *fn);
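The new non-const connect callback (redisConnectCallbackNC, registered with redisAsyncSetConnectCallbackNC) lets the handler modify the context it receives. A hedged sketch of its intended use; the handler name and address are placeholders:
/* Sketch: the non-const context allows mutation, e.g. installing a timeout
 * once the connection is established. */
static void myConnectNC(redisAsyncContext *ac, int status) {
    if (status != REDIS_OK) {
        printf("connect failed: %s\n", ac->errstr);
        return;
    }
    redisAsyncSetTimeout(ac, (struct timeval){ .tv_sec = 1, .tv_usec = 0 });
}

/* ...in the setup code: */
redisAsyncContext *ac = redisAsyncConnect("127.0.0.1", 6379);
redisAsyncSetConnectCallbackNC(ac, myConnectNC);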


@ -25,12 +25,24 @@ if (LIBEVENT)
TARGET_LINK_LIBRARIES(example-libevent hiredis event)
ENDIF()
FIND_PATH(LIBHV hv/hv.h)
IF (LIBHV)
ADD_EXECUTABLE(example-libhv example-libhv.c)
TARGET_LINK_LIBRARIES(example-libhv hiredis hv)
ENDIF()
FIND_PATH(LIBUV uv.h)
IF (LIBUV)
ADD_EXECUTABLE(example-libuv example-libuv.c)
TARGET_LINK_LIBRARIES(example-libuv hiredis uv)
ENDIF()
FIND_PATH(LIBSDEVENT systemd/sd-event.h)
IF (LIBSDEVENT)
ADD_EXECUTABLE(example-libsdevent example-libsdevent.c)
TARGET_LINK_LIBRARIES(example-libsdevent hiredis systemd)
ENDIF()
IF (APPLE)
FIND_LIBRARY(CF CoreFoundation)
ADD_EXECUTABLE(example-macosx example-macosx.c)


@ -56,7 +56,7 @@ int main (int argc, char **argv) {
const char *caCert = argc > 5 ? argv[6] : NULL;
redisSSLContext *ssl;
redisSSLContextError ssl_error;
redisSSLContextError ssl_error = REDIS_SSL_CTX_NONE;
redisInitOpenSSL();

deps/hiredis/examples/example-libhv.c

@ -0,0 +1,70 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <hiredis.h>
#include <async.h>
#include <adapters/libhv.h>
void getCallback(redisAsyncContext *c, void *r, void *privdata) {
redisReply *reply = r;
if (reply == NULL) return;
printf("argv[%s]: %s\n", (char*)privdata, reply->str);
/* Disconnect after receiving the reply to GET */
redisAsyncDisconnect(c);
}
void debugCallback(redisAsyncContext *c, void *r, void *privdata) {
(void)privdata;
redisReply *reply = r;
if (reply == NULL) {
printf("`DEBUG SLEEP` error: %s\n", c->errstr ? c->errstr : "unknown error");
return;
}
redisAsyncDisconnect(c);
}
void connectCallback(const redisAsyncContext *c, int status) {
if (status != REDIS_OK) {
printf("Error: %s\n", c->errstr);
return;
}
printf("Connected...\n");
}
void disconnectCallback(const redisAsyncContext *c, int status) {
if (status != REDIS_OK) {
printf("Error: %s\n", c->errstr);
return;
}
printf("Disconnected...\n");
}
int main (int argc, char **argv) {
#ifndef _WIN32
signal(SIGPIPE, SIG_IGN);
#endif
redisAsyncContext *c = redisAsyncConnect("127.0.0.1", 6379);
if (c->err) {
/* Let *c leak for now... */
printf("Error: %s\n", c->errstr);
return 1;
}
hloop_t* loop = hloop_new(HLOOP_FLAG_QUIT_WHEN_NO_ACTIVE_EVENTS);
redisLibhvAttach(c, loop);
redisAsyncSetTimeout(c, (struct timeval){.tv_sec = 0, .tv_usec = 500000});
redisAsyncSetConnectCallback(c,connectCallback);
redisAsyncSetDisconnectCallback(c,disconnectCallback);
redisAsyncCommand(c, NULL, NULL, "SET key %b", argv[argc-1], strlen(argv[argc-1]));
redisAsyncCommand(c, getCallback, (char*)"end-1", "GET key");
redisAsyncCommand(c, debugCallback, NULL, "DEBUG SLEEP %d", 1);
hloop_run(loop);
hloop_free(&loop);
return 0;
}


@ -0,0 +1,86 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <hiredis.h>
#include <async.h>
#include <adapters/libsdevent.h>
void debugCallback(redisAsyncContext *c, void *r, void *privdata) {
(void)privdata;
redisReply *reply = r;
if (reply == NULL) {
/* The DEBUG SLEEP command will almost always fail, because we have set a 1 second timeout */
printf("`DEBUG SLEEP` error: %s\n", c->errstr ? c->errstr : "unknown error");
return;
}
/* Disconnect after receiving the reply to DEBUG SLEEP (which will not arrive) */
redisAsyncDisconnect(c);
}
void getCallback(redisAsyncContext *c, void *r, void *privdata) {
redisReply *reply = r;
if (reply == NULL) {
printf("`GET key` error: %s\n", c->errstr ? c->errstr : "unknown error");
return;
}
printf("`GET key` result: argv[%s]: %s\n", (char*)privdata, reply->str);
/* start another request that demonstrate timeout */
redisAsyncCommand(c, debugCallback, NULL, "DEBUG SLEEP %f", 1.5);
}
void connectCallback(const redisAsyncContext *c, int status) {
if (status != REDIS_OK) {
printf("connect error: %s\n", c->errstr);
return;
}
printf("Connected...\n");
}
void disconnectCallback(const redisAsyncContext *c, int status) {
if (status != REDIS_OK) {
printf("disconnect because of error: %s\n", c->errstr);
return;
}
printf("Disconnected...\n");
}
int main (int argc, char **argv) {
signal(SIGPIPE, SIG_IGN);
struct sd_event *event;
sd_event_default(&event);
redisAsyncContext *c = redisAsyncConnect("127.0.0.1", 6379);
if (c->err) {
printf("Error: %s\n", c->errstr);
redisAsyncFree(c);
return 1;
}
redisLibsdeventAttach(c,event);
redisAsyncSetConnectCallback(c,connectCallback);
redisAsyncSetDisconnectCallback(c,disconnectCallback);
redisAsyncSetTimeout(c, (struct timeval){ .tv_sec = 1, .tv_usec = 0});
/*
In this demo, we first `set key`, then `get key` to demonstrate the basic usage of the libsdevent adapter.
Then in `getCallback`, we start a `debug sleep` command to create a 1.5 second long request.
Because we have set a 1 second timeout on the connection, the command will always fail with a
timeout error, which is shown in `debugCallback`.
*/
redisAsyncCommand(c, NULL, NULL, "SET key %b", argv[argc-1], strlen(argv[argc-1]));
redisAsyncCommand(c, getCallback, (char*)"end-1", "GET key");
/* sd-event does not quit when there are no handlers registered. Manually exit after 1.5 seconds */
sd_event_source *s;
sd_event_add_time_relative(event, &s, CLOCK_MONOTONIC, 1500000, 1, NULL, NULL);
sd_event_loop(event);
sd_event_source_disable_unref(s);
sd_event_unref(event);
return 0;
}

deps/hiredis/examples/example-poll.c

@ -0,0 +1,62 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <async.h>
#include <adapters/poll.h>
/* Put in the global scope, so that loop can be explicitly stopped */
static int exit_loop = 0;
void getCallback(redisAsyncContext *c, void *r, void *privdata) {
redisReply *reply = r;
if (reply == NULL) return;
printf("argv[%s]: %s\n", (char*)privdata, reply->str);
/* Disconnect after receiving the reply to GET */
redisAsyncDisconnect(c);
}
void connectCallback(const redisAsyncContext *c, int status) {
if (status != REDIS_OK) {
printf("Error: %s\n", c->errstr);
exit_loop = 1;
return;
}
printf("Connected...\n");
}
void disconnectCallback(const redisAsyncContext *c, int status) {
exit_loop = 1;
if (status != REDIS_OK) {
printf("Error: %s\n", c->errstr);
return;
}
printf("Disconnected...\n");
}
int main (int argc, char **argv) {
signal(SIGPIPE, SIG_IGN);
redisAsyncContext *c = redisAsyncConnect("127.0.0.1", 6379);
if (c->err) {
/* Let *c leak for now... */
printf("Error: %s\n", c->errstr);
return 1;
}
redisPollAttach(c);
redisAsyncSetConnectCallback(c,connectCallback);
redisAsyncSetDisconnectCallback(c,disconnectCallback);
redisAsyncCommand(c, NULL, NULL, "SET key %b", argv[argc-1], strlen(argv[argc-1]));
redisAsyncCommand(c, getCallback, (char*)"end-1", "GET key");
while (!exit_loop)
{
redisPollTick(c, 0.1);
}
return 0;
}


@ -0,0 +1,101 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <hiredis.h>
#include <async.h>
#include <adapters/redismoduleapi.h>
void debugCallback(redisAsyncContext *c, void *r, void *privdata) {
(void)privdata; //unused
redisReply *reply = r;
if (reply == NULL) {
/* The DEBUG SLEEP command will almost always fail, because we have set a 1 second timeout */
printf("`DEBUG SLEEP` error: %s\n", c->errstr ? c->errstr : "unknown error");
return;
}
/* Disconnect after receiving the reply to DEBUG SLEEP (which will not arrive) */
redisAsyncDisconnect(c);
}
void getCallback(redisAsyncContext *c, void *r, void *privdata) {
redisReply *reply = r;
if (reply == NULL) {
if (c->errstr) {
printf("errstr: %s\n", c->errstr);
}
return;
}
printf("argv[%s]: %s\n", (char*)privdata, reply->str);
/* start another request that demonstrate timeout */
redisAsyncCommand(c, debugCallback, NULL, "DEBUG SLEEP %f", 1.5);
}
void connectCallback(const redisAsyncContext *c, int status) {
if (status != REDIS_OK) {
printf("Error: %s\n", c->errstr);
return;
}
printf("Connected...\n");
}
void disconnectCallback(const redisAsyncContext *c, int status) {
if (status != REDIS_OK) {
printf("Error: %s\n", c->errstr);
return;
}
printf("Disconnected...\n");
}
/*
* This example requires Redis 7.0 or above.
*
* 1- Compile this file as a shared library. Directory of "redismodule.h" must
* be in the include path.
* gcc -fPIC -shared -I../../redis/src/ -I.. example-redismoduleapi.c -o example-redismoduleapi.so
*
* 2- Load module:
* redis-server --loadmodule ./example-redismoduleapi.so value
*/
int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
int ret = RedisModule_Init(ctx, "example-redismoduleapi", 1, REDISMODULE_APIVER_1);
if (ret != REDISMODULE_OK) {
printf("error module init \n");
return REDISMODULE_ERR;
}
if (redisModuleCompatibilityCheck() != REDIS_OK) {
printf("Redis 7.0 or above is required! \n");
return REDISMODULE_ERR;
}
redisAsyncContext *c = redisAsyncConnect("127.0.0.1", 6379);
if (c->err) {
/* Let *c leak for now... */
printf("Error: %s\n", c->errstr);
return 1;
}
size_t len;
const char *val = RedisModule_StringPtrLen(argv[argc-1], &len);
RedisModuleCtx *module_ctx = RedisModule_GetDetachedThreadSafeContext(ctx);
redisModuleAttach(c, module_ctx);
redisAsyncSetConnectCallback(c,connectCallback);
redisAsyncSetDisconnectCallback(c,disconnectCallback);
redisAsyncSetTimeout(c, (struct timeval){ .tv_sec = 1, .tv_usec = 0});
/*
In this demo, we first `set key`, then `get key` to demonstrate the basic usage of the adapter.
Then in `getCallback`, we start a `debug sleep` command to create a 1.5 second long request.
Because we have set a 1 second timeout on the connection, the command will always fail with a
timeout error, which is shown in `debugCallback`.
*/
redisAsyncCommand(c, NULL, NULL, "SET key %b", val, len);
redisAsyncCommand(c, getCallback, (char*)"end-1", "GET key");
return 0;
}


@ -12,7 +12,7 @@
int main(int argc, char **argv) {
unsigned int j;
redisSSLContext *ssl;
redisSSLContextError ssl_error;
redisSSLContextError ssl_error = REDIS_SSL_CTX_NONE;
redisContext *c;
redisReply *reply;
if (argc < 4) {
@ -27,9 +27,8 @@ int main(int argc, char **argv) {
redisInitOpenSSL();
ssl = redisCreateSSLContext(ca, NULL, cert, key, NULL, &ssl_error);
if (!ssl) {
printf("SSL Context error: %s\n",
redisSSLContextGetError(ssl_error));
if (!ssl || ssl_error != REDIS_SSL_CTX_NONE) {
printf("SSL Context error: %s\n", redisSSLContextGetError(ssl_error));
exit(1);
}


@ -7,6 +7,54 @@
#include <winsock2.h> /* For struct timeval */
#endif
static void example_argv_command(redisContext *c, size_t n) {
char **argv, tmp[42];
size_t *argvlen;
redisReply *reply;
/* We're allocating two additional elements for command and key */
argv = malloc(sizeof(*argv) * (2 + n));
argvlen = malloc(sizeof(*argvlen) * (2 + n));
/* First the command */
argv[0] = (char*)"RPUSH";
argvlen[0] = sizeof("RPUSH") - 1;
/* Now our key */
argv[1] = (char*)"argvlist";
argvlen[1] = sizeof("argvlist") - 1;
/* Now add the entries we wish to add to the list */
for (size_t i = 2; i < (n + 2); i++) {
argvlen[i] = snprintf(tmp, sizeof(tmp), "argv-element-%zu", i - 2);
argv[i] = strdup(tmp);
}
/* Execute the command using redisCommandArgv. We're sending the arguments with
* two explicit arrays. One for each argument's string, and the other for its
* length. */
reply = redisCommandArgv(c, n + 2, (const char **)argv, (const size_t*)argvlen);
if (reply == NULL || c->err) {
fprintf(stderr, "Error: Couldn't execute redisCommandArgv\n");
exit(1);
}
if (reply->type == REDIS_REPLY_INTEGER) {
printf("%s reply: %lld\n", argv[0], reply->integer);
}
freeReplyObject(reply);
/* Clean up */
for (size_t i = 2; i < (n + 2); i++) {
free(argv[i]);
}
free(argv);
free(argvlen);
}
int main(int argc, char **argv) {
unsigned int j, isunix = 0;
redisContext *c;
@ -87,6 +135,9 @@ int main(int argc, char **argv) {
}
freeReplyObject(reply);
/* See function for an example of redisCommandArgv */
example_argv_command(c, 10);
/* Disconnects and frees the context */
redisFree(c);


@ -48,10 +48,9 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
memcpy(new_str, data, size);
new_str[size] = '\0';
redisFormatCommand(&cmd, new_str);
if (cmd != NULL)
if (redisFormatCommand(&cmd, new_str) != -1)
hi_free(cmd);
free(new_str);
return 0;
}


@ -2,11 +2,11 @@
set_and_check(hiredis_INCLUDEDIR "@PACKAGE_INCLUDE_INSTALL_DIR@")
IF (NOT TARGET hiredis::hiredis)
IF (NOT TARGET hiredis::@hiredis_export_name@)
INCLUDE(${CMAKE_CURRENT_LIST_DIR}/hiredis-targets.cmake)
ENDIF()
SET(hiredis_LIBRARIES hiredis::hiredis)
SET(hiredis_LIBRARIES hiredis::@hiredis_export_name@)
SET(hiredis_INCLUDE_DIRS ${hiredis_INCLUDEDIR})
check_required_components(hiredis)


@ -48,6 +48,7 @@ extern int redisContextUpdateConnectTimeout(redisContext *c, const struct timeva
extern int redisContextUpdateCommandTimeout(redisContext *c, const struct timeval *timeout);
static redisContextFuncs redisContextDefaultFuncs = {
.close = redisNetClose,
.free_privctx = NULL,
.async_read = redisAsyncRead,
.async_write = redisAsyncWrite,
@ -221,6 +222,9 @@ static void *createIntegerObject(const redisReadTask *task, long long value) {
static void *createDoubleObject(const redisReadTask *task, double value, char *str, size_t len) {
redisReply *r, *parent;
if (len == SIZE_MAX) // Prevents hi_malloc(0) if len equals to SIZE_MAX
return NULL;
r = createReplyObject(REDIS_REPLY_DOUBLE);
if (r == NULL)
return NULL;
@ -388,17 +392,22 @@ int redisvFormatCommand(char **target, const char *format, va_list ap) {
while (*_p != '\0' && strchr(flags,*_p) != NULL) _p++;
/* Field width */
while (*_p != '\0' && isdigit(*_p)) _p++;
while (*_p != '\0' && isdigit((int) *_p)) _p++;
/* Precision */
if (*_p == '.') {
_p++;
while (*_p != '\0' && isdigit(*_p)) _p++;
while (*_p != '\0' && isdigit((int) *_p)) _p++;
}
/* Copy va_list before consuming with va_arg */
va_copy(_cpy,ap);
/* Make sure we have more characters, otherwise strchr() accepts
* '\0' as an integer specifier. This is checked after the above
* va_copy() to avoid UB in fmt_invalid's call to va_end(). */
if (*_p == '\0') goto fmt_invalid;
/* Integer conversion (without modifiers) */
if (strchr(intfmts,*_p) != NULL) {
va_arg(ap,int);
@ -477,6 +486,8 @@ int redisvFormatCommand(char **target, const char *format, va_list ap) {
touched = 1;
c++;
if (*c == '\0')
break;
}
c++;
}
@ -719,7 +730,10 @@ static redisContext *redisContextInit(void) {
void redisFree(redisContext *c) {
if (c == NULL)
return;
redisNetClose(c);
if (c->funcs && c->funcs->close) {
c->funcs->close(c);
}
hi_sdsfree(c->obuf);
redisReaderFree(c->reader);
@ -733,7 +747,7 @@ void redisFree(redisContext *c) {
if (c->privdata && c->free_privdata)
c->free_privdata(c->privdata);
if (c->funcs->free_privctx)
if (c->funcs && c->funcs->free_privctx)
c->funcs->free_privctx(c->privctx);
memset(c, 0xff, sizeof(*c));
@ -756,7 +770,9 @@ int redisReconnect(redisContext *c) {
c->privctx = NULL;
}
redisNetClose(c);
if (c->funcs && c->funcs->close) {
c->funcs->close(c);
}
hi_sdsfree(c->obuf);
redisReaderFree(c->reader);
@ -806,6 +822,12 @@ redisContext *redisConnectWithOptions(const redisOptions *options) {
if (options->options & REDIS_OPT_NOAUTOFREEREPLIES) {
c->flags |= REDIS_NO_AUTO_FREE_REPLIES;
}
if (options->options & REDIS_OPT_PREFER_IPV4) {
c->flags |= REDIS_PREFER_IPV4;
}
if (options->options & REDIS_OPT_PREFER_IPV6) {
c->flags |= REDIS_PREFER_IPV6;
}
/* Set any user supplied RESP3 PUSH handler or use freeReplyObject
* as a default unless specifically flagged that we don't want one. */
@ -838,7 +860,9 @@ redisContext *redisConnectWithOptions(const redisOptions *options) {
return NULL;
}
if (options->command_timeout != NULL && (c->flags & REDIS_BLOCK) && c->fd != REDIS_INVALID_FD) {
if (c->err == 0 && c->fd != REDIS_INVALID_FD &&
options->command_timeout != NULL && (c->flags & REDIS_BLOCK))
{
redisContextSetTimeout(c, *options->command_timeout);
}
@ -920,11 +944,18 @@ int redisSetTimeout(redisContext *c, const struct timeval tv) {
return REDIS_ERR;
}
int redisEnableKeepAliveWithInterval(redisContext *c, int interval) {
return redisKeepAlive(c, interval);
}
/* Enable connection KeepAlive. */
int redisEnableKeepAlive(redisContext *c) {
if (redisKeepAlive(c, REDIS_KEEPALIVE_INTERVAL) != REDIS_OK)
return REDIS_ERR;
return REDIS_OK;
return redisKeepAlive(c, REDIS_KEEPALIVE_INTERVAL);
}
/* Set the socket option TCP_USER_TIMEOUT. */
int redisSetTcpUserTimeout(redisContext *c, unsigned int timeout) {
return redisContextSetTcpUserTimeout(c, timeout);
}
/* Set a user provided RESP3 PUSH handler and return any old one set. */
@ -964,8 +995,8 @@ int redisBufferRead(redisContext *c) {
* successfully written to the socket. When the buffer is empty after the
* write operation, "done" is set to 1 (if given).
*
* Returns REDIS_ERR if an error occurred trying to write and sets
* c->errstr to hold the appropriate error string.
* Returns REDIS_ERR if an unrecoverable error occurred in the underlying
* c->funcs->write function.
*/
int redisBufferWrite(redisContext *c, int *done) {
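Per the reworded comment, redisBufferWrite() now reports REDIS_ERR only for unrecoverable errors coming out of c->funcs->write. A sketch of the usual non-blocking flush loop that relies on that contract (c is assumed to be an already-connected, non-blocking context):
/* Sketch only: flush the output buffer until it is empty or a hard error. */
int done = 0;
redisAppendCommand(c, "PING");
do {
    if (redisBufferWrite(c, &done) == REDIS_ERR) {
        /* unrecoverable error from the underlying write function */
        break;
    }
} while (!done);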


@ -46,9 +46,9 @@ typedef long long ssize_t;
#include "alloc.h" /* for allocation wrappers */
#define HIREDIS_MAJOR 1
#define HIREDIS_MINOR 0
#define HIREDIS_PATCH 3
#define HIREDIS_SONAME 1.0.3-dev
#define HIREDIS_MINOR 2
#define HIREDIS_PATCH 0
#define HIREDIS_SONAME 1.1.0
/* Connection type can be blocking or non-blocking and is set in the
* least significant bit of the flags field in redisContext. */
@ -92,6 +92,11 @@ typedef long long ssize_t;
/* Flag that indicates the user does not want replies to be automatically freed */
#define REDIS_NO_AUTO_FREE_REPLIES 0x400
/* Flags to prefer IPv6 or IPv4 when doing DNS lookup. (If both are set,
* AF_UNSPEC is used.) */
#define REDIS_PREFER_IPV4 0x800
#define REDIS_PREFER_IPV6 0x1000
#define REDIS_KEEPALIVE_INTERVAL 15 /* seconds */
/* number of times we retry to connect in the case of EADDRNOTAVAIL and
@ -149,20 +154,17 @@ struct redisSsl;
#define REDIS_OPT_NONBLOCK 0x01
#define REDIS_OPT_REUSEADDR 0x02
/**
* Don't automatically free the async object on a connection failure,
* or other implicit conditions. Only free on an explicit call to disconnect() or free()
*/
#define REDIS_OPT_NOAUTOFREE 0x04
/* Don't automatically intercept and free RESP3 PUSH replies. */
#define REDIS_OPT_NO_PUSH_AUTOFREE 0x08
/**
* Don't automatically free replies
*/
#define REDIS_OPT_NOAUTOFREEREPLIES 0x10
#define REDIS_OPT_NOAUTOFREE 0x04 /* Don't automatically free the async
* object on a connection failure, or
* other implicit conditions. Only free
* on an explicit call to disconnect()
* or free() */
#define REDIS_OPT_NO_PUSH_AUTOFREE 0x08 /* Don't automatically intercept and
* free RESP3 PUSH replies. */
#define REDIS_OPT_NOAUTOFREEREPLIES 0x10 /* Don't automatically free replies. */
#define REDIS_OPT_PREFER_IPV4 0x20 /* Prefer IPv4 in DNS lookups. */
#define REDIS_OPT_PREFER_IPV6 0x40 /* Prefer IPv6 in DNS lookups. */
#define REDIS_OPT_PREFER_IP_UNSPEC (REDIS_OPT_PREFER_IPV4 | REDIS_OPT_PREFER_IPV6)
/* In Unix systems a file descriptor is a regular signed int, with -1
* representing an invalid descriptor. In Windows it is a SOCKET
@ -220,27 +222,37 @@ typedef struct {
/**
* Helper macros to initialize options to their specified fields.
*/
#define REDIS_OPTIONS_SET_TCP(opts, ip_, port_) \
(opts)->type = REDIS_CONN_TCP; \
(opts)->endpoint.tcp.ip = ip_; \
(opts)->endpoint.tcp.port = port_;
#define REDIS_OPTIONS_SET_TCP(opts, ip_, port_) do { \
(opts)->type = REDIS_CONN_TCP; \
(opts)->endpoint.tcp.ip = ip_; \
(opts)->endpoint.tcp.port = port_; \
} while(0)
#define REDIS_OPTIONS_SET_UNIX(opts, path) \
(opts)->type = REDIS_CONN_UNIX; \
(opts)->endpoint.unix_socket = path;
#define REDIS_OPTIONS_SET_UNIX(opts, path) do { \
(opts)->type = REDIS_CONN_UNIX; \
(opts)->endpoint.unix_socket = path; \
} while(0)
#define REDIS_OPTIONS_SET_PRIVDATA(opts, data, dtor) \
(opts)->privdata = data; \
(opts)->free_privdata = dtor; \
#define REDIS_OPTIONS_SET_PRIVDATA(opts, data, dtor) do { \
(opts)->privdata = data; \
(opts)->free_privdata = dtor; \
} while(0)
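The new REDIS_OPT_PREFER_IPV4/IPV6 flags and the do { } while(0)-wrapped helper macros above combine roughly as follows; host and port are placeholders:
/* Sketch: request an IPv6-first DNS lookup. The braced macros are now safe
 * even inside an unbraced if/else. */
redisOptions options = {0};
REDIS_OPTIONS_SET_TCP(&options, "redis.example.com", 6379);
options.options |= REDIS_OPT_PREFER_IPV6;
redisContext *c = redisConnectWithOptions(&options);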
typedef struct redisContextFuncs {
void (*close)(struct redisContext *);
void (*free_privctx)(void *);
void (*async_read)(struct redisAsyncContext *);
void (*async_write)(struct redisAsyncContext *);
/* Read/Write data to the underlying communication stream, returning the
* number of bytes read/written. In the event of an unrecoverable error
* these functions shall return a value < 0. In the event of a
* recoverable error, they should return 0. */
ssize_t (*read)(struct redisContext *, char *, size_t);
ssize_t (*write)(struct redisContext *);
} redisContextFuncs;
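The comment above pins down the return-value contract for custom transports plugged into redisContextFuncs. A sketch of a conforming read implementation (myTransportRead is hypothetical and omits hiredis' internal error reporting):
/* Sketch: return >0 for bytes read, 0 for a recoverable condition,
 * and <0 for an unrecoverable error. */
static ssize_t myTransportRead(struct redisContext *c, char *buf, size_t bufcap) {
    ssize_t n = recv(c->fd, buf, bufcap, 0);
    if (n < 0) {
        if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)
            return 0;   /* recoverable: no data available right now */
        return -1;      /* unrecoverable I/O error */
    }
    if (n == 0)
        return -1;      /* peer closed the connection */
    return n;
}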
/* Context for a connection to Redis */
typedef struct redisContext {
const redisContextFuncs *funcs; /* Function table */
@ -310,6 +322,8 @@ int redisReconnect(redisContext *c);
redisPushFn *redisSetPushCallback(redisContext *c, redisPushFn *fn);
int redisSetTimeout(redisContext *c, const struct timeval tv);
int redisEnableKeepAlive(redisContext *c);
int redisEnableKeepAliveWithInterval(redisContext *c, int interval);
int redisSetTcpUserTimeout(redisContext *c, unsigned int timeout);
void redisFree(redisContext *c);
redisFD redisFreeKeepFd(redisContext *c);
int redisBufferRead(redisContext *c);
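The two new public setters declared above can be exercised on a plain blocking connection, as in this sketch; the 30 second interval and 10000 ms timeout are arbitrary values:
/* Sketch: per the tests below, redisSetTcpUserTimeout() fails with
 * c->err == REDIS_ERR_IO on platforms without TCP_USER_TIMEOUT. */
redisContext *c = redisConnect("127.0.0.1", 6379);
if (c != NULL && c->err == 0) {
    redisEnableKeepAliveWithInterval(c, 30);  /* SO_KEEPALIVE, 30s interval */
    redisSetTcpUserTimeout(c, 10000);         /* TCP_USER_TIMEOUT in ms */
}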


@ -9,4 +9,4 @@ Name: hiredis
Description: Minimalistic C client library for Redis.
Version: @PROJECT_VERSION@
Libs: -L${libdir} -lhiredis
Cflags: -I${pkgincludedir} -D_FILE_OFFSET_BITS=64
Cflags: -I${pkgincludedir} -I${includedir} -D_FILE_OFFSET_BITS=64


@ -2,6 +2,9 @@
set_and_check(hiredis_ssl_INCLUDEDIR "@PACKAGE_INCLUDE_INSTALL_DIR@")
include(CMakeFindDependencyMacro)
find_dependency(OpenSSL)
IF (NOT TARGET hiredis::hiredis_ssl)
INCLUDE(${CMAKE_CURRENT_LIST_DIR}/hiredis_ssl-targets.cmake)
ENDIF()


@ -56,11 +56,33 @@ typedef enum {
REDIS_SSL_CTX_CERT_KEY_REQUIRED, /* Client cert and key must both be specified or skipped */
REDIS_SSL_CTX_CA_CERT_LOAD_FAILED, /* Failed to load CA Certificate or CA Path */
REDIS_SSL_CTX_CLIENT_CERT_LOAD_FAILED, /* Failed to load client certificate */
REDIS_SSL_CTX_CLIENT_DEFAULT_CERT_FAILED, /* Failed to set client default certificate directory */
REDIS_SSL_CTX_PRIVATE_KEY_LOAD_FAILED, /* Failed to load private key */
REDIS_SSL_CTX_OS_CERTSTORE_OPEN_FAILED, /* Failed to open system certificate store */
REDIS_SSL_CTX_OS_CERT_ADD_FAILED /* Failed to add CA certificates obtained from system to the SSL context */
} redisSSLContextError;
/* Constants that mirror OpenSSL's verify modes. By default,
* REDIS_SSL_VERIFY_PEER is used with redisCreateSSLContext().
* Some Redis clients disable peer verification if there are no
* certificates specified.
*/
#define REDIS_SSL_VERIFY_NONE 0x00
#define REDIS_SSL_VERIFY_PEER 0x01
#define REDIS_SSL_VERIFY_FAIL_IF_NO_PEER_CERT 0x02
#define REDIS_SSL_VERIFY_CLIENT_ONCE 0x04
#define REDIS_SSL_VERIFY_POST_HANDSHAKE 0x08
/* Options to create an OpenSSL context. */
typedef struct {
const char *cacert_filename;
const char *capath;
const char *cert_filename;
const char *private_key_filename;
const char *server_name;
int verify_mode;
} redisSSLOptions;
/**
* Return the error message corresponding with the specified error code.
*/
@ -101,6 +123,18 @@ redisSSLContext *redisCreateSSLContext(const char *cacert_filename, const char *
const char *cert_filename, const char *private_key_filename,
const char *server_name, redisSSLContextError *error);
/**
* Helper function to initialize an OpenSSL context that can be used
* to initiate SSL connections. This is a more extensible version of redisCreateSSLContext().
*
* options contains a structure of SSL options to use.
*
* If error is non-null, it will be populated in case the context creation fails
* (returning a NULL).
*/
redisSSLContext *redisCreateSSLContextWithOptions(redisSSLOptions *options,
redisSSLContextError *error);
/**
* Free a previously created OpenSSL context.
*/
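redisCreateSSLContextWithOptions() takes the new redisSSLOptions struct declared above. A sketch with placeholder file names, checking the error code the same way the updated examples do:
/* Sketch only: certificate paths are placeholders. */
redisSSLContextError ssl_error = REDIS_SSL_CTX_NONE;
redisSSLOptions opts = {
    .cacert_filename = "ca.crt",
    .cert_filename = "client.crt",
    .private_key_filename = "client.key",
    .verify_mode = REDIS_SSL_VERIFY_PEER,
};
redisSSLContext *ssl = redisCreateSSLContextWithOptions(&opts, &ssl_error);
if (ssl == NULL || ssl_error != REDIS_SSL_CTX_NONE)
    printf("SSL Context error: %s\n", redisSSLContextGetError(ssl_error));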


@ -1,6 +1,7 @@
prefix=@CMAKE_INSTALL_PREFIX@
install_libdir=@CMAKE_INSTALL_LIBDIR@
exec_prefix=${prefix}
libdir=${exec_prefix}/lib
libdir=${exec_prefix}/${install_libdir}
includedir=${prefix}/include
pkgincludedir=${includedir}/hiredis

deps/hiredis/net.c

@ -50,6 +50,8 @@
/* Defined in hiredis.c */
void __redisSetError(redisContext *c, int type, const char *str);
int redisContextUpdateCommandTimeout(redisContext *c, const struct timeval *timeout);
void redisNetClose(redisContext *c) {
if (c && c->fd != REDIS_INVALID_FD) {
close(c->fd);
@ -68,7 +70,7 @@ ssize_t redisNetRead(redisContext *c, char *buf, size_t bufcap) {
__redisSetError(c, REDIS_ERR_TIMEOUT, "recv timeout");
return -1;
} else {
__redisSetError(c, REDIS_ERR_IO, NULL);
__redisSetError(c, REDIS_ERR_IO, strerror(errno));
return -1;
}
} else if (nread == 0) {
@ -80,15 +82,19 @@ ssize_t redisNetRead(redisContext *c, char *buf, size_t bufcap) {
}
ssize_t redisNetWrite(redisContext *c) {
ssize_t nwritten = send(c->fd, c->obuf, hi_sdslen(c->obuf), 0);
ssize_t nwritten;
nwritten = send(c->fd, c->obuf, hi_sdslen(c->obuf), 0);
if (nwritten < 0) {
if ((errno == EWOULDBLOCK && !(c->flags & REDIS_BLOCK)) || (errno == EINTR)) {
/* Try again later */
/* Try again */
return 0;
} else {
__redisSetError(c, REDIS_ERR_IO, NULL);
__redisSetError(c, REDIS_ERR_IO, strerror(errno));
return -1;
}
}
return nwritten;
}
@ -166,6 +172,7 @@ int redisKeepAlive(redisContext *c, int interval) {
int val = 1;
redisFD fd = c->fd;
#ifndef _WIN32
if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &val, sizeof(val)) == -1){
__redisSetError(c,REDIS_ERR_OTHER,strerror(errno));
return REDIS_ERR;
@ -199,7 +206,15 @@ int redisKeepAlive(redisContext *c, int interval) {
}
#endif
#endif
#else
int res;
res = win32_redisKeepAlive(fd, interval * 1000);
if (res != 0) {
__redisSetError(c, REDIS_ERR_OTHER, strerror(res));
return REDIS_ERR;
}
#endif
return REDIS_OK;
}
@ -213,6 +228,23 @@ int redisSetTcpNoDelay(redisContext *c) {
return REDIS_OK;
}
int redisContextSetTcpUserTimeout(redisContext *c, unsigned int timeout) {
int res;
#ifdef TCP_USER_TIMEOUT
res = setsockopt(c->fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &timeout, sizeof(timeout));
#else
res = -1;
errno = ENOTSUP;
(void)timeout;
#endif
if (res == -1) {
__redisSetErrorFromErrno(c,REDIS_ERR_IO,"setsockopt(TCP_USER_TIMEOUT)");
redisNetClose(c);
return REDIS_ERR;
}
return REDIS_OK;
}
#define __MAX_MSEC (((LONG_MAX) - 999) / 1000)
static int redisContextTimeoutMsec(redisContext *c, long *result)
@ -223,6 +255,7 @@ static int redisContextTimeoutMsec(redisContext *c, long *result)
/* Only use timeout when not NULL. */
if (timeout != NULL) {
if (timeout->tv_usec > 1000000 || timeout->tv_sec > __MAX_MSEC) {
__redisSetError(c, REDIS_ERR_IO, "Invalid timeout specified");
*result = msec;
return REDIS_ERR;
}
@ -277,12 +310,28 @@ int redisCheckConnectDone(redisContext *c, int *completed) {
*completed = 1;
return REDIS_OK;
}
switch (errno) {
int error = errno;
if (error == EINPROGRESS) {
/* must check error to see if connect failed. Get the socket error */
int fail, so_error;
socklen_t optlen = sizeof(so_error);
fail = getsockopt(c->fd, SOL_SOCKET, SO_ERROR, &so_error, &optlen);
if (fail == 0) {
if (so_error == 0) {
/* Socket is connected! */
*completed = 1;
return REDIS_OK;
}
/* connection error; */
errno = so_error;
error = so_error;
}
}
switch (error) {
case EISCONN:
*completed = 1;
return REDIS_OK;
case EALREADY:
case EINPROGRESS:
case EWOULDBLOCK:
*completed = 0;
return REDIS_OK;
@ -317,6 +366,10 @@ int redisContextSetTimeout(redisContext *c, const struct timeval tv) {
const void *to_ptr = &tv;
size_t to_sz = sizeof(tv);
if (redisContextUpdateCommandTimeout(c, &tv) != REDIS_OK) {
__redisSetError(c, REDIS_ERR_OOM, "Out of memory");
return REDIS_ERR;
}
if (setsockopt(c->fd,SOL_SOCKET,SO_RCVTIMEO,to_ptr,to_sz) == -1) {
__redisSetErrorFromErrno(c,REDIS_ERR_IO,"setsockopt(SO_RCVTIMEO)");
return REDIS_ERR;
@ -400,7 +453,6 @@ static int _redisContextConnectTcp(redisContext *c, const char *addr, int port,
}
if (redisContextTimeoutMsec(c, &timeout_msec) != REDIS_OK) {
__redisSetError(c, REDIS_ERR_IO, "Invalid timeout specified");
goto error;
}
@ -417,17 +469,25 @@ static int _redisContextConnectTcp(redisContext *c, const char *addr, int port,
hints.ai_family = AF_INET;
hints.ai_socktype = SOCK_STREAM;
/* Try with IPv6 if no IPv4 address was found. We do it in this order since
* in a Redis client you can't afford to test if you have IPv6 connectivity
* as this would add latency to every connect. Otherwise a more sensible
* route could be: Use IPv6 if both addresses are available and there is IPv6
* connectivity. */
if ((rv = getaddrinfo(c->tcp.host,_port,&hints,&servinfo)) != 0) {
hints.ai_family = AF_INET6;
if ((rv = getaddrinfo(addr,_port,&hints,&servinfo)) != 0) {
__redisSetError(c,REDIS_ERR_OTHER,gai_strerror(rv));
return REDIS_ERR;
}
/* DNS lookup. To use dual stack, set both flags to prefer both IPv4 and
* IPv6. By default, for historical reasons, we try IPv4 first and then we
* try IPv6 only if no IPv4 address was found. */
if (c->flags & REDIS_PREFER_IPV6 && c->flags & REDIS_PREFER_IPV4)
hints.ai_family = AF_UNSPEC;
else if (c->flags & REDIS_PREFER_IPV6)
hints.ai_family = AF_INET6;
else
hints.ai_family = AF_INET;
rv = getaddrinfo(c->tcp.host, _port, &hints, &servinfo);
if (rv != 0 && hints.ai_family != AF_UNSPEC) {
/* Try again with the other IP version. */
hints.ai_family = (hints.ai_family == AF_INET) ? AF_INET6 : AF_INET;
rv = getaddrinfo(c->tcp.host, _port, &hints, &servinfo);
}
if (rv != 0) {
__redisSetError(c, REDIS_ERR_OTHER, gai_strerror(rv));
return REDIS_ERR;
}
for (p = servinfo; p != NULL; p = p->ai_next) {
addrretry:

deps/hiredis/net.h

@ -52,5 +52,6 @@ int redisKeepAlive(redisContext *c, int interval);
int redisCheckConnectDone(redisContext *c, int *completed);
int redisSetTcpNoDelay(redisContext *c);
int redisContextSetTcpUserTimeout(redisContext *c, unsigned int timeout);
#endif

deps/hiredis/read.c

@ -303,11 +303,14 @@ static int processLineItem(redisReader *r) {
d = INFINITY; /* Positive infinite. */
} else if (len == 4 && strcasecmp(buf,"-inf") == 0) {
d = -INFINITY; /* Negative infinite. */
} else if ((len == 3 && strcasecmp(buf,"nan") == 0) ||
(len == 4 && strcasecmp(buf, "-nan") == 0)) {
d = NAN; /* nan. */
} else {
d = strtod((char*)buf,&eptr);
/* RESP3 only allows "inf", "-inf", and finite values, while
* strtod() allows other variations on infinity, NaN,
* etc. We explicitly handle our two allowed infinite cases
* strtod() allows other variations on infinity,
* etc. We explicitly handle our two allowed infinite cases and NaN
* above, so strtod() should only result in finite values. */
if (buf[0] == '\0' || eptr != &buf[len] || !isfinite(d)) {
__redisReaderSetError(r,REDIS_ERR_PROTOCOL,
@ -374,7 +377,7 @@ static int processLineItem(redisReader *r) {
if (r->fn && r->fn->createString)
obj = r->fn->createString(cur,p,len);
else
obj = (void*)(size_t)(cur->type);
obj = (void*)(uintptr_t)(cur->type);
}
if (obj == NULL) {
@ -439,7 +442,7 @@ static int processBulkItem(redisReader *r) {
if (r->fn && r->fn->createString)
obj = r->fn->createString(cur,s+2,len);
else
obj = (void*)(long)cur->type;
obj = (void*)(uintptr_t)cur->type;
success = 1;
}
}
@ -536,7 +539,7 @@ static int processAggregateItem(redisReader *r) {
if (r->fn && r->fn->createArray)
obj = r->fn->createArray(cur,elements);
else
obj = (void*)(long)cur->type;
obj = (void*)(uintptr_t)cur->type;
if (obj == NULL) {
__redisReaderSetErrorOOM(r);

deps/hiredis/sds.c

@ -90,6 +90,7 @@ hisds hi_sdsnewlen(const void *init, size_t initlen) {
int hdrlen = hi_sdsHdrSize(type);
unsigned char *fp; /* flags pointer. */
if (hdrlen+initlen+1 <= initlen) return NULL; /* Catch size_t overflow */
sh = hi_s_malloc(hdrlen+initlen+1);
if (sh == NULL) return NULL;
if (!init)
@ -174,7 +175,7 @@ void hi_sdsfree(hisds s) {
* the output will be "6" as the string was modified but the logical length
* remains 6 bytes. */
void hi_sdsupdatelen(hisds s) {
int reallen = strlen(s);
size_t reallen = strlen(s);
hi_sdssetlen(s, reallen);
}
@ -196,7 +197,7 @@ void hi_sdsclear(hisds s) {
hisds hi_sdsMakeRoomFor(hisds s, size_t addlen) {
void *sh, *newsh;
size_t avail = hi_sdsavail(s);
size_t len, newlen;
size_t len, newlen, reqlen;
char type, oldtype = s[-1] & HI_SDS_TYPE_MASK;
int hdrlen;
@ -205,7 +206,8 @@ hisds hi_sdsMakeRoomFor(hisds s, size_t addlen) {
len = hi_sdslen(s);
sh = (char*)s-hi_sdsHdrSize(oldtype);
newlen = (len+addlen);
reqlen = newlen = (len+addlen);
if (newlen <= len) return NULL; /* Catch size_t overflow */
if (newlen < HI_SDS_MAX_PREALLOC)
newlen *= 2;
else
@ -219,6 +221,7 @@ hisds hi_sdsMakeRoomFor(hisds s, size_t addlen) {
if (type == HI_SDS_TYPE_5) type = HI_SDS_TYPE_8;
hdrlen = hi_sdsHdrSize(type);
if (hdrlen+newlen+1 <= reqlen) return NULL; /* Catch size_t overflow */
if (oldtype==type) {
newsh = hi_s_realloc(sh, hdrlen+newlen+1);
if (newsh == NULL) return NULL;
@ -580,7 +583,7 @@ hisds hi_sdscatprintf(hisds s, const char *fmt, ...) {
*/
hisds hi_sdscatfmt(hisds s, char const *fmt, ...) {
const char *f = fmt;
int i;
long i;
va_list ap;
va_start(ap,fmt);
@ -753,16 +756,16 @@ int hi_sdsrange(hisds s, ssize_t start, ssize_t end) {
return 0;
}
/* Apply tolower() to every character of the hisds string 's'. */
/* Apply tolower() to every character of the sds string 's'. */
void hi_sdstolower(hisds s) {
int len = hi_sdslen(s), j;
size_t len = hi_sdslen(s), j;
for (j = 0; j < len; j++) s[j] = tolower(s[j]);
}
/* Apply toupper() to every character of the hisds string 's'. */
/* Apply toupper() to every character of the sds string 's'. */
void hi_sdstoupper(hisds s) {
int len = hi_sdslen(s), j;
size_t len = hi_sdslen(s), j;
for (j = 0; j < len; j++) s[j] = toupper(s[j]);
}
@ -945,7 +948,7 @@ hisds *hi_sdssplitargs(const char *line, int *argc) {
*argc = 0;
while(1) {
/* skip blanks */
while(*p && isspace(*p)) p++;
while(*p && isspace((int) *p)) p++;
if (*p) {
/* get a token */
int inq=0; /* set to 1 if we are in "quotes" */
@ -956,8 +959,8 @@ hisds *hi_sdssplitargs(const char *line, int *argc) {
while(!done) {
if (inq) {
if (*p == '\\' && *(p+1) == 'x' &&
isxdigit(*(p+2)) &&
isxdigit(*(p+3)))
isxdigit((int) *(p+2)) &&
isxdigit((int) *(p+3)))
{
unsigned char byte;
@ -981,7 +984,7 @@ hisds *hi_sdssplitargs(const char *line, int *argc) {
} else if (*p == '"') {
/* closing quote must be followed by a space or
* nothing at all. */
if (*(p+1) && !isspace(*(p+1))) goto err;
if (*(p+1) && !isspace((int) *(p+1))) goto err;
done=1;
} else if (!*p) {
/* unterminated quotes */
@ -996,7 +999,7 @@ hisds *hi_sdssplitargs(const char *line, int *argc) {
} else if (*p == '\'') {
/* closing quote must be followed by a space or
* nothing at all. */
if (*(p+1) && !isspace(*(p+1))) goto err;
if (*(p+1) && !isspace((int) *(p+1))) goto err;
done=1;
} else if (!*p) {
/* unterminated quotes */

deps/hiredis/sds.h

@ -35,9 +35,11 @@
#define HI_SDS_MAX_PREALLOC (1024*1024)
#ifdef _MSC_VER
#define __attribute__(x)
typedef long long ssize_t;
#define SSIZE_MAX (LLONG_MAX >> 1)
#ifndef __clang__
#define __attribute__(x)
#endif
#endif
#include <sys/types.h>


@ -180,10 +180,17 @@ int win32_connect(SOCKET sockfd, const struct sockaddr *addr, socklen_t addrlen)
/* For Winsock connect(), the WSAEWOULDBLOCK error means the same thing as
* EINPROGRESS for POSIX connect(), so we do that translation to keep POSIX
* logic consistent. */
if (errno == EWOULDBLOCK) {
* logic consistent.
* Additionally, WSAEALREADY can be reported as WSAEINVAL, and this is
* translated to EIO. Convert appropriately.
*/
int err = errno;
if (err == EWOULDBLOCK) {
errno = EINPROGRESS;
}
else if (err == EIO) {
errno = EALREADY;
}
return ret != SOCKET_ERROR ? ret : -1;
}
@ -205,6 +212,14 @@ int win32_getsockopt(SOCKET sockfd, int level, int optname, void *optval, sockle
} else {
ret = getsockopt(sockfd, level, optname, (char*)optval, optlen);
}
if (ret != SOCKET_ERROR && level == SOL_SOCKET && optname == SO_ERROR) {
/* translate SO_ERROR codes, if non-zero */
int err = *(int*)optval;
if (err != 0) {
err = _wsaErrorToErrno(err);
*(int*)optval = err;
}
}
_updateErrno(ret != SOCKET_ERROR);
return ret != SOCKET_ERROR ? ret : -1;
}
@ -245,4 +260,21 @@ int win32_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
_updateErrno(ret != SOCKET_ERROR);
return ret != SOCKET_ERROR ? ret : -1;
}
int win32_redisKeepAlive(SOCKET sockfd, int interval_ms) {
struct tcp_keepalive cfg;
DWORD bytes_in;
int res;
cfg.onoff = 1;
cfg.keepaliveinterval = interval_ms;
cfg.keepalivetime = interval_ms;
res = WSAIoctl(sockfd, SIO_KEEPALIVE_VALS, &cfg,
sizeof(struct tcp_keepalive), NULL, 0,
&bytes_in, NULL, NULL);
return res == 0 ? 0 : _wsaErrorToErrno(res);
}
#endif /* _WIN32 */


@ -50,6 +50,7 @@
#include <ws2tcpip.h>
#include <stddef.h>
#include <errno.h>
#include <mstcpip.h>
#ifdef _MSC_VER
typedef long long ssize_t;
@ -71,6 +72,8 @@ ssize_t win32_send(SOCKET sockfd, const void *buf, size_t len, int flags);
typedef ULONG nfds_t;
int win32_poll(struct pollfd *fds, nfds_t nfds, int timeout);
int win32_redisKeepAlive(SOCKET sockfd, int interval_ms);
#ifndef REDIS_SOCKCOMPAT_IMPLEMENTATION
#define getaddrinfo(node, service, hints, res) win32_getaddrinfo(node, service, hints, res)
#undef gai_strerror

deps/hiredis/ssl.c

@ -32,6 +32,7 @@
#include "hiredis.h"
#include "async.h"
#include "net.h"
#include <assert.h>
#include <errno.h>
@ -39,6 +40,14 @@
#ifdef _WIN32
#include <windows.h>
#include <wincrypt.h>
#ifdef OPENSSL_IS_BORINGSSL
#undef X509_NAME
#undef X509_EXTENSIONS
#undef PKCS7_ISSUER_AND_SERIAL
#undef PKCS7_SIGNER_INFO
#undef OCSP_REQUEST
#undef OCSP_RESPONSE
#endif
#else
#include <pthread.h>
#endif
@ -50,6 +59,8 @@
#include "async_private.h"
#include "hiredis_ssl.h"
#define OPENSSL_1_1_0 0x10100000L
void __redisSetError(redisContext *c, int type, const char *str);
struct redisSSLContext {
@ -91,7 +102,7 @@ redisContextFuncs redisContextSSLFuncs;
* Note that this is only required for OpenSSL < 1.1.0.
*/
#if OPENSSL_VERSION_NUMBER < 0x10100000L
#if OPENSSL_VERSION_NUMBER < OPENSSL_1_1_0
#define HIREDIS_USE_CRYPTO_LOCKS
#endif
@ -219,6 +230,25 @@ redisSSLContext *redisCreateSSLContext(const char *cacert_filename, const char *
const char *cert_filename, const char *private_key_filename,
const char *server_name, redisSSLContextError *error)
{
redisSSLOptions options = {
.cacert_filename = cacert_filename,
.capath = capath,
.cert_filename = cert_filename,
.private_key_filename = private_key_filename,
.server_name = server_name,
.verify_mode = REDIS_SSL_VERIFY_PEER,
};
return redisCreateSSLContextWithOptions(&options, error);
}
redisSSLContext *redisCreateSSLContextWithOptions(redisSSLOptions *options, redisSSLContextError *error) {
const char *cacert_filename = options->cacert_filename;
const char *capath = options->capath;
const char *cert_filename = options->cert_filename;
const char *private_key_filename = options->private_key_filename;
const char *server_name = options->server_name;
#ifdef _WIN32
HCERTSTORE win_store = NULL;
PCCERT_CONTEXT win_ctx = NULL;
@ -228,14 +258,26 @@ redisSSLContext *redisCreateSSLContext(const char *cacert_filename, const char *
if (ctx == NULL)
goto error;
ctx->ssl_ctx = SSL_CTX_new(SSLv23_client_method());
const SSL_METHOD *ssl_method;
#if OPENSSL_VERSION_NUMBER >= OPENSSL_1_1_0
ssl_method = TLS_client_method();
#else
ssl_method = SSLv23_client_method();
#endif
ctx->ssl_ctx = SSL_CTX_new(ssl_method);
if (!ctx->ssl_ctx) {
if (error) *error = REDIS_SSL_CTX_CREATE_FAILED;
goto error;
}
SSL_CTX_set_options(ctx->ssl_ctx, SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3);
SSL_CTX_set_verify(ctx->ssl_ctx, SSL_VERIFY_PEER, NULL);
#if OPENSSL_VERSION_NUMBER >= OPENSSL_1_1_0
SSL_CTX_set_min_proto_version(ctx->ssl_ctx, TLS1_2_VERSION);
#else
SSL_CTX_set_options(ctx->ssl_ctx, SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3 | SSL_OP_NO_TLSv1 | SSL_OP_NO_TLSv1_1);
#endif
SSL_CTX_set_verify(ctx->ssl_ctx, options->verify_mode, NULL);
if ((cert_filename != NULL && private_key_filename == NULL) ||
(private_key_filename != NULL && cert_filename == NULL)) {
@ -273,6 +315,11 @@ redisSSLContext *redisCreateSSLContext(const char *cacert_filename, const char *
if (error) *error = REDIS_SSL_CTX_CA_CERT_LOAD_FAILED;
goto error;
}
} else {
if (!SSL_CTX_set_default_verify_paths(ctx->ssl_ctx)) {
if (error) *error = REDIS_SSL_CTX_CLIENT_DEFAULT_CERT_FAILED;
goto error;
}
}
if (cert_filename) {
@ -560,6 +607,7 @@ static void redisSSLAsyncWrite(redisAsyncContext *ac) {
}
redisContextFuncs redisContextSSLFuncs = {
.close = redisNetClose,
.free_privctx = redisSSLFree,
.async_read = redisSSLAsyncRead,
.async_write = redisSSLAsyncWrite,

deps/hiredis/test.c

@ -15,6 +15,7 @@
#include "hiredis.h"
#include "async.h"
#include "adapters/poll.h"
#ifdef HIREDIS_TEST_SSL
#include "hiredis_ssl.h"
#endif
@ -34,11 +35,11 @@ enum connection_type {
struct config {
enum connection_type type;
struct timeval connect_timeout;
struct {
const char *host;
int port;
struct timeval timeout;
} tcp;
struct {
@ -75,6 +76,15 @@ static int tests = 0, fails = 0, skips = 0;
#define test_cond(_c) if(_c) printf("\033[0;32mPASSED\033[0;0m\n"); else {printf("\033[0;31mFAILED\033[0;0m\n"); fails++;}
#define test_skipped() { printf("\033[01;33mSKIPPED\033[0;0m\n"); skips++; }
static void millisleep(int ms)
{
#ifdef _MSC_VER
Sleep(ms);
#else
usleep(ms*1000);
#endif
}
static long long usec(void) {
#ifndef _MSC_VER
struct timeval tv;
@ -329,10 +339,14 @@ static void test_format_commands(void) {
FLOAT_WIDTH_TEST(float);
FLOAT_WIDTH_TEST(double);
test("Format command with invalid printf format: ");
test("Format command with unhandled printf format (specifier 'p' not supported): ");
len = redisFormatCommand(&cmd,"key:%08p %b",(void*)1234,"foo",(size_t)3);
test_cond(len == -1);
test("Format command with invalid printf format (specifier missing): ");
len = redisFormatCommand(&cmd,"%-");
test_cond(len == -1);
const char *argv[3];
argv[0] = "SET";
argv[1] = "foo\0xxx";
@ -391,6 +405,25 @@ static void test_append_formatted_commands(struct config config) {
disconnect(c, 0);
}
static void test_tcp_options(struct config cfg) {
redisContext *c;
c = do_connect(cfg);
test("We can enable TCP_KEEPALIVE: ");
test_cond(redisEnableKeepAlive(c) == REDIS_OK);
#ifdef TCP_USER_TIMEOUT
test("We can set TCP_USER_TIMEOUT: ");
test_cond(redisSetTcpUserTimeout(c, 100) == REDIS_OK);
#else
test("Setting TCP_USER_TIMEOUT errors when unsupported: ");
test_cond(redisSetTcpUserTimeout(c, 100) == REDIS_ERR && c->err == REDIS_ERR_IO);
#endif
redisFree(c);
}
static void test_reply_reader(void) {
redisReader *reader;
void *reply, *root;
@ -568,6 +601,19 @@ static void test_reply_reader(void) {
test_cond(ret == REDIS_ERR && reply == NULL);
redisReaderFree(reader);
test("Don't reset state after protocol error(not segfault): ");
reader = redisReaderCreate();
redisReaderFeed(reader,(char*)"*3\r\n$3\r\nSET\r\n$5\r\nhello\r\n$", 25);
ret = redisReaderGetReply(reader,&reply);
assert(ret == REDIS_OK);
redisReaderFeed(reader,(char*)"3\r\nval\r\n", 8);
ret = redisReaderGetReply(reader,&reply);
test_cond(ret == REDIS_OK &&
((redisReply*)reply)->type == REDIS_REPLY_ARRAY &&
((redisReply*)reply)->elements == 3);
freeReplyObject(reply);
redisReaderFree(reader);
/* Regression test for issue #45 on GitHub. */
test("Don't do empty allocation for empty multi bulk: ");
reader = redisReaderCreate();
@ -637,12 +683,23 @@ static void test_reply_reader(void) {
freeReplyObject(reply);
redisReaderFree(reader);
test("Set error when RESP3 double is NaN: ");
test("Correctly parses RESP3 double NaN: ");
reader = redisReaderCreate();
redisReaderFeed(reader, ",nan\r\n",6);
ret = redisReaderGetReply(reader,&reply);
test_cond(ret == REDIS_ERR &&
strcasecmp(reader->errstr,"Bad double value") == 0);
test_cond(ret == REDIS_OK &&
((redisReply*)reply)->type == REDIS_REPLY_DOUBLE &&
isnan(((redisReply*)reply)->dval));
freeReplyObject(reply);
redisReaderFree(reader);
test("Correctly parses RESP3 double -Nan: ");
reader = redisReaderCreate();
redisReaderFeed(reader, ",-nan\r\n", 7);
ret = redisReaderGetReply(reader, &reply);
test_cond(ret == REDIS_OK &&
((redisReply*)reply)->type == REDIS_REPLY_DOUBLE &&
isnan(((redisReply*)reply)->dval));
freeReplyObject(reply);
redisReaderFree(reader);
@ -745,6 +802,20 @@ static void test_reply_reader(void) {
!strcmp(((redisReply*)reply)->str,"3492890328409238509324850943850943825024385"));
freeReplyObject(reply);
redisReaderFree(reader);
test("Can parse RESP3 doubles in an array: ");
reader = redisReaderCreate();
redisReaderFeed(reader, "*1\r\n,3.14159265358979323846\r\n",31);
ret = redisReaderGetReply(reader,&reply);
test_cond(ret == REDIS_OK &&
((redisReply*)reply)->type == REDIS_REPLY_ARRAY &&
((redisReply*)reply)->elements == 1 &&
((redisReply*)reply)->element[0]->type == REDIS_REPLY_DOUBLE &&
fabs(((redisReply*)reply)->element[0]->dval - 3.14159265358979323846) < 0.00000001 &&
((redisReply*)reply)->element[0]->len == 22 &&
strcmp(((redisReply*)reply)->element[0]->str, "3.14159265358979323846") == 0);
freeReplyObject(reply);
redisReaderFree(reader);
}
static void test_free_null(void) {
@ -819,9 +890,9 @@ static void test_allocator_injection(void) {
#define HIREDIS_BAD_DOMAIN "idontexist-noreally.com"
static void test_blocking_connection_errors(void) {
redisContext *c;
struct addrinfo hints = {.ai_family = AF_INET};
struct addrinfo *ai_tmp = NULL;
redisContext *c;
int rv = getaddrinfo(HIREDIS_BAD_DOMAIN, "6379", &hints, &ai_tmp);
if (rv != 0) {
@ -835,6 +906,7 @@ static void test_blocking_connection_errors(void) {
strcmp(c->errstr, "Can't resolve: " HIREDIS_BAD_DOMAIN) == 0 ||
strcmp(c->errstr, "Name does not resolve") == 0 ||
strcmp(c->errstr, "nodename nor servname provided, or not known") == 0 ||
strcmp(c->errstr, "node name or service name not known") == 0 ||
strcmp(c->errstr, "No address associated with hostname") == 0 ||
strcmp(c->errstr, "Temporary failure in name resolution") == 0 ||
strcmp(c->errstr, "hostname nor servname provided, or not known") == 0 ||
@ -847,12 +919,26 @@ static void test_blocking_connection_errors(void) {
}
#ifndef _WIN32
redisOptions opt = {0};
struct timeval tv;
test("Returns error when the port is not open: ");
c = redisConnect((char*)"localhost", 1);
test_cond(c->err == REDIS_ERR_IO &&
strcmp(c->errstr,"Connection refused") == 0);
redisFree(c);
/* Verify we don't regress from the fix in PR #1180 */
test("We don't clobber connection exception with setsockopt error: ");
tv = (struct timeval){.tv_sec = 0, .tv_usec = 500000};
opt.command_timeout = opt.connect_timeout = &tv;
REDIS_OPTIONS_SET_TCP(&opt, "localhost", 10337);
c = redisConnectWithOptions(&opt);
test_cond(c->err == REDIS_ERR_IO &&
strcmp(c->errstr, "Connection refused") == 0);
redisFree(c);
test("Returns error when the unix_sock socket path doesn't accept connections: ");
c = redisConnectUnix((char*)"/tmp/idontexist.sock");
test_cond(c->err == REDIS_ERR_IO); /* Don't care about the message... */
@ -914,11 +1000,19 @@ static void test_resp3_push_handler(redisContext *c) {
old = redisSetPushCallback(c, push_handler);
test("We can set a custom RESP3 PUSH handler: ");
reply = redisCommand(c, "SET key:0 val:0");
/* We need another command because depending on the version of Redis, the
* notification may be delivered after the command's reply. */
assert(reply != NULL);
freeReplyObject(reply);
reply = redisCommand(c, "PING");
test_cond(reply != NULL && reply->type == REDIS_REPLY_STATUS && pc.str == 1);
freeReplyObject(reply);
test("We properly handle a NIL invalidation payload: ");
reply = redisCommand(c, "FLUSHDB");
assert(reply != NULL);
freeReplyObject(reply);
reply = redisCommand(c, "PING");
test_cond(reply != NULL && reply->type == REDIS_REPLY_STATUS && pc.nil == 1);
freeReplyObject(reply);
@ -929,6 +1023,12 @@ static void test_resp3_push_handler(redisContext *c) {
assert((reply = redisCommand(c, "GET key:0")) != NULL);
freeReplyObject(reply);
assert((reply = redisCommand(c, "SET key:0 invalid")) != NULL);
/* Depending on Redis version, we may receive either push notification or
* status reply. Both cases are valid. */
if (reply->type == REDIS_REPLY_STATUS) {
freeReplyObject(reply);
reply = redisCommand(c, "PING");
}
test_cond(reply->type == REDIS_REPLY_PUSH);
freeReplyObject(reply);
@ -1089,6 +1189,13 @@ static void test_blocking_connection(struct config config) {
strcasecmp(reply->element[1]->str,"pong") == 0);
freeReplyObject(reply);
test("Send command by passing argc/argv: ");
const char *argv[3] = {"SET", "foo", "bar"};
size_t argvlen[3] = {3, 3, 3};
reply = redisCommandArgv(c,3,argv,argvlen);
test_cond(reply->type == REDIS_REPLY_STATUS);
freeReplyObject(reply);
/* Make sure passing NULL to redisGetReply is safe */
test("Can pass NULL to redisGetReply: ");
assert(redisAppendCommand(c, "PING") == REDIS_OK);
@ -1143,7 +1250,13 @@ static void test_blocking_connection_timeouts(struct config config) {
test("Does not return a reply when the command times out: ");
if (detect_debug_sleep(c)) {
redisAppendFormattedCommand(c, sleep_cmd, strlen(sleep_cmd));
// flush connection buffer without waiting for the reply
s = c->funcs->write(c);
assert(s == (ssize_t)hi_sdslen(c->obuf));
hi_sdsfree(c->obuf);
c->obuf = hi_sdsempty();
tv.tv_sec = 0;
tv.tv_usec = 10000;
redisSetTimeout(c, tv);
@ -1156,6 +1269,9 @@ static void test_blocking_connection_timeouts(struct config config) {
strcmp(c->errstr, "recv timeout") == 0);
#endif
freeReplyObject(reply);
// wait for the DEBUG SLEEP to complete so that Redis server is unblocked for the following tests
millisleep(3000);
} else {
test_skipped();
}
@ -1226,22 +1342,34 @@ static void test_blocking_io_errors(struct config config) {
static void test_invalid_timeout_errors(struct config config) {
redisContext *c;
test("Set error when an invalid timeout usec value is given to redisConnectWithTimeout: ");
test("Set error when an invalid timeout usec value is used during connect: ");
config.tcp.timeout.tv_sec = 0;
config.tcp.timeout.tv_usec = 10000001;
config.connect_timeout.tv_sec = 0;
config.connect_timeout.tv_usec = 10000001;
c = redisConnectWithTimeout(config.tcp.host, config.tcp.port, config.tcp.timeout);
if (config.type == CONN_TCP || config.type == CONN_SSL) {
c = redisConnectWithTimeout(config.tcp.host, config.tcp.port, config.connect_timeout);
} else if(config.type == CONN_UNIX) {
c = redisConnectUnixWithTimeout(config.unix_sock.path, config.connect_timeout);
} else {
assert(NULL);
}
test_cond(c->err == REDIS_ERR_IO && strcmp(c->errstr, "Invalid timeout specified") == 0);
redisFree(c);
test("Set error when an invalid timeout sec value is given to redisConnectWithTimeout: ");
test("Set error when an invalid timeout sec value is used during connect: ");
config.tcp.timeout.tv_sec = (((LONG_MAX) - 999) / 1000) + 1;
config.tcp.timeout.tv_usec = 0;
config.connect_timeout.tv_sec = (((LONG_MAX) - 999) / 1000) + 1;
config.connect_timeout.tv_usec = 0;
c = redisConnectWithTimeout(config.tcp.host, config.tcp.port, config.tcp.timeout);
if (config.type == CONN_TCP || config.type == CONN_SSL) {
c = redisConnectWithTimeout(config.tcp.host, config.tcp.port, config.connect_timeout);
} else if(config.type == CONN_UNIX) {
c = redisConnectUnixWithTimeout(config.unix_sock.path, config.connect_timeout);
} else {
assert(NULL);
}
test_cond(c->err == REDIS_ERR_IO && strcmp(c->errstr, "Invalid timeout specified") == 0);
redisFree(c);
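
Both checks above exercise the same validation: roughly speaking, hiredis converts the timeval into a millisecond count and rejects values whose tv_usec is out of range or whose total would overflow a long, leaving "Invalid timeout specified" in errstr. For contrast, a well-formed timeout looks like this (sketch only; host and port are placeholders, assumes <hiredis/hiredis.h> and <stdio.h>):

/* Sketch: connect with a sane 500 ms timeout. */
static redisContext *example_connect(const char *host, int port) {
    struct timeval connect_timeout = { .tv_sec = 0, .tv_usec = 500000 };
    redisContext *ctx = redisConnectWithTimeout(host, port, connect_timeout);
    if (ctx == NULL || ctx->err) {
        if (ctx) {
            fprintf(stderr, "connect failed: %s\n", ctx->errstr);
            redisFree(ctx);
        }
        return NULL;
    }
    return ctx;
}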
@ -1448,6 +1576,9 @@ static void test_throughput(struct config config) {
// }
#ifdef HIREDIS_TEST_ASYNC
#pragma GCC diagnostic ignored "-Woverlength-strings" /* required on gcc 4.8.x due to assert statements */
struct event_base *base;
typedef struct TestState {
@ -1729,10 +1860,14 @@ void subscribe_channel_a_cb(redisAsyncContext *ac, void *r, void *privdata) {
strcmp(reply->element[2]->str,"Hello!") == 0);
state->checkpoint++;
/* Unsubscribe to channels, including a channel X which we don't subscribe to */
/* Unsubscribe from channels, including channels X and Z which we don't subscribe to */
redisAsyncCommand(ac,unexpected_cb,
(void*)"unsubscribe should not call unexpected_cb()",
"unsubscribe B X A");
"unsubscribe B X A A Z");
/* Unsubscribe from patterns, none of which we subscribe to */
redisAsyncCommand(ac,unexpected_cb,
(void*)"punsubscribe should not call unexpected_cb()",
"punsubscribe");
/* Send a regular command after unsubscribing, then disconnect */
state->disconnect = 1;
redisAsyncCommand(ac,integer_cb,state,"LPUSH mylist foo");
@ -1749,6 +1884,7 @@ void subscribe_channel_a_cb(redisAsyncContext *ac, void *r, void *privdata) {
void subscribe_channel_b_cb(redisAsyncContext *ac, void *r, void *privdata) {
redisReply *reply = r;
TestState *state = privdata;
(void)ac;
assert(reply != NULL && reply->type == REDIS_REPLY_ARRAY &&
reply->elements == 3);
@ -1767,8 +1903,10 @@ void subscribe_channel_b_cb(redisAsyncContext *ac, void *r, void *privdata) {
/* Test handling of multiple channels
* - subscribe to channel A and B
* - a published message on A triggers an unsubscribe of channel B, X and A
* where channel X is not subscribed to.
* - a published message on A triggers an unsubscribe of channels B, X, A and Z
* where channels X and Z are not subscribed to.
* - the published message also triggers an unsubscribe from patterns. Since no
* pattern is subscribed to, the pattern element in the reply is NIL.
* - a command sent after unsubscribe triggers a disconnect */
static void test_pubsub_multiple_channels(struct config config) {
test("Subscribe to multiple channels: ");
@ -1881,6 +2019,250 @@ static void test_monitor(struct config config) {
}
#endif /* HIREDIS_TEST_ASYNC */
/* tests for async api using polling adapter, requires no extra libraries*/
/* enum for the test cases, the callbacks have different logic based on them */
typedef enum astest_no
{
ASTEST_CONNECT=0,
ASTEST_CONN_TIMEOUT,
ASTEST_PINGPONG,
ASTEST_PINGPONG_TIMEOUT,
ASTEST_ISSUE_931,
ASTEST_ISSUE_931_PING
}astest_no;
/* a static context for the async tests */
struct _astest {
redisAsyncContext *ac;
astest_no testno;
int counter;
int connects;
int connect_status;
int disconnects;
int pongs;
int disconnect_status;
int connected;
int err;
char errstr[256];
};
static struct _astest astest;
/* async callbacks */
static void asCleanup(void* data)
{
struct _astest *t = (struct _astest *)data;
t->ac = NULL;
}
static void commandCallback(struct redisAsyncContext *ac, void* _reply, void* _privdata);
static void connectCallback(redisAsyncContext *c, int status) {
struct _astest *t = (struct _astest *)c->data;
assert(t == &astest);
assert(t->connects == 0);
t->err = c->err;
strcpy(t->errstr, c->errstr);
t->connects++;
t->connect_status = status;
t->connected = status == REDIS_OK ? 1 : -1;
if (t->testno == ASTEST_ISSUE_931) {
/* disconnect again */
redisAsyncDisconnect(c);
}
else if (t->testno == ASTEST_ISSUE_931_PING)
{
redisAsyncCommand(c, commandCallback, NULL, "PING");
}
}
static void disconnectCallback(const redisAsyncContext *c, int status) {
assert(c->data == (void*)&astest);
assert(astest.disconnects == 0);
astest.err = c->err;
strcpy(astest.errstr, c->errstr);
astest.disconnects++;
astest.disconnect_status = status;
astest.connected = 0;
}
static void commandCallback(struct redisAsyncContext *ac, void* _reply, void* _privdata)
{
redisReply *reply = (redisReply*)_reply;
struct _astest *t = (struct _astest *)ac->data;
assert(t == &astest);
(void)_privdata;
t->err = ac->err;
strcpy(t->errstr, ac->errstr);
t->counter++;
if (t->testno == ASTEST_PINGPONG ||t->testno == ASTEST_ISSUE_931_PING)
{
assert(reply != NULL && reply->type == REDIS_REPLY_STATUS && strcmp(reply->str, "PONG") == 0);
t->pongs++;
redisAsyncFree(ac);
}
if (t->testno == ASTEST_PINGPONG_TIMEOUT)
{
/* two ping pongs */
assert(reply != NULL && reply->type == REDIS_REPLY_STATUS && strcmp(reply->str, "PONG") == 0);
t->pongs++;
if (t->counter == 1) {
int status = redisAsyncCommand(ac, commandCallback, NULL, "PING");
assert(status == REDIS_OK);
} else {
redisAsyncFree(ac);
}
}
}
static redisAsyncContext *do_aconnect(struct config config, astest_no testno)
{
redisOptions options = {0};
memset(&astest, 0, sizeof(astest));
astest.testno = testno;
astest.connect_status = astest.disconnect_status = -2;
if (config.type == CONN_TCP) {
options.type = REDIS_CONN_TCP;
options.connect_timeout = &config.connect_timeout;
REDIS_OPTIONS_SET_TCP(&options, config.tcp.host, config.tcp.port);
} else if (config.type == CONN_SSL) {
options.type = REDIS_CONN_TCP;
options.connect_timeout = &config.connect_timeout;
REDIS_OPTIONS_SET_TCP(&options, config.ssl.host, config.ssl.port);
} else if (config.type == CONN_UNIX) {
options.type = REDIS_CONN_UNIX;
options.endpoint.unix_socket = config.unix_sock.path;
} else if (config.type == CONN_FD) {
options.type = REDIS_CONN_USERFD;
/* Create a dummy connection just to get an fd to inherit */
redisContext *dummy_ctx = redisConnectUnix(config.unix_sock.path);
if (dummy_ctx) {
redisFD fd = disconnect(dummy_ctx, 1);
printf("Connecting to inherited fd %d\n", (int)fd);
options.endpoint.fd = fd;
}
}
redisAsyncContext *c = redisAsyncConnectWithOptions(&options);
assert(c);
astest.ac = c;
c->data = &astest;
c->dataCleanup = asCleanup;
redisPollAttach(c);
redisAsyncSetConnectCallbackNC(c, connectCallback);
redisAsyncSetDisconnectCallback(c, disconnectCallback);
return c;
}
static void as_printerr(void) {
printf("Async err %d : %s\n", astest.err, astest.errstr);
}
#define ASASSERT(e) do { \
if (!(e)) \
as_printerr(); \
assert(e); \
} while (0);
static void test_async_polling(struct config config) {
int status;
redisAsyncContext *c;
struct config defaultconfig = config;
test("Async connect: ");
c = do_aconnect(config, ASTEST_CONNECT);
assert(c);
while(astest.connected == 0)
redisPollTick(c, 0.1);
assert(astest.connects == 1);
ASASSERT(astest.connect_status == REDIS_OK);
assert(astest.disconnects == 0);
test_cond(astest.connected == 1);
test("Async free after connect: ");
assert(astest.ac != NULL);
redisAsyncFree(c);
assert(astest.disconnects == 1);
assert(astest.ac == NULL);
test_cond(astest.disconnect_status == REDIS_OK);
if (config.type == CONN_TCP || config.type == CONN_SSL) {
/* timeout can only be simulated with network */
test("Async connect timeout: ");
config.tcp.host = "192.168.254.254"; /* blackhole ip */
config.connect_timeout.tv_usec = 100000;
c = do_aconnect(config, ASTEST_CONN_TIMEOUT);
assert(c);
assert(c->err == 0);
while(astest.connected == 0)
redisPollTick(c, 0.1);
assert(astest.connected == -1);
/*
* freeing should not be done, clearing should have happened.
*redisAsyncFree(c);
*/
assert(astest.ac == NULL);
test_cond(astest.connect_status == REDIS_ERR);
config = defaultconfig;
}
/* Test a ping/pong after connection */
test("Async PING/PONG: ");
c = do_aconnect(config, ASTEST_PINGPONG);
while(astest.connected == 0)
redisPollTick(c, 0.1);
status = redisAsyncCommand(c, commandCallback, NULL, "PING");
assert(status == REDIS_OK);
while(astest.ac)
redisPollTick(c, 0.1);
test_cond(astest.pongs == 1);
/* Test a ping/pong after connection that didn't time out.
* see https://github.com/redis/hiredis/issues/945
*/
if (config.type == CONN_TCP || config.type == CONN_SSL) {
test("Async PING/PONG after connect timeout: ");
config.connect_timeout.tv_usec = 10000; /* 10ms */
c = do_aconnect(config, ASTEST_PINGPONG_TIMEOUT);
while(astest.connected == 0)
redisPollTick(c, 0.1);
/* sleep 10 ms, allowing the old connect timeout to arrive */
millisleep(10);
status = redisAsyncCommand(c, commandCallback, NULL, "PING");
assert(status == REDIS_OK);
while(astest.ac)
redisPollTick(c, 0.1);
test_cond(astest.pongs == 2);
config = defaultconfig;
}
/* Test disconnect from an on_connect callback
* see https://github.com/redis/hiredis/issues/931
*/
test("Disconnect from onConnected callback (Issue #931): ");
c = do_aconnect(config, ASTEST_ISSUE_931);
while(astest.disconnects == 0)
redisPollTick(c, 0.1);
assert(astest.connected == 0);
assert(astest.connects == 1);
test_cond(astest.disconnects == 1);
/* Test ping/pong from an on_connect callback
* see https://github.com/redis/hiredis/issues/931
*/
test("Ping/Pong from onConnected callback (Issue #931): ");
c = do_aconnect(config, ASTEST_ISSUE_931_PING);
/* connect callback issues a ping, response callback destroys the context */
while(astest.ac)
redisPollTick(c, 0.1);
assert(astest.connected == 0);
assert(astest.connects == 1);
assert(astest.disconnects == 1);
test_cond(astest.pongs == 1);
}
/* End of Async polling_adapter driven tests */
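
Outside the test harness, the polling-adapter pattern used above reduces to attach-and-tick: register the adapter on the async context, then pump redisPollTick() until the work is done. A minimal, hedged sketch (names are illustrative; assumes <hiredis/async.h>, <hiredis/adapters/poll.h>, <stdio.h>):

/* Sketch: one PING over the polling adapter, no event library required. */
static int example_done;

static void example_ping_cb(redisAsyncContext *ac, void *r, void *privdata) {
    redisReply *reply = r;
    (void)ac; (void)privdata;
    if (reply != NULL)
        printf("PING -> %s\n", reply->str);
    example_done = 1;
}

static void example_poll_ping(const char *host, int port) {
    redisAsyncContext *ac = redisAsyncConnect(host, port);
    if (ac == NULL)
        return;
    if (ac->err) {
        redisAsyncFree(ac);
        return;
    }
    redisPollAttach(ac);                 /* register read/write hooks for polling */
    redisAsyncCommand(ac, example_ping_cb, NULL, "PING");
    while (!example_done)
        redisPollTick(ac, 0.1);          /* pump I/O in 100 ms slices */
    redisAsyncFree(ac);                  /* tear down once the reply has arrived */
}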
int main(int argc, char **argv) {
struct config cfg = {
.tcp = {
@ -1963,6 +2345,7 @@ int main(int argc, char **argv) {
test_blocking_io_errors(cfg);
test_invalid_timeout_errors(cfg);
test_append_formatted_commands(cfg);
test_tcp_options(cfg);
if (throughput) test_throughput(cfg);
printf("\nTesting against Unix socket connection (%s): ", cfg.unix_sock.path);
@ -1972,6 +2355,7 @@ int main(int argc, char **argv) {
test_blocking_connection(cfg);
test_blocking_connection_timeouts(cfg);
test_blocking_io_errors(cfg);
test_invalid_timeout_errors(cfg);
if (throughput) test_throughput(cfg);
} else {
test_skipped();
@ -2000,6 +2384,7 @@ int main(int argc, char **argv) {
#endif
#ifdef HIREDIS_TEST_ASYNC
cfg.type = CONN_TCP;
printf("\nTesting asynchronous API against TCP connection (%s:%d):\n", cfg.tcp.host, cfg.tcp.port);
cfg.type = CONN_TCP;
@ -2017,6 +2402,15 @@ int main(int argc, char **argv) {
}
#endif /* HIREDIS_TEST_ASYNC */
cfg.type = CONN_TCP;
printf("\nTesting asynchronous API using polling_adapter TCP (%s:%d):\n", cfg.tcp.host, cfg.tcp.port);
test_async_polling(cfg);
if (test_unix_socket) {
cfg.type = CONN_UNIX;
printf("\nTesting asynchronous API using polling_adapter UNIX (%s):\n", cfg.unix_sock.path);
test_async_polling(cfg);
}
if (test_inherit_fd) {
printf("\nTesting against inherited fd (%s): ", cfg.unix_sock.path);
if (test_unix_socket) {

52
deps/hiredis/test.sh vendored
View File

@ -4,9 +4,17 @@ REDIS_SERVER=${REDIS_SERVER:-redis-server}
REDIS_PORT=${REDIS_PORT:-56379}
REDIS_SSL_PORT=${REDIS_SSL_PORT:-56443}
TEST_SSL=${TEST_SSL:-0}
SKIPS_AS_FAILS=${SKIPS_AS_FAILS-:0}
SKIPS_AS_FAILS=${SKIPS_AS_FAILS:-0}
ENABLE_DEBUG_CMD=
SSL_TEST_ARGS=
SKIPS_ARG=
SKIPS_ARG=${SKIPS_ARG:-}
REDIS_DOCKER=${REDIS_DOCKER:-}
# We need to enable the DEBUG command for redis-server >= 7.0.0
REDIS_MAJOR_VERSION="$(redis-server --version|awk -F'[^0-9]+' '{ print $2 }')"
if [ "$REDIS_MAJOR_VERSION" -gt "6" ]; then
ENABLE_DEBUG_CMD="enable-debug-command local"
fi
tmpdir=$(mktemp -d)
PID_FILE=${tmpdir}/hiredis-test-redis.pid
@ -43,20 +51,34 @@ if [ "$TEST_SSL" = "1" ]; then
fi
cleanup() {
set +e
kill $(cat ${PID_FILE})
if [ -n "${REDIS_DOCKER}" ] ; then
docker kill redis-test-server
else
set +e
kill $(cat ${PID_FILE})
fi
rm -rf ${tmpdir}
}
trap cleanup INT TERM EXIT
# base config
cat > ${tmpdir}/redis.conf <<EOF
daemonize yes
pidfile ${PID_FILE}
port ${REDIS_PORT}
bind 127.0.0.1
unixsocket ${SOCK_FILE}
unixsocketperm 777
EOF
# if not running in docker add these:
if [ ! -n "${REDIS_DOCKER}" ]; then
cat >> ${tmpdir}/redis.conf <<EOF
daemonize yes
${ENABLE_DEBUG_CMD}
bind 127.0.0.1
EOF
fi
# if doing ssl, add these
if [ "$TEST_SSL" = "1" ]; then
cat >> ${tmpdir}/redis.conf <<EOF
tls-port ${REDIS_SSL_PORT}
@ -66,13 +88,25 @@ tls-key-file ${SSL_KEY}
EOF
fi
echo ${tmpdir}
cat ${tmpdir}/redis.conf
${REDIS_SERVER} ${tmpdir}/redis.conf
if [ -n "${REDIS_DOCKER}" ] ; then
chmod a+wx ${tmpdir}
chmod a+r ${tmpdir}/*
docker run -d --rm --name redis-test-server \
-p ${REDIS_PORT}:${REDIS_PORT} \
-p ${REDIS_SSL_PORT}:${REDIS_SSL_PORT} \
-v ${tmpdir}:${tmpdir} \
${REDIS_DOCKER} \
redis-server ${tmpdir}/redis.conf
else
${REDIS_SERVER} ${tmpdir}/redis.conf
fi
# Wait until we detect the unix socket
echo waiting for server
while [ ! -S "${SOCK_FILE}" ]; do sleep 1; done
# Treat skips as failures if directed
[ "$SKIPS_AS_FAILS" = 1 ] && SKIPS_ARG="--skips-as-fails"
[ "$SKIPS_AS_FAILS" = 1 ] && SKIPS_ARG="${SKIPS_ARG} --skips-as-fails"
${TEST_PREFIX:-} ./hiredis-test -h 127.0.0.1 -p ${REDIS_PORT} -s ${SOCK_FILE} ${SSL_TEST_ARGS} ${SKIPS_ARG}

View File

@ -31,7 +31,6 @@ install:
- set PATH=c:\msys64\%MSYSTEM%\bin;c:\msys64\usr\bin;%PATH%
- if defined MSVC call "c:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %MSVC%
- if defined MSVC pacman --noconfirm -Rsc mingw-w64-%CPU%-gcc gcc
- pacman --noconfirm -Suy mingw-w64-%CPU%-make
build_script:
- bash -c "autoconf"

View File

@ -3,18 +3,43 @@ env:
ARCH: amd64
task:
matrix:
env:
DEBUG_CONFIG: --enable-debug
env:
DEBUG_CONFIG: --disable-debug
matrix:
- env:
PROF_CONFIG: --enable-prof
- env:
PROF_CONFIG: --disable-prof
matrix:
- name: 64-bit
env:
CC:
CXX:
- name: 32-bit
env:
CC: cc -m32
CXX: c++ -m32
matrix:
- env:
UNCOMMON_CONFIG:
- env:
UNCOMMON_CONFIG: --with-lg-page=16 --with-malloc-conf=tcache:false
freebsd_instance:
matrix:
image: freebsd-12-0-release-amd64
image: freebsd-11-2-release-amd64
image: freebsd-12-3-release-amd64
install_script:
- sed -i.bak -e 's,pkg+http://pkg.FreeBSD.org/\${ABI}/quarterly,pkg+http://pkg.FreeBSD.org/\${ABI}/latest,' /etc/pkg/FreeBSD.conf
- pkg upgrade -y
- pkg install -y autoconf gmake
script:
- autoconf
#- ./configure ${COMPILER_FLAGS:+ CC="$CC $COMPILER_FLAGS" CXX="$CXX $COMPILER_FLAGS" } $CONFIGURE_FLAGS
- ./configure
# We don't perfectly track FreeBSD stdlib.h definitions. This is fine when
# jemalloc is treated as a system header, but breaks otherwise, for example
# during these tests.
- ./configure --with-jemalloc-prefix=ci_ ${DEBUG_CONFIG} ${PROF_CONFIG} ${UNCOMMON_CONFIG}
- export JFLAG=`sysctl -n kern.smp.cpus`
- gmake -j${JFLAG}
- gmake -j${JFLAG} tests

122
deps/jemalloc/.clang-format vendored Normal file
View File

@ -0,0 +1,122 @@
# jemalloc targets clang-format version 8. We include every option it supports
# here, but comment out the ones that aren't relevant for us.
---
# AccessModifierOffset: -2
AlignAfterOpenBracket: DontAlign
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignEscapedNewlines: Right
AlignOperands: false
AlignTrailingComments: false
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: Empty
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterReturnType: AllDefinitions
AlwaysBreakBeforeMultilineStrings: true
# AlwaysBreakTemplateDeclarations: Yes
BinPackArguments: true
BinPackParameters: true
BraceWrapping:
AfterClass: false
AfterControlStatement: false
AfterEnum: false
AfterFunction: false
AfterNamespace: false
AfterObjCDeclaration: false
AfterStruct: false
AfterUnion: false
BeforeCatch: false
BeforeElse: false
IndentBraces: false
# BreakAfterJavaFieldAnnotations: true
BreakBeforeBinaryOperators: NonAssignment
BreakBeforeBraces: Attach
BreakBeforeTernaryOperators: true
# BreakConstructorInitializers: BeforeColon
# BreakInheritanceList: BeforeColon
BreakStringLiterals: false
ColumnLimit: 80
# CommentPragmas: ''
# CompactNamespaces: true
# ConstructorInitializerAllOnOneLineOrOnePerLine: true
# ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 2
Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: true
ForEachMacros: [ ql_foreach, qr_foreach, ]
# IncludeBlocks: Preserve
# IncludeCategories:
# - Regex: '^<.*\.h(pp)?>'
# Priority: 1
# IncludeIsMainRegex: ''
IndentCaseLabels: false
IndentPPDirectives: AfterHash
IndentWidth: 4
IndentWrappedFunctionNames: false
# JavaImportGroups: []
# JavaScriptQuotes: Leave
# JavaScriptWrapImports: True
KeepEmptyLinesAtTheStartOfBlocks: false
Language: Cpp
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
# NamespaceIndentation: None
# ObjCBinPackProtocolList: Auto
# ObjCBlockIndentWidth: 2
# ObjCSpaceAfterProperty: false
# ObjCSpaceBeforeProtocolList: false
PenaltyBreakAssignment: 2
PenaltyBreakBeforeFirstCallParameter: 1
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
# PenaltyBreakTemplateDeclaration: 10
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 60
PointerAlignment: Right
# RawStringFormats:
# - Language: TextProto
# Delimiters:
# - 'pb'
# - 'proto'
# EnclosingFunctions:
# - 'PARSE_TEXT_PROTO'
# BasedOnStyle: google
# - Language: Cpp
# Delimiters:
# - 'cc'
# - 'cpp'
# BasedOnStyle: llvm
# CanonicalDelimiter: 'cc'
ReflowComments: true
SortIncludes: false
SpaceAfterCStyleCast: false
# SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
# SpaceBeforeCpp11BracedList: false
# SpaceBeforeCtorInitializerColon: true
# SpaceBeforeInheritanceColon: true
SpaceBeforeParens: ControlStatements
# SpaceBeforeRangeBasedForLoopColon: true
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 2
SpacesInAngles: false
SpacesInCStyleCastParentheses: false
# SpacesInContainerLiterals: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
# Standard: Cpp11
# This is nominally supported in clang-format version 8, but not in the build
# used by some of the core jemalloc developers.
# StatementMacros: []
TabWidth: 8
UseTab: Never
...

View File

@ -13,6 +13,8 @@
/doc/jemalloc.html
/doc/jemalloc.3
/doc_internal/PROFILING_INTERNALS.pdf
/jemalloc.pc
/lib/
@ -50,6 +52,7 @@ test/include/test/jemalloc_test.h
test/include/test/jemalloc_test_defs.h
/test/integration/[A-Za-z]*
!/test/integration/cpp/
!/test/integration/[A-Za-z]*.*
/test/integration/*.[od]
/test/integration/*.out
@ -71,6 +74,11 @@ test/include/test/jemalloc_test_defs.h
/test/unit/*.[od]
/test/unit/*.out
/test/analyze/[A-Za-z]*
!/test/analyze/[A-Za-z]*.*
/test/analyze/*.[od]
/test/analyze/*.out
/VERSION
*.pdb

View File

@ -1,195 +1,413 @@
language: generic
dist: precise
# This config file is generated by ./scripts/gen_travis.py.
# Do not edit by hand.
matrix:
# We use 'minimal', because 'generic' makes Windows VMs hang at startup. Also
# the software provided by 'generic' is simply not needed for our tests.
# Differences are explained here:
# https://docs.travis-ci.com/user/languages/minimal-and-generic/
language: minimal
dist: focal
jobs:
include:
- os: windows
arch: amd64
env: CC=gcc CXX=g++ EXTRA_CFLAGS="-fcommon"
- os: windows
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-fcommon"
- os: windows
arch: amd64
env: CC=cl.exe CXX=cl.exe
- os: windows
arch: amd64
env: CC=gcc CXX=g++ CROSS_COMPILE_32BIT=yes EXTRA_CFLAGS="-fcommon"
- os: windows
arch: amd64
env: CC=cl.exe CXX=cl.exe CONFIGURE_FLAGS="--enable-debug"
- os: windows
arch: amd64
env: CC=gcc CXX=g++ CROSS_COMPILE_32BIT=yes CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-fcommon"
- os: windows
arch: amd64
env: CC=cl.exe CXX=cl.exe CROSS_COMPILE_32BIT=yes
- os: windows
arch: amd64
env: CC=cl.exe CXX=cl.exe CROSS_COMPILE_32BIT=yes CONFIGURE_FLAGS="--enable-debug"
- os: freebsd
arch: amd64
env: CC=gcc CXX=g++
- os: freebsd
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug"
- os: freebsd
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-prof --enable-prof-libunwind"
- os: freebsd
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--with-lg-page=16 --with-malloc-conf=tcache:false"
- os: freebsd
arch: amd64
env: CC=gcc CXX=g++ CROSS_COMPILE_32BIT=yes
- os: freebsd
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug --enable-prof --enable-prof-libunwind"
- os: freebsd
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug --with-lg-page=16 --with-malloc-conf=tcache:false"
- os: freebsd
arch: amd64
env: CC=gcc CXX=g++ CROSS_COMPILE_32BIT=yes CONFIGURE_FLAGS="--enable-debug"
- os: freebsd
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-prof --enable-prof-libunwind --with-lg-page=16 --with-malloc-conf=tcache:false"
- os: freebsd
arch: amd64
env: CC=gcc CXX=g++ CROSS_COMPILE_32BIT=yes CONFIGURE_FLAGS="--enable-prof --enable-prof-libunwind"
- os: freebsd
arch: amd64
env: CC=gcc CXX=g++ CROSS_COMPILE_32BIT=yes CONFIGURE_FLAGS="--with-lg-page=16 --with-malloc-conf=tcache:false"
- os: freebsd
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug --enable-prof --enable-prof-libunwind --with-lg-page=16 --with-malloc-conf=tcache:false"
- os: freebsd
arch: amd64
env: CC=gcc CXX=g++ CROSS_COMPILE_32BIT=yes CONFIGURE_FLAGS="--enable-debug --enable-prof --enable-prof-libunwind"
- os: freebsd
arch: amd64
env: CC=gcc CXX=g++ CROSS_COMPILE_32BIT=yes CONFIGURE_FLAGS="--enable-debug --with-lg-page=16 --with-malloc-conf=tcache:false"
- os: freebsd
arch: amd64
env: CC=gcc CXX=g++ CROSS_COMPILE_32BIT=yes CONFIGURE_FLAGS="--enable-prof --enable-prof-libunwind --with-lg-page=16 --with-malloc-conf=tcache:false"
- os: freebsd
arch: amd64
env: CC=gcc CXX=g++ CROSS_COMPILE_32BIT=yes CONFIGURE_FLAGS="--enable-debug --enable-prof --enable-prof-libunwind --with-lg-page=16 --with-malloc-conf=tcache:false"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
arch: amd64
env: CC=gcc CXX=g++ EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=clang CXX=clang++ EXTRA_CFLAGS="-Werror -Wno-array-bounds -Wno-unknown-warning-option -Wno-ignored-attributes"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CROSS_COMPILE_32BIT=yes COMPILER_FLAGS="-m32" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--with-lg-page=16" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=clang CXX=clang++ CROSS_COMPILE_32BIT=yes COMPILER_FLAGS="-m32" EXTRA_CFLAGS="-Werror -Wno-array-bounds -Wno-unknown-warning-option -Wno-ignored-attributes"
- os: linux
arch: amd64
env: CC=clang CXX=clang++ CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds -Wno-unknown-warning-option -Wno-ignored-attributes"
- os: linux
arch: amd64
env: CC=clang CXX=clang++ CONFIGURE_FLAGS="--enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds -Wno-unknown-warning-option -Wno-ignored-attributes"
- os: linux
arch: amd64
env: CC=clang CXX=clang++ CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds -Wno-unknown-warning-option -Wno-ignored-attributes"
- os: linux
arch: amd64
env: CC=clang CXX=clang++ CONFIGURE_FLAGS="--disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds -Wno-unknown-warning-option -Wno-ignored-attributes"
- os: linux
arch: amd64
env: CC=clang CXX=clang++ CONFIGURE_FLAGS="--enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds -Wno-unknown-warning-option -Wno-ignored-attributes"
- os: linux
arch: amd64
env: CC=clang CXX=clang++ CONFIGURE_FLAGS="--with-lg-page=16" EXTRA_CFLAGS="-Werror -Wno-array-bounds -Wno-unknown-warning-option -Wno-ignored-attributes"
- os: linux
arch: amd64
env: CC=clang CXX=clang++ CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds -Wno-unknown-warning-option -Wno-ignored-attributes"
- os: linux
arch: amd64
env: CC=clang CXX=clang++ CONFIGURE_FLAGS="--with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds -Wno-unknown-warning-option -Wno-ignored-attributes"
- os: linux
arch: amd64
env: CC=clang CXX=clang++ CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds -Wno-unknown-warning-option -Wno-ignored-attributes"
- os: linux
arch: amd64
env: CC=clang CXX=clang++ CONFIGURE_FLAGS="--with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds -Wno-unknown-warning-option -Wno-ignored-attributes"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CROSS_COMPILE_32BIT=yes COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CROSS_COMPILE_32BIT=yes COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CROSS_COMPILE_32BIT=yes COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CROSS_COMPILE_32BIT=yes COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CROSS_COMPILE_32BIT=yes COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CROSS_COMPILE_32BIT=yes COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-lg-page=16" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CROSS_COMPILE_32BIT=yes COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CROSS_COMPILE_32BIT=yes COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CROSS_COMPILE_32BIT=yes COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CROSS_COMPILE_32BIT=yes COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug --disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug --disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug --enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug --with-lg-page=16" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-prof --disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-prof --disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-prof --enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-prof --with-lg-page=16" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--disable-stats --disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--disable-stats --enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--disable-stats --with-lg-page=16" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--disable-libdl --enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--disable-libdl --with-lg-page=16" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--disable-libdl --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--disable-libdl --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--disable-libdl --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--disable-libdl --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-opt-safety-checks --with-lg-page=16" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-opt-safety-checks --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-opt-safety-checks --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-opt-safety-checks --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-opt-safety-checks --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--with-lg-page=16 --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--with-lg-page=16 --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--with-lg-page=16 --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--with-lg-page=16 --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--with-malloc-conf=tcache:false,dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--with-malloc-conf=tcache:false,percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--with-malloc-conf=tcache:false,background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--with-malloc-conf=dss:primary,percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--with-malloc-conf=dss:primary,background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu,background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: ppc64le
env: CC=gcc CXX=g++ EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: ppc64le
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: ppc64le
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: ppc64le
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: ppc64le
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: ppc64le
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: ppc64le
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--with-lg-page=16" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: ppc64le
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: ppc64le
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: ppc64le
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
arch: ppc64le
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: osx
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
addons: &gcc_multilib
apt:
packages:
- gcc-multilib
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
arch: amd64
env: CC=gcc CXX=g++ EXTRA_CFLAGS="-Werror -Wno-array-bounds -Wno-unknown-warning-option -Wno-ignored-attributes -Wno-deprecated-declarations"
- os: osx
env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
arch: amd64
env: CC=gcc CXX=g++ CROSS_COMPILE_32BIT=yes EXTRA_CFLAGS="-Werror -Wno-array-bounds -Wno-unknown-warning-option -Wno-ignored-attributes -Wno-deprecated-declarations"
- os: osx
env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds -Wno-unknown-warning-option -Wno-ignored-attributes -Wno-deprecated-declarations"
- os: osx
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds -Wno-unknown-warning-option -Wno-ignored-attributes -Wno-deprecated-declarations"
- os: osx
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds -Wno-unknown-warning-option -Wno-ignored-attributes -Wno-deprecated-declarations"
- os: osx
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds -Wno-unknown-warning-option -Wno-ignored-attributes -Wno-deprecated-declarations"
- os: osx
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--with-lg-page=16" EXTRA_CFLAGS="-Werror -Wno-array-bounds -Wno-unknown-warning-option -Wno-ignored-attributes -Wno-deprecated-declarations"
- os: osx
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=clang CXX=clang++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
addons: *gcc_multilib
- os: linux
env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
addons: *gcc_multilib
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
addons: *gcc_multilib
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
addons: *gcc_multilib
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
addons: *gcc_multilib
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
addons: *gcc_multilib
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
addons: *gcc_multilib
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
addons: *gcc_multilib
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
addons: *gcc_multilib
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
addons: *gcc_multilib
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl --enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-opt-safety-checks --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-opt-safety-checks --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-opt-safety-checks --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-opt-safety-checks --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false,dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false,percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false,background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary,percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary,background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu,background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
arch: amd64
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds -Wno-unknown-warning-option -Wno-ignored-attributes -Wno-deprecated-declarations"
# Development build
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --disable-cache-oblivious --enable-stats --enable-log --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug --disable-cache-oblivious --enable-stats --enable-log --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
# --enable-experimental-smallocx:
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --enable-experimental-smallocx --enable-stats --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug --enable-experimental-smallocx --enable-stats --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
# Valgrind
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" JEMALLOC_TEST_PREFIX="valgrind"
addons:
apt:
packages:
- valgrind
before_install:
- |-
if test -f "./scripts/$TRAVIS_OS_NAME/before_install.sh"; then
source ./scripts/$TRAVIS_OS_NAME/before_install.sh
fi
before_script:
- autoconf
- scripts/gen_travis.py > travis_script && diff .travis.yml travis_script
- ./configure ${COMPILER_FLAGS:+ CC="$CC $COMPILER_FLAGS" CXX="$CXX $COMPILER_FLAGS" } $CONFIGURE_FLAGS
- make -j3
- make -j3 tests
- |-
if test -f "./scripts/$TRAVIS_OS_NAME/before_script.sh"; then
source ./scripts/$TRAVIS_OS_NAME/before_script.sh
else
scripts/gen_travis.py > travis_script && diff .travis.yml travis_script
autoconf
# If COMPILER_FLAGS are not empty, add them to CC and CXX
./configure ${COMPILER_FLAGS:+ CC="$CC $COMPILER_FLAGS" CXX="$CXX $COMPILER_FLAGS"} $CONFIGURE_FLAGS
make -j3
make -j3 tests
fi
script:
- make check
- |-
if test -f "./scripts/$TRAVIS_OS_NAME/script.sh"; then
source ./scripts/$TRAVIS_OS_NAME/script.sh
else
make check
fi

View File

@ -4,6 +4,106 @@ brevity. Much more detail can be found in the git revision history:
https://github.com/jemalloc/jemalloc
* 5.3.0 (May 6, 2022)
This release contains many speed and space optimizations, from micro
optimizations on common paths to rework of internal data structures and
locking schemes, and many more changes too detailed to list below. System-level
metric improvements of multiple percent were measured in tested production
workloads. The release has gone through large-scale production testing.
New features:
- Add the thread.idle mallctl which hints that the calling thread will be
idle for a nontrivial period of time. (@davidtgoldblatt)
- Allow small size classes to be the maximum size class to cache in the
thread-specific cache, through the opt.[lg_]tcache_max option. (@interwq,
@jordalgo)
- Make the behavior of realloc(ptr, 0) configurable with opt.zero_realloc.
(@davidtgoldblatt)
- Add 'make uninstall' support. (@sangshuduo, @Lapenkov)
- Support C++17 over-aligned allocation. (@marksantaniello)
- Add the thread.peak mallctl for approximate per-thread peak memory tracking.
(@davidtgoldblatt)
- Add interval-based stats output opt.stats_interval. (@interwq)
- Add prof.prefix to override filename prefixes for dumps. (@zhxchen17)
- Add high resolution timestamp support for profiling. (@tyroguru)
- Add the --collapsed flag to jeprof for flamegraph generation.
(@igorwwwwwwwwwwwwwwwwwwww)
- Add the --debug-syms-by-id option to jeprof for debug symbols discovery.
(@DeannaGelbart)
- Add the opt.prof_leak_error option to exit with error code when leak is
detected using opt.prof_final. (@yunxuo)
- Add opt.cache_oblivious as a runtime alternative to config.cache_oblivious.
(@interwq)
- Add mallctl interfaces:
+ opt.zero_realloc (@davidtgoldblatt)
+ opt.cache_oblivious (@interwq)
+ opt.prof_leak_error (@yunxuo)
+ opt.stats_interval (@interwq)
+ opt.stats_interval_opts (@interwq)
+ opt.tcache_max (@interwq)
+ opt.trust_madvise (@azat)
+ prof.prefix (@zhxchen17)
+ stats.zero_reallocs (@davidtgoldblatt)
+ thread.idle (@davidtgoldblatt)
+ thread.peak.{read,reset} (@davidtgoldblatt)
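
The mallctl interfaces listed above are all reached through the standard mallctl() entry point. A brief, hedged sketch of exercising a couple of them — the value types follow the jemalloc manual and are an assumption here, and the symbol may be prefixed (e.g. je_mallctl) when --with-jemalloc-prefix is used; assumes <jemalloc/jemalloc.h>, <stdio.h>, <stdint.h>, <inttypes.h>:

/* Sketch: touching a few of the new mallctls. */
static void example_thread_peak(void) {
    uint64_t peak = 0;
    size_t sz = sizeof(peak);

    /* Hint that this thread is about to go idle (no in/out arguments). */
    mallctl("thread.idle", NULL, NULL, NULL, 0);

    /* Read the approximate per-thread peak counter (assumed uint64_t). */
    if (mallctl("thread.peak.read", &peak, &sz, NULL, 0) == 0)
        printf("thread peak: %" PRIu64 " bytes\n", peak);

    /* Start a fresh measurement window. */
    mallctl("thread.peak.reset", NULL, NULL, NULL, 0);
}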
Bug fixes:
- Fix the synchronization around explicit tcache creation which could cause
invalid tcache identifiers. This regression was first released in 5.0.0.
(@yoshinorim, @davidtgoldblatt)
- Fix a profiling biasing issue which could cause incorrect heap usage and
object counts. This issue existed in all previous releases with the heap
profiling feature. (@davidtgoldblatt)
- Fix the order of stats counter updating on large realloc which could cause
failed assertions. This regression was first released in 5.0.0. (@azat)
- Fix the locking on the arena destroy mallctl, which could cause concurrent
arena creations to fail. This functionality was first introduced in 5.0.0.
(@interwq)
Portability improvements:
- Remove nothrow from system function declarations on macOS and FreeBSD.
(@davidtgoldblatt, @fredemmott, @leres)
- Improve overcommit and page alignment settings on NetBSD. (@zoulasc)
- Improve CPU affinity support on BSD platforms. (@devnexen)
- Improve utrace detection and support. (@devnexen)
- Improve QEMU support with MADV_DONTNEED zeroed pages detection. (@azat)
- Add memcntl support on Solaris / illumos. (@devnexen)
- Improve CPU_SPINWAIT on ARM. (@AWSjswinney)
- Improve TSD cleanup on FreeBSD. (@Lapenkov)
- Disable percpu_arena if the CPU count cannot be reliably detected. (@azat)
- Add malloc_size(3) override support. (@devnexen)
- Add mmap VM_MAKE_TAG support. (@devnexen)
- Add support for MADV_[NO]CORE. (@devnexen)
- Add support for DragonFlyBSD. (@devnexen)
- Fix the QUANTUM setting on MIPS64. (@brooksdavis)
- Add the QUANTUM setting for ARC. (@vineetgarc)
- Add the QUANTUM setting for LoongArch. (@wangjl-uos)
- Add QNX support. (@jqian-aurora)
- Avoid atexit(3) calls unless the relevant profiling features are enabled.
(@BusyJay, @laiwei-rice, @interwq)
- Fix unknown option detection when using Clang. (@Lapenkov)
- Fix symbol conflict with musl libc. (@georgthegreat)
- Add -Wimplicit-fallthrough checks. (@nickdesaulniers)
- Add __forceinline support on MSVC. (@santagada)
- Improve FreeBSD and Windows CI support. (@Lapenkov)
- Add CI support for PPC64LE architecture. (@ezeeyahoo)
Incompatible changes:
- Maximum size class allowed in tcache (opt.[lg_]tcache_max) now has an upper
bound of 8MiB. (@interwq)
Optimizations and refactors (@davidtgoldblatt, @Lapenkov, @interwq):
- Optimize the common cases of the thread cache operations.
- Optimize internal data structures, including RB tree and pairing heap.
- Optimize the internal locking on extent management.
- Extract and refactor the internal page allocator and interface modules.
Documentation:
- Fix doc build with --with-install-suffix. (@lawmurray, @interwq)
- Add PROFILING_INTERNALS.md. (@davidtgoldblatt)
- Ensure the proper order of doc building and installation. (@Mingli-Yu)
* 5.2.1 (August 5, 2019)
This release is primarily about Windows. A critical virtual memory leak is

View File

@ -9,14 +9,17 @@ If building from unpackaged developer sources, the simplest command sequence
that might work is:
./autogen.sh
make dist
make
make install
Note that documentation is not built by the default target because doing so
would create a dependency on xsltproc in packaged releases, hence the
requirement to either run 'make dist' or avoid installing docs via the various
install_* targets documented below.
You can uninstall the installed build artifacts like this:
make uninstall
Notes:
- "autoconf" needs to be installed
- Documentation is built by the default target only when xsltproc is
available. Build will warn but not stop if the dependency is missing.
## Advanced configuration
@ -188,13 +191,13 @@ any of the following arguments (not a definitive list) to 'configure':
* `--disable-cache-oblivious`
Disable cache-oblivious large allocation alignment for large allocation
requests with no alignment constraints. If this feature is disabled, all
large allocations are page-aligned as an implementation artifact, which can
severely harm CPU cache utilization. However, the cache-oblivious layout
comes at the cost of one extra page per large allocation, which in the
most extreme case increases physical memory usage for the 16 KiB size class
to 20 KiB.
Disable cache-oblivious large allocation alignment by default, for large
allocation requests with no alignment constraints. If this feature is
disabled, all large allocations are page-aligned as an implementation
artifact, which can severely harm CPU cache utilization. However, the
cache-oblivious layout comes at the cost of one extra page per large
allocation, which in the most extreme case increases physical memory usage
for the 16 KiB size class to 20 KiB.
* `--disable-syscall`

View File

@ -99,31 +99,60 @@ C_SRCS := $(srcroot)src/jemalloc.c \
$(srcroot)src/background_thread.c \
$(srcroot)src/base.c \
$(srcroot)src/bin.c \
$(srcroot)src/bin_info.c \
$(srcroot)src/bitmap.c \
$(srcroot)src/buf_writer.c \
$(srcroot)src/cache_bin.c \
$(srcroot)src/ckh.c \
$(srcroot)src/counter.c \
$(srcroot)src/ctl.c \
$(srcroot)src/decay.c \
$(srcroot)src/div.c \
$(srcroot)src/ecache.c \
$(srcroot)src/edata.c \
$(srcroot)src/edata_cache.c \
$(srcroot)src/ehooks.c \
$(srcroot)src/emap.c \
$(srcroot)src/eset.c \
$(srcroot)src/exp_grow.c \
$(srcroot)src/extent.c \
$(srcroot)src/extent_dss.c \
$(srcroot)src/extent_mmap.c \
$(srcroot)src/hash.c \
$(srcroot)src/fxp.c \
$(srcroot)src/san.c \
$(srcroot)src/san_bump.c \
$(srcroot)src/hook.c \
$(srcroot)src/hpa.c \
$(srcroot)src/hpa_hooks.c \
$(srcroot)src/hpdata.c \
$(srcroot)src/inspect.c \
$(srcroot)src/large.c \
$(srcroot)src/log.c \
$(srcroot)src/malloc_io.c \
$(srcroot)src/mutex.c \
$(srcroot)src/mutex_pool.c \
$(srcroot)src/nstime.c \
$(srcroot)src/pa.c \
$(srcroot)src/pa_extra.c \
$(srcroot)src/pai.c \
$(srcroot)src/pac.c \
$(srcroot)src/pages.c \
$(srcroot)src/prng.c \
$(srcroot)src/peak_event.c \
$(srcroot)src/prof.c \
$(srcroot)src/prof_data.c \
$(srcroot)src/prof_log.c \
$(srcroot)src/prof_recent.c \
$(srcroot)src/prof_stats.c \
$(srcroot)src/prof_sys.c \
$(srcroot)src/psset.c \
$(srcroot)src/rtree.c \
$(srcroot)src/safety_check.c \
$(srcroot)src/stats.c \
$(srcroot)src/sc.c \
$(srcroot)src/sec.c \
$(srcroot)src/stats.c \
$(srcroot)src/sz.c \
$(srcroot)src/tcache.c \
$(srcroot)src/test_hooks.c \
$(srcroot)src/thread_event.c \
$(srcroot)src/ticker.c \
$(srcroot)src/tsd.c \
$(srcroot)src/witness.c
@ -148,88 +177,124 @@ else
LJEMALLOC := $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
endif
PC := $(objroot)jemalloc.pc
MAN3 := $(objroot)doc/jemalloc$(install_suffix).3
DOCS_XML := $(objroot)doc/jemalloc$(install_suffix).xml
DOCS_HTML := $(DOCS_XML:$(objroot)%.xml=$(objroot)%.html)
DOCS_MAN3 := $(DOCS_XML:$(objroot)%.xml=$(objroot)%.3)
DOCS := $(DOCS_HTML) $(DOCS_MAN3)
C_TESTLIB_SRCS := $(srcroot)test/src/btalloc.c $(srcroot)test/src/btalloc_0.c \
$(srcroot)test/src/btalloc_1.c $(srcroot)test/src/math.c \
$(srcroot)test/src/mtx.c $(srcroot)test/src/mq.c \
$(srcroot)test/src/mtx.c $(srcroot)test/src/sleep.c \
$(srcroot)test/src/SFMT.c $(srcroot)test/src/test.c \
$(srcroot)test/src/thd.c $(srcroot)test/src/timer.c
ifeq (1, $(link_whole_archive))
C_UTIL_INTEGRATION_SRCS :=
C_UTIL_CPP_SRCS :=
else
C_UTIL_INTEGRATION_SRCS := $(srcroot)src/nstime.c $(srcroot)src/malloc_io.c
C_UTIL_INTEGRATION_SRCS := $(srcroot)src/nstime.c $(srcroot)src/malloc_io.c \
$(srcroot)src/ticker.c
C_UTIL_CPP_SRCS := $(srcroot)src/nstime.c $(srcroot)src/malloc_io.c
endif
TESTS_UNIT := \
$(srcroot)test/unit/a0.c \
$(srcroot)test/unit/arena_decay.c \
$(srcroot)test/unit/arena_reset.c \
$(srcroot)test/unit/atomic.c \
$(srcroot)test/unit/background_thread.c \
$(srcroot)test/unit/background_thread_enable.c \
$(srcroot)test/unit/base.c \
$(srcroot)test/unit/batch_alloc.c \
$(srcroot)test/unit/binshard.c \
$(srcroot)test/unit/bitmap.c \
$(srcroot)test/unit/bit_util.c \
$(srcroot)test/unit/binshard.c \
$(srcroot)test/unit/buf_writer.c \
$(srcroot)test/unit/cache_bin.c \
$(srcroot)test/unit/ckh.c \
$(srcroot)test/unit/counter.c \
$(srcroot)test/unit/decay.c \
$(srcroot)test/unit/div.c \
$(srcroot)test/unit/double_free.c \
$(srcroot)test/unit/edata_cache.c \
$(srcroot)test/unit/emitter.c \
$(srcroot)test/unit/extent_quantize.c \
$(srcroot)test/unit/extent_util.c \
${srcroot}test/unit/fb.c \
$(srcroot)test/unit/fork.c \
${srcroot}test/unit/fxp.c \
${srcroot}test/unit/san.c \
${srcroot}test/unit/san_bump.c \
$(srcroot)test/unit/hash.c \
$(srcroot)test/unit/hook.c \
$(srcroot)test/unit/hpa.c \
$(srcroot)test/unit/hpa_background_thread.c \
$(srcroot)test/unit/hpdata.c \
$(srcroot)test/unit/huge.c \
$(srcroot)test/unit/inspect.c \
$(srcroot)test/unit/junk.c \
$(srcroot)test/unit/junk_alloc.c \
$(srcroot)test/unit/junk_free.c \
$(srcroot)test/unit/log.c \
$(srcroot)test/unit/mallctl.c \
$(srcroot)test/unit/malloc_conf_2.c \
$(srcroot)test/unit/malloc_io.c \
$(srcroot)test/unit/math.c \
$(srcroot)test/unit/mpsc_queue.c \
$(srcroot)test/unit/mq.c \
$(srcroot)test/unit/mtx.c \
$(srcroot)test/unit/nstime.c \
$(srcroot)test/unit/oversize_threshold.c \
$(srcroot)test/unit/pa.c \
$(srcroot)test/unit/pack.c \
$(srcroot)test/unit/pages.c \
$(srcroot)test/unit/peak.c \
$(srcroot)test/unit/ph.c \
$(srcroot)test/unit/prng.c \
$(srcroot)test/unit/prof_accum.c \
$(srcroot)test/unit/prof_active.c \
$(srcroot)test/unit/prof_gdump.c \
$(srcroot)test/unit/prof_hook.c \
$(srcroot)test/unit/prof_idump.c \
$(srcroot)test/unit/prof_log.c \
$(srcroot)test/unit/prof_mdump.c \
$(srcroot)test/unit/prof_recent.c \
$(srcroot)test/unit/prof_reset.c \
$(srcroot)test/unit/prof_stats.c \
$(srcroot)test/unit/prof_tctx.c \
$(srcroot)test/unit/prof_thread_name.c \
$(srcroot)test/unit/prof_sys_thread_name.c \
$(srcroot)test/unit/psset.c \
$(srcroot)test/unit/ql.c \
$(srcroot)test/unit/qr.c \
$(srcroot)test/unit/rb.c \
$(srcroot)test/unit/retained.c \
$(srcroot)test/unit/rtree.c \
$(srcroot)test/unit/safety_check.c \
$(srcroot)test/unit/sc.c \
$(srcroot)test/unit/sec.c \
$(srcroot)test/unit/seq.c \
$(srcroot)test/unit/SFMT.c \
$(srcroot)test/unit/sc.c \
$(srcroot)test/unit/size_check.c \
$(srcroot)test/unit/size_classes.c \
$(srcroot)test/unit/slab.c \
$(srcroot)test/unit/smoothstep.c \
$(srcroot)test/unit/spin.c \
$(srcroot)test/unit/stats.c \
$(srcroot)test/unit/stats_print.c \
$(srcroot)test/unit/sz.c \
$(srcroot)test/unit/tcache_max.c \
$(srcroot)test/unit/test_hooks.c \
$(srcroot)test/unit/thread_event.c \
$(srcroot)test/unit/ticker.c \
$(srcroot)test/unit/nstime.c \
$(srcroot)test/unit/tsd.c \
$(srcroot)test/unit/uaf.c \
$(srcroot)test/unit/witness.c \
$(srcroot)test/unit/zero.c
$(srcroot)test/unit/zero.c \
$(srcroot)test/unit/zero_realloc_abort.c \
$(srcroot)test/unit/zero_realloc_free.c \
$(srcroot)test/unit/zero_realloc_alloc.c \
$(srcroot)test/unit/zero_reallocs.c
ifeq (@enable_prof@, 1)
TESTS_UNIT += \
$(srcroot)test/unit/arena_reset_prof.c
$(srcroot)test/unit/arena_reset_prof.c \
$(srcroot)test/unit/batch_alloc_prof.c
endif
TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \
$(srcroot)test/integration/allocated.c \
@ -251,16 +316,26 @@ TESTS_INTEGRATION += \
endif
ifeq (@enable_cxx@, 1)
CPP_SRCS := $(srcroot)src/jemalloc_cpp.cpp
TESTS_INTEGRATION_CPP := $(srcroot)test/integration/cpp/basic.cpp
TESTS_INTEGRATION_CPP := $(srcroot)test/integration/cpp/basic.cpp \
$(srcroot)test/integration/cpp/infallible_new_true.cpp \
$(srcroot)test/integration/cpp/infallible_new_false.cpp
else
CPP_SRCS :=
TESTS_INTEGRATION_CPP :=
endif
TESTS_STRESS := $(srcroot)test/stress/microbench.c \
$(srcroot)test/stress/hookbench.c
TESTS_ANALYZE := $(srcroot)test/analyze/prof_bias.c \
$(srcroot)test/analyze/rand.c \
$(srcroot)test/analyze/sizes.c
TESTS_STRESS := $(srcroot)test/stress/batch_alloc.c \
$(srcroot)test/stress/fill_flush.c \
$(srcroot)test/stress/hookbench.c \
$(srcroot)test/stress/large_microbench.c \
$(srcroot)test/stress/mallctl.c \
$(srcroot)test/stress/microbench.c
TESTS := $(TESTS_UNIT) $(TESTS_INTEGRATION) $(TESTS_INTEGRATION_CPP) $(TESTS_STRESS)
TESTS := $(TESTS_UNIT) $(TESTS_INTEGRATION) $(TESTS_INTEGRATION_CPP) \
$(TESTS_ANALYZE) $(TESTS_STRESS)
PRIVATE_NAMESPACE_HDRS := $(objroot)include/jemalloc/internal/private_namespace.h $(objroot)include/jemalloc/internal/private_namespace_jet.h
PRIVATE_NAMESPACE_GEN_HDRS := $(PRIVATE_NAMESPACE_HDRS:%.h=%.gen.h)
@ -276,14 +351,19 @@ C_JET_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.$(O))
C_TESTLIB_UNIT_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.unit.$(O))
C_TESTLIB_INTEGRATION_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O))
C_UTIL_INTEGRATION_OBJS := $(C_UTIL_INTEGRATION_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O))
C_TESTLIB_ANALYZE_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.analyze.$(O))
C_TESTLIB_STRESS_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.stress.$(O))
C_TESTLIB_OBJS := $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(C_TESTLIB_STRESS_OBJS)
C_TESTLIB_OBJS := $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_INTEGRATION_OBJS) \
$(C_UTIL_INTEGRATION_OBJS) $(C_TESTLIB_ANALYZE_OBJS) \
$(C_TESTLIB_STRESS_OBJS)
TESTS_UNIT_OBJS := $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%.$(O))
TESTS_INTEGRATION_OBJS := $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%.$(O))
TESTS_INTEGRATION_CPP_OBJS := $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%.$(O))
TESTS_ANALYZE_OBJS := $(TESTS_ANALYZE:$(srcroot)%.c=$(objroot)%.$(O))
TESTS_STRESS_OBJS := $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%.$(O))
TESTS_OBJS := $(TESTS_UNIT_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_STRESS_OBJS)
TESTS_OBJS := $(TESTS_UNIT_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_ANALYZE_OBJS) \
$(TESTS_STRESS_OBJS)
TESTS_CPP_OBJS := $(TESTS_INTEGRATION_CPP_OBJS)
.PHONY: all dist build_doc_html build_doc_man build_doc
@ -298,7 +378,7 @@ all: build_lib
dist: build_doc
$(objroot)doc/%.html : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/html.xsl
$(objroot)doc/%$(install_suffix).html : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/html.xsl
ifneq ($(XSLROOT),)
$(XSLTPROC) -o $@ $(objroot)doc/html.xsl $<
else
@ -308,9 +388,16 @@ endif
@echo "Missing xsltproc. "$@" not (re)built."
endif
$(objroot)doc/%.3 : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/manpages.xsl
$(objroot)doc/%$(install_suffix).3 : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/manpages.xsl
ifneq ($(XSLROOT),)
$(XSLTPROC) -o $@ $(objroot)doc/manpages.xsl $<
# The -o option (output filename) of xsltproc may not work (it uses the
# <refname> in the .xml file). Manually add the suffix if so.
ifneq ($(install_suffix),)
@if [ -f $(objroot)doc/jemalloc.3 ]; then \
mv $(objroot)doc/jemalloc.3 $(objroot)doc/jemalloc$(install_suffix).3 ; \
fi
endif
else
ifeq ($(wildcard $(DOCS_MAN3)),)
@echo "Missing xsltproc. Doc not built." > $@
@ -357,12 +444,15 @@ $(C_TESTLIB_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST
$(C_TESTLIB_INTEGRATION_OBJS): $(objroot)test/src/%.integration.$(O): $(srcroot)test/src/%.c
$(C_TESTLIB_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST
$(C_UTIL_INTEGRATION_OBJS): $(objroot)src/%.integration.$(O): $(srcroot)src/%.c
$(C_TESTLIB_ANALYZE_OBJS): $(objroot)test/src/%.analyze.$(O): $(srcroot)test/src/%.c
$(C_TESTLIB_ANALYZE_OBJS): CPPFLAGS += -DJEMALLOC_ANALYZE_TEST
$(C_TESTLIB_STRESS_OBJS): $(objroot)test/src/%.stress.$(O): $(srcroot)test/src/%.c
$(C_TESTLIB_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST -DJEMALLOC_STRESS_TESTLIB
$(C_TESTLIB_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include
$(TESTS_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST
$(TESTS_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST
$(TESTS_INTEGRATION_CPP_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_CPP_TEST
$(TESTS_ANALYZE_OBJS): CPPFLAGS += -DJEMALLOC_ANALYZE_TEST
$(TESTS_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST
$(TESTS_OBJS): $(objroot)test/%.$(O): $(srcroot)test/%.c
$(TESTS_CPP_OBJS): $(objroot)test/%.$(O): $(srcroot)test/%.cpp
@ -382,7 +472,7 @@ $(TESTS_OBJS) $(TESTS_CPP_OBJS): $(objroot)test/include/test/jemalloc_test.h
endif
$(C_OBJS) $(CPP_OBJS) $(C_PIC_OBJS) $(CPP_PIC_OBJS) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_INTEGRATION_CPP_OBJS): $(objroot)include/jemalloc/internal/private_namespace.h
$(C_JET_OBJS) $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_STRESS_OBJS) $(TESTS_UNIT_OBJS) $(TESTS_STRESS_OBJS): $(objroot)include/jemalloc/internal/private_namespace_jet.h
$(C_JET_OBJS) $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_ANALYZE_OBJS) $(C_TESTLIB_STRESS_OBJS) $(TESTS_UNIT_OBJS) $(TESTS_ANALYZE_OBJS) $(TESTS_STRESS_OBJS): $(objroot)include/jemalloc/internal/private_namespace_jet.h
$(C_SYM_OBJS) $(C_OBJS) $(C_PIC_OBJS) $(C_JET_SYM_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS): %.$(O):
@mkdir -p $(@D)
@ -406,7 +496,7 @@ $(objroot)include/jemalloc/internal/private_namespace_jet.gen.h: $(C_JET_SYMS)
$(SHELL) $(srcroot)include/jemalloc/internal/private_namespace.sh $^ > $@
%.h: %.gen.h
@if ! `cmp -s $< $@` ; then echo "cp $< $<"; cp $< $@ ; fi
@if ! `cmp -s $< $@` ; then echo "cp $< $@"; cp $< $@ ; fi
$(CPP_OBJS) $(CPP_PIC_OBJS) $(TESTS_CPP_OBJS): %.$(O):
@mkdir -p $(@D)
@ -445,6 +535,10 @@ $(objroot)test/integration/cpp/%$(EXE): $(objroot)test/integration/cpp/%.$(O) $(
@mkdir -p $(@D)
$(CXX) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(LIBS)) -lm $(EXTRA_LDFLAGS)
$(objroot)test/analyze/%$(EXE): $(objroot)test/analyze/%.$(O) $(C_JET_OBJS) $(C_TESTLIB_ANALYZE_OBJS)
@mkdir -p $(@D)
$(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LDFLAGS) $(filter-out -lm,$(LIBS)) $(LM) $(EXTRA_LDFLAGS)
$(objroot)test/stress/%$(EXE): $(objroot)test/stress/%.$(O) $(C_JET_OBJS) $(C_TESTLIB_STRESS_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
@mkdir -p $(@D)
$(CC) $(TEST_LD_MODE) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(LIBS)) $(LM) $(EXTRA_LDFLAGS)
@ -461,20 +555,18 @@ endif
install_bin:
$(INSTALL) -d $(BINDIR)
@for b in $(BINS); do \
echo "$(INSTALL) -m 755 $$b $(BINDIR)"; \
$(INSTALL) -m 755 $$b $(BINDIR); \
$(INSTALL) -v -m 755 $$b $(BINDIR); \
done
install_include:
$(INSTALL) -d $(INCLUDEDIR)/jemalloc
@for h in $(C_HDRS); do \
echo "$(INSTALL) -m 644 $$h $(INCLUDEDIR)/jemalloc"; \
$(INSTALL) -m 644 $$h $(INCLUDEDIR)/jemalloc; \
$(INSTALL) -v -m 644 $$h $(INCLUDEDIR)/jemalloc; \
done
install_lib_shared: $(DSOS)
$(INSTALL) -d $(LIBDIR)
$(INSTALL) -m 755 $(objroot)lib/$(LIBJEMALLOC).$(SOREV) $(LIBDIR)
$(INSTALL) -v -m 755 $(objroot)lib/$(LIBJEMALLOC).$(SOREV) $(LIBDIR)
ifneq ($(SOREV),$(SO))
ln -sf $(LIBJEMALLOC).$(SOREV) $(LIBDIR)/$(LIBJEMALLOC).$(SO)
endif
@ -482,15 +574,13 @@ endif
install_lib_static: $(STATIC_LIBS)
$(INSTALL) -d $(LIBDIR)
@for l in $(STATIC_LIBS); do \
echo "$(INSTALL) -m 755 $$l $(LIBDIR)"; \
$(INSTALL) -m 755 $$l $(LIBDIR); \
$(INSTALL) -v -m 755 $$l $(LIBDIR); \
done
install_lib_pc: $(PC)
$(INSTALL) -d $(LIBDIR)/pkgconfig
@for l in $(PC); do \
echo "$(INSTALL) -m 644 $$l $(LIBDIR)/pkgconfig"; \
$(INSTALL) -m 644 $$l $(LIBDIR)/pkgconfig; \
$(INSTALL) -v -m 644 $$l $(LIBDIR)/pkgconfig; \
done
ifeq ($(enable_shared), 1)
@ -501,21 +591,19 @@ install_lib: install_lib_static
endif
install_lib: install_lib_pc
install_doc_html:
install_doc_html: build_doc_html
$(INSTALL) -d $(DATADIR)/doc/jemalloc$(install_suffix)
@for d in $(DOCS_HTML); do \
echo "$(INSTALL) -m 644 $$d $(DATADIR)/doc/jemalloc$(install_suffix)"; \
$(INSTALL) -m 644 $$d $(DATADIR)/doc/jemalloc$(install_suffix); \
$(INSTALL) -v -m 644 $$d $(DATADIR)/doc/jemalloc$(install_suffix); \
done
install_doc_man:
install_doc_man: build_doc_man
$(INSTALL) -d $(MANDIR)/man3
@for d in $(DOCS_MAN3); do \
echo "$(INSTALL) -m 644 $$d $(MANDIR)/man3"; \
$(INSTALL) -m 644 $$d $(MANDIR)/man3; \
$(INSTALL) -v -m 644 $$d $(MANDIR)/man3; \
done
install_doc: build_doc install_doc_html install_doc_man
install_doc: install_doc_html install_doc_man
install: install_bin install_include install_lib
@ -523,15 +611,60 @@ ifeq ($(enable_doc), 1)
install: install_doc
endif
uninstall_bin:
$(RM) -v $(foreach b,$(notdir $(BINS)),$(BINDIR)/$(b))
uninstall_include:
$(RM) -v $(foreach h,$(notdir $(C_HDRS)),$(INCLUDEDIR)/jemalloc/$(h))
rmdir -v $(INCLUDEDIR)/jemalloc
uninstall_lib_shared:
$(RM) -v $(LIBDIR)/$(LIBJEMALLOC).$(SOREV)
ifneq ($(SOREV),$(SO))
$(RM) -v $(LIBDIR)/$(LIBJEMALLOC).$(SO)
endif
uninstall_lib_static:
$(RM) -v $(foreach l,$(notdir $(STATIC_LIBS)),$(LIBDIR)/$(l))
uninstall_lib_pc:
$(RM) -v $(foreach p,$(notdir $(PC)),$(LIBDIR)/pkgconfig/$(p))
ifeq ($(enable_shared), 1)
uninstall_lib: uninstall_lib_shared
endif
ifeq ($(enable_static), 1)
uninstall_lib: uninstall_lib_static
endif
uninstall_lib: uninstall_lib_pc
uninstall_doc_html:
$(RM) -v $(foreach d,$(notdir $(DOCS_HTML)),$(DATADIR)/doc/jemalloc$(install_suffix)/$(d))
rmdir -v $(DATADIR)/doc/jemalloc$(install_suffix)
uninstall_doc_man:
$(RM) -v $(foreach d,$(notdir $(DOCS_MAN3)),$(MANDIR)/man3/$(d))
uninstall_doc: uninstall_doc_html uninstall_doc_man
uninstall: uninstall_bin uninstall_include uninstall_lib
ifeq ($(enable_doc), 1)
uninstall: uninstall_doc
endif
tests_unit: $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%$(EXE))
tests_integration: $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%$(EXE)) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%$(EXE))
tests_analyze: $(TESTS_ANALYZE:$(srcroot)%.c=$(objroot)%$(EXE))
tests_stress: $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%$(EXE))
tests: tests_unit tests_integration tests_stress
tests: tests_unit tests_integration tests_analyze tests_stress
check_unit_dir:
@mkdir -p $(objroot)test/unit
check_integration_dir:
@mkdir -p $(objroot)test/integration
analyze_dir:
@mkdir -p $(objroot)test/analyze
stress_dir:
@mkdir -p $(objroot)test/stress
check_dir: check_unit_dir check_integration_dir
@ -548,6 +681,12 @@ check_integration_decay: tests_integration check_integration_dir
$(MALLOC_CONF)="dirty_decay_ms:0,muzzy_decay_ms:0" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
check_integration: tests_integration check_integration_dir
$(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
analyze: tests_analyze analyze_dir
ifeq ($(enable_prof), 1)
$(MALLOC_CONF)="prof:true" $(SHELL) $(objroot)test/test.sh $(TESTS_ANALYZE:$(srcroot)%.c=$(objroot)%)
else
$(SHELL) $(objroot)test/test.sh $(TESTS_ANALYZE:$(srcroot)%.c=$(objroot)%)
endif
stress: tests_stress stress_dir
$(SHELL) $(objroot)test/test.sh $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%)
check: check_unit check_integration check_integration_decay check_integration_prof

View File

@ -1,5 +1,5 @@
This document summarizes the common approaches for performance fine tuning with
jemalloc (as of 5.1.0). The default configuration of jemalloc tends to work
jemalloc (as of 5.3.0). The default configuration of jemalloc tends to work
reasonably well in practice, and most applications should not have to tune any
options. However, in order to cover a wide range of applications and avoid
pathological cases, the default setting is sometimes kept conservative and
@ -76,14 +76,14 @@ Examples:
* High resource consumption application, prioritizing memory usage:
`background_thread:true` combined with shorter decay time (decreased
`dirty_decay_ms` and / or `muzzy_decay_ms`,
`background_thread:true,tcache_max:4096` combined with shorter decay time
(decreased `dirty_decay_ms` and / or `muzzy_decay_ms`,
e.g. `dirty_decay_ms:5000,muzzy_decay_ms:5000`), and lower arena count
(e.g. number of CPUs).
* Low resource consumption application:
`narenas:1,lg_tcache_max:13` combined with shorter decay time (decreased
`narenas:1,tcache_max:1024` combined with shorter decay time (decreased
`dirty_decay_ms` and / or `muzzy_decay_ms`, e.g.
`dirty_decay_ms:1000,muzzy_decay_ms:0`); a minimal compiled-in sketch of this
profile follows below.
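Assuming an unprefixed jemalloc build linked into the program, that sketch could look like the following; jemalloc reads the application-defined `malloc_conf` string at startup, and the same options can instead be supplied at run time via the `MALLOC_CONF` environment variable:

```c
/* Minimal sketch: bake the low-resource profile from the example above into
 * the binary via jemalloc's application-provided malloc_conf string. */
#include <stdlib.h>

/* Read by jemalloc during initialization; must have external linkage. */
const char *malloc_conf =
    "narenas:1,tcache_max:1024,dirty_decay_ms:1000,muzzy_decay_ms:0";

int main(void) {
    void *p = malloc(4096);   /* served under the tuned configuration */
    free(p);
    return 0;
}
```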

View File

@ -1 +1 @@
5.2.1-0-g0
5.3.0-0-g0

View File

@ -205,6 +205,8 @@ Output type:
--svg Generate SVG to stdout
--gif Generate GIF to stdout
--raw Generate symbolized jeprof data (useful with remote fetch)
--collapsed Generate collapsed stacks for building flame graphs
(see http://www.brendangregg.com/flamegraphs.html)
Heap-Profile Options:
--inuse_space Display in-use (mega)bytes [default]
@ -238,6 +240,7 @@ Miscellaneous:
--test Run unit tests
--help This message
--version Version information
--debug-syms-by-id (Linux only) Find debug symbol files by build ID as well as by name
Environment Variables:
JEPROF_TMPDIR Profiles directory. Defaults to \$HOME/jeprof
@ -332,6 +335,7 @@ sub Init() {
$main::opt_gif = 0;
$main::opt_svg = 0;
$main::opt_raw = 0;
$main::opt_collapsed = 0;
$main::opt_nodecount = 80;
$main::opt_nodefraction = 0.005;
@ -362,6 +366,7 @@ sub Init() {
$main::opt_tools = "";
$main::opt_debug = 0;
$main::opt_test = 0;
$main::opt_debug_syms_by_id = 0;
# These are undocumented flags used only by unittests.
$main::opt_test_stride = 0;
@ -405,6 +410,7 @@ sub Init() {
"svg!" => \$main::opt_svg,
"gif!" => \$main::opt_gif,
"raw!" => \$main::opt_raw,
"collapsed!" => \$main::opt_collapsed,
"interactive!" => \$main::opt_interactive,
"nodecount=i" => \$main::opt_nodecount,
"nodefraction=f" => \$main::opt_nodefraction,
@ -429,6 +435,7 @@ sub Init() {
"tools=s" => \$main::opt_tools,
"test!" => \$main::opt_test,
"debug!" => \$main::opt_debug,
"debug-syms-by-id!" => \$main::opt_debug_syms_by_id,
# Undocumented flags used only by unittests:
"test_stride=i" => \$main::opt_test_stride,
) || usage("Invalid option(s)");
@ -490,6 +497,7 @@ sub Init() {
$main::opt_svg +
$main::opt_gif +
$main::opt_raw +
$main::opt_collapsed +
$main::opt_interactive +
0;
if ($modes > 1) {
@ -572,6 +580,11 @@ sub Init() {
foreach (@prefix_list) {
s|/+$||;
}
# Flag to prevent us from trying over and over to use
# elfutils if it's not installed (used only with
# --debug-syms-by-id option).
$main::gave_up_on_elfutils = 0;
}
sub FilterAndPrint {
@ -621,6 +634,8 @@ sub FilterAndPrint {
PrintText($symbols, $flat, $cumulative, -1);
} elsif ($main::opt_raw) {
PrintSymbolizedProfile($symbols, $profile, $main::prog);
} elsif ($main::opt_collapsed) {
PrintCollapsedStacks($symbols, $profile);
} elsif ($main::opt_callgrind) {
PrintCallgrind($calls);
} else {
@ -2810,6 +2825,40 @@ sub IsSecondPcAlwaysTheSame {
return $second_pc;
}
sub ExtractSymbolNameInlineStack {
my $symbols = shift;
my $address = shift;
my @stack = ();
if (exists $symbols->{$address}) {
my @localinlinestack = @{$symbols->{$address}};
for (my $i = $#localinlinestack; $i > 0; $i-=3) {
my $file = $localinlinestack[$i-1];
my $fn = $localinlinestack[$i-0];
if ($file eq "?" || $file eq ":0") {
$file = "??:0";
}
if ($fn eq '??') {
# If we can't get the symbol name, at least use the file information.
$fn = $file;
}
my $suffix = "[inline]";
if ($i == 2) {
$suffix = "";
}
push (@stack, $fn.$suffix);
}
}
else {
# If we can't get a symbol name, at least fill in the address.
push (@stack, $address);
}
return @stack;
}
sub ExtractSymbolLocation {
my $symbols = shift;
my $address = shift;
@ -2884,6 +2933,17 @@ sub FilterFrames {
return $result;
}
sub PrintCollapsedStacks {
my $symbols = shift;
my $profile = shift;
while (my ($stack_trace, $count) = each %$profile) {
my @address = split(/\n/, $stack_trace);
my @names = reverse ( map { ExtractSymbolNameInlineStack($symbols, $_) } @address );
printf("%s %d\n", join(";", @names), $count);
}
}
sub RemoveUninterestingFrames {
my $symbols = shift;
my $profile = shift;
@ -4440,16 +4500,54 @@ sub FindLibrary {
# For libc libraries, the copy in /usr/lib/debug contains debugging symbols
sub DebuggingLibrary {
my $file = shift;
if ($file =~ m|^/|) {
if (-f "/usr/lib/debug$file") {
return "/usr/lib/debug$file";
} elsif (-f "/usr/lib/debug$file.debug") {
return "/usr/lib/debug$file.debug";
}
if ($file !~ m|^/|) {
return undef;
}
# Find debug symbol file if it's named after the library's name.
if (-f "/usr/lib/debug$file") {
if($main::opt_debug) { print STDERR "found debug info for $file in /usr/lib/debug$file\n"; }
return "/usr/lib/debug$file";
} elsif (-f "/usr/lib/debug$file.debug") {
if($main::opt_debug) { print STDERR "found debug info for $file in /usr/lib/debug$file.debug\n"; }
return "/usr/lib/debug$file.debug";
}
if(!$main::opt_debug_syms_by_id) {
if($main::opt_debug) { print STDERR "no debug symbols found for $file\n" };
return undef;
}
# Find debug file if it's named after the library's build ID.
my $readelf = '';
if (!$main::gave_up_on_elfutils) {
$readelf = qx/eu-readelf -n ${file}/;
if ($?) {
print STDERR "Cannot run eu-readelf. To use --debug-syms-by-id you must be on Linux, with elfutils installed.\n";
$main::gave_up_on_elfutils = 1;
return undef;
}
my $buildID = $1 if $readelf =~ /Build ID: ([A-Fa-f0-9]+)/s;
if (defined $buildID && length $buildID > 0) {
my $symbolFile = '/usr/lib/debug/.build-id/' . substr($buildID, 0, 2) . '/' . substr($buildID, 2) . '.debug';
if (-e $symbolFile) {
if($main::opt_debug) { print STDERR "found debug symbol file $symbolFile for $file\n" };
return $symbolFile;
} else {
if($main::opt_debug) { print STDERR "no debug symbol file found for $file, build ID: $buildID\n" };
return undef;
}
}
}
if($main::opt_debug) { print STDERR "no debug symbols found for $file, build ID unknown\n" };
return undef;
}
# Parse text section header of a library using objdump
sub ParseTextSectionHeaderFromObjdump {
my $lib = shift;
@ -4987,7 +5085,7 @@ sub MapToSymbols {
} else {
# MapSymbolsWithNM tags each routine with its starting address,
# useful in case the image has multiple occurrences of this
# routine. (It uses a syntax that resembles template paramters,
# routine. (It uses a syntax that resembles template parameters,
# that are automatically stripped out by ShortFunctionName().)
# addr2line does not provide the same information. So we check
# if nm disambiguated our symbol, and if so take the annotated
@ -5339,7 +5437,7 @@ sub GetProcedureBoundaries {
# "nm -f $image" is supposed to fail on GNU nm, but if:
#
# a. $image starts with [BbSsPp] (for example, bin/foo/bar), AND
# b. you have a.out in your current directory (a not uncommon occurence)
# b. you have a.out in your current directory (a not uncommon occurrence)
#
# then "nm -f $image" succeeds because -f only looks at the first letter of
# the argument, which looks valid because it's [BbSsPp], and then since

File diff suppressed because it is too large

File diff suppressed because it is too large

deps/jemalloc/configure (vendored, 9742 changed lines)

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -630,7 +630,7 @@ for (i = 0; i < nbins; i++) {
</row>
<row>
<entry>8 KiB</entry>
<entry>[40 KiB, 48 KiB, 54 KiB, 64 KiB]</entry>
<entry>[40 KiB, 48 KiB, 56 KiB, 64 KiB]</entry>
</row>
<row>
<entry>16 KiB</entry>
@ -936,6 +936,22 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
</para></listitem>
</varlistentry>
<varlistentry id="opt.cache_oblivious">
<term>
<mallctl>opt.cache_oblivious</mallctl>
(<type>bool</type>)
<literal>r-</literal>
</term>
<listitem><para>Enable / Disable cache-oblivious large allocation
alignment, for large requests with no alignment constraints. If this
feature is disabled, all large allocations are page-aligned as an
implementation artifact, which can severely harm CPU cache utilization.
However, the cache-oblivious layout comes at the cost of one extra page
per large allocation, which in the most extreme case increases physical
memory usage for the 16 KiB size class to 20 KiB. This option is enabled
by default.</para></listitem>
</varlistentry>
<varlistentry id="opt.metadata_thp">
<term>
<mallctl>opt.metadata_thp</mallctl>
@ -950,6 +966,17 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
is <quote>disabled</quote>.</para></listitem>
</varlistentry>
<varlistentry id="opt.trust_madvise">
<term>
<mallctl>opt.trust_madvise</mallctl>
(<type>bool</type>)
<literal>r-</literal>
</term>
<listitem><para>If true, do not perform runtime check for MADV_DONTNEED,
to check that it actually zeros pages. The default is disabled on Linux
and enabled elsewhere.</para></listitem>
</varlistentry>
<varlistentry id="opt.retain">
<term>
<mallctl>opt.retain</mallctl>
@ -1185,6 +1212,41 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
enabled. The default is <quote></quote>.</para></listitem>
</varlistentry>
<varlistentry id="opt.stats_interval">
<term>
<mallctl>opt.stats_interval</mallctl>
(<type>int64_t</type>)
<literal>r-</literal>
</term>
<listitem><para>Average interval between statistics outputs, as measured
in bytes of allocation activity. The actual interval may be sporadic
because decentralized event counters are used to avoid synchronization
bottlenecks. The output may be triggered on any thread, which then
calls <function>malloc_stats_print()</function>. <link
linkend="opt.stats_interval_opts"><mallctl>opt.stats_interval_opts</mallctl></link>
can be combined to specify output options. By default,
interval-triggered stats output is disabled (encoded as
-1).</para></listitem>
</varlistentry>
<varlistentry id="opt.stats_interval_opts">
<term>
<mallctl>opt.stats_interval_opts</mallctl>
(<type>const char *</type>)
<literal>r-</literal>
</term>
<listitem><para>Options (the <parameter>opts</parameter> string) to pass
to the <function>malloc_stats_print()</function> for interval based
statistics printing (enabled
through <link
linkend="opt.stats_interval"><mallctl>opt.stats_interval</mallctl></link>). See
available options in <link
linkend="malloc_stats_print_opts"><function>malloc_stats_print()</function></link>.
Has no effect unless <link
linkend="opt.stats_interval"><mallctl>opt.stats_interval</mallctl></link> is
enabled. The default is <quote></quote>.</para></listitem>
</varlistentry>
<varlistentry id="opt.junk">
<term>
<mallctl>opt.junk</mallctl>
@ -1266,21 +1328,23 @@ malloc_conf = "xmalloc:true";]]></programlisting>
a certain size. Thread-specific caching allows many allocations to be
satisfied without performing any thread synchronization, at the cost of
increased memory use. See the <link
linkend="opt.lg_tcache_max"><mallctl>opt.lg_tcache_max</mallctl></link>
linkend="opt.tcache_max"><mallctl>opt.tcache_max</mallctl></link>
option for related tuning information. This option is enabled by
default.</para></listitem>
</varlistentry>
<varlistentry id="opt.lg_tcache_max">
<varlistentry id="opt.tcache_max">
<term>
<mallctl>opt.lg_tcache_max</mallctl>
<mallctl>opt.tcache_max</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
</term>
<listitem><para>Maximum size class (log base 2) to cache in the
thread-specific cache (tcache). At a minimum, all small size classes
are cached, and at a maximum all large size classes are cached. The
default maximum is 32 KiB (2^15).</para></listitem>
<listitem><para>Maximum size class to cache in the thread-specific cache
(tcache). At a minimum, the first size class is cached; and at a
maximum, size classes up to 8 MiB can be cached. The default maximum is
32 KiB (2^15). As a convenience, this may also be set by specifying
lg_tcache_max, which will be taken to be the base-2 logarithm of the
setting of tcache_max.</para></listitem>
</varlistentry>
<varlistentry id="opt.thp">
@ -1344,7 +1408,9 @@ malloc_conf = "xmalloc:true";]]></programlisting>
set to the empty string, no automatic dumps will occur; this is
primarily useful for disabling the automatic final heap dump (which
also disables leak reporting, if enabled). The default prefix is
<filename>jeprof</filename>.</para></listitem>
<filename>jeprof</filename>. This prefix value can be overridden by
<link linkend="prof.prefix"><mallctl>prof.prefix</mallctl></link>.
</para></listitem>
</varlistentry>
<varlistentry id="opt.prof_active">
@ -1423,8 +1489,9 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.i&lt;iseq&gt;.heap</filename>,
where <literal>&lt;prefix&gt;</literal> is controlled by the
<link
linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
option. By default, interval-triggered profile dumping is disabled
linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link> and
<link linkend="prof.prefix"><mallctl>prof.prefix</mallctl></link>
options. By default, interval-triggered profile dumping is disabled
(encoded as -1).
</para></listitem>
</varlistentry>
@ -1456,8 +1523,9 @@ malloc_conf = "xmalloc:true";]]></programlisting>
usage to a file named according to the pattern
<filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.f.heap</filename>,
where <literal>&lt;prefix&gt;</literal> is controlled by the <link
linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
option. Note that <function>atexit()</function> may allocate
linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link> and
<link linkend="prof.prefix"><mallctl>prof.prefix</mallctl></link>
options. Note that <function>atexit()</function> may allocate
memory during application initialization and then deadlock internally
when jemalloc in turn calls <function>atexit()</function>, so
this option is not universally usable (though the application can
@ -1478,8 +1546,57 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<manvolnum>3</manvolnum></citerefentry> function to report memory leaks
detected by allocation sampling. See the
<link linkend="opt.prof"><mallctl>opt.prof</mallctl></link> option for
information on analyzing heap profile output. This option is disabled
by default.</para></listitem>
information on analyzing heap profile output. Works only when combined
with <link linkend="opt.prof_final"><mallctl>opt.prof_final</mallctl>
</link>, otherwise does nothing. This option is disabled by default.
</para></listitem>
</varlistentry>
<varlistentry id="opt.prof_leak_error">
<term>
<mallctl>opt.prof_leak_error</mallctl>
(<type>bool</type>)
<literal>r-</literal>
[<option>--enable-prof</option>]
</term>
<listitem><para>Similar to <link linkend="opt.prof_leak"><mallctl>
opt.prof_leak</mallctl></link>, but makes the process exit with error
code 1 if a memory leak is detected. This option supersedes
<link linkend="opt.prof_leak"><mallctl>opt.prof_leak</mallctl></link>,
meaning that if both are specified, this option takes precedence. When
enabled, also enables <link linkend="opt.prof_leak"><mallctl>
opt.prof_leak</mallctl></link>. Works only when combined with
<link linkend="opt.prof_final"><mallctl>opt.prof_final</mallctl></link>,
otherwise does nothing. This option is disabled by default.
</para></listitem>
</varlistentry>
<varlistentry id="opt.zero_realloc">
<term>
<mallctl>opt.zero_realloc</mallctl>
(<type>const char *</type>)
<literal>r-</literal>
</term>
<listitem><para> Determines the behavior of
<function>realloc()</function> when passed a value of zero for the new
size. <quote>alloc</quote> treats this as an allocation of size zero
(and returns a non-null result except in case of resource exhaustion).
<quote>free</quote> treats this as a deallocation of the pointer, and
returns <constant>NULL</constant> without setting
<varname>errno</varname>. <quote>abort</quote> aborts the process if
zero is passed. The default is <quote>free</quote> on Linux and
Windows, and <quote>alloc</quote> elsewhere.</para>
<para>There is considerable divergence of behaviors across
implementations in handling this case. Many have the behavior of
<quote>free</quote>. This can introduce security vulnerabilities, since,
per POSIX and C11, a <constant>NULL</constant> return value indicates
failure and that the passed-in pointer remains valid.
<quote>alloc</quote> is safe, but can cause leaks in programs that
expect the common behavior. Programs intended to be portable and
leak-free cannot assume either behavior, and must therefore never call
realloc with a size of 0. The <quote>abort</quote> option enables
testing for this class of bug.</para></listitem>
</varlistentry>
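As an editorial aside, here is a minimal C sketch (illustrative only, not taken from the manual) of why the common "free" behavior is hazardous for portable code:

    #include <stdlib.h>

    /* Under the "free" behavior, realloc(p, 0) releases p and returns NULL. */
    void hazard(void) {
        void *p = malloc(16);
        void *q = realloc(p, 0);
        if (q == NULL) {
            /* A caller that reads NULL as "allocation failed, p still valid"
             * now frees p a second time: a double free. */
            free(p);
        }
    }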
<varlistentry id="thread.arena">
@ -1520,7 +1637,8 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<link
linkend="thread.allocated"><mallctl>thread.allocated</mallctl></link>
mallctl. This is useful for avoiding the overhead of repeated
<function>mallctl*()</function> calls.</para></listitem>
<function>mallctl*()</function> calls. Note that the underlying counter
should not be modified by the application.</para></listitem>
</varlistentry>
<varlistentry id="thread.deallocated">
@ -1547,7 +1665,44 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<link
linkend="thread.deallocated"><mallctl>thread.deallocated</mallctl></link>
mallctl. This is useful for avoiding the overhead of repeated
<function>mallctl*()</function> calls.</para></listitem>
<function>mallctl*()</function> calls. Note that the underlying counter
should not be modified by the application.</para></listitem>
</varlistentry>
<varlistentry id="thread.peak.read">
<term>
<mallctl>thread.peak.read</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Get an approximation of the maximum value of the
difference between the number of bytes allocated and the number of bytes
deallocated by the calling thread since the last call to <link
linkend="thread.peak.reset"><mallctl>thread.peak.reset</mallctl></link>,
or since the thread's creation if it has not called <link
linkend="thread.peak.reset"><mallctl>thread.peak.reset</mallctl></link>.
No guarantees are made about the quality of the approximation, but
jemalloc currently endeavors to maintain accuracy to within one hundred
kilobytes.
</para></listitem>
</varlistentry>
<varlistentry id="thread.peak.reset">
<term>
<mallctl>thread.peak.reset</mallctl>
(<type>void</type>)
<literal>--</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Resets the counter for net bytes allocated in the calling
thread to zero. This affects subsequent calls to <link
linkend="thread.peak.read"><mallctl>thread.peak.read</mallctl></link>,
but not the values returned by <link
linkend="thread.allocated"><mallctl>thread.allocated</mallctl></link>
or <link
linkend="thread.deallocated"><mallctl>thread.deallocated</mallctl></link>.
</para></listitem>
</varlistentry>
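As an editorial aside, a minimal C sketch of using these two mallctls together (assuming an unprefixed jemalloc built with --enable-stats; do_phase_work() is a hypothetical workload):

    #include <inttypes.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    extern void do_phase_work(void);   /* hypothetical */

    void report_phase_peak(void) {
        /* Zero the calling thread's net-allocated peak counter. */
        mallctl("thread.peak.reset", NULL, NULL, NULL, 0);

        do_phase_work();

        /* Read the peak reached since the reset. */
        uint64_t peak;
        size_t sz = sizeof(peak);
        if (mallctl("thread.peak.read", &peak, &sz, NULL, 0) == 0) {
            printf("phase peak: %" PRIu64 " bytes\n", peak);
        }
    }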
<varlistentry id="thread.tcache.enabled">
@ -1618,6 +1773,28 @@ malloc_conf = "xmalloc:true";]]></programlisting>
default.</para></listitem>
</varlistentry>
<varlistentry id="thread.idle">
<term>
<mallctl>thread.idle</mallctl>
(<type>void</type>)
<literal>--</literal>
</term>
<listitem><para>Hints to jemalloc that the calling thread will be idle
for some nontrivial period of time (say, on the order of seconds), and
that doing some cleanup operations may be beneficial. There are no
guarantees as to what specific operations will be performed; currently
this flushes the caller's tcache and may (according to some heuristic)
purge its associated arena.</para>
<para>This is not intended to be a general-purpose background activity
mechanism, and threads should not wake up multiple times solely to call
it. Rather, a thread waiting for a task should do a timed wait first,
call <link linkend="thread.idle"><mallctl>thread.idle</mallctl></link>
if no task appears in the timeout interval, and then do an untimed wait.
For such a background activity mechanism, see
<link linkend="background_thread"><mallctl>background_thread</mallctl></link>.
</para></listitem>
</varlistentry>
<varlistentry id="tcache.create">
<term>
<mallctl>tcache.create</mallctl>
@ -1631,7 +1808,16 @@ malloc_conf = "xmalloc:true";]]></programlisting>
automatically managed one that is used by default. Each explicit cache
can be used by only one thread at a time; the application must assure
that this constraint holds.
</para>
<para>If the amount of space supplied for storing the thread-specific
cache identifier does not equal
<code language="C">sizeof(<type>unsigned</type>)</code>, no
thread-specific cache will be created, no data will be written to the
space pointed by <parameter>oldp</parameter>, and
<parameter>*oldlenp</parameter> will be set to 0.
</para></listitem>
</varlistentry>
<varlistentry id="tcache.flush">
@ -2171,7 +2357,14 @@ struct extent_hooks_s {
</term>
<listitem><para>Explicitly create a new arena outside the range of
automatically managed arenas, with optionally specified extent hooks,
and return the new arena index.</para></listitem>
and return the new arena index.</para>
<para>If the amount of space supplied for storing the arena index does
not equal <code language="C">sizeof(<type>unsigned</type>)</code>, no
arena will be created, no data will be written to the space pointed by
<parameter>oldp</parameter>, and <parameter>*oldlenp</parameter> will
be set to 0.
</para></listitem>
</varlistentry>
<varlistentry id="arenas.lookup">
@ -2223,9 +2416,24 @@ struct extent_hooks_s {
is specified, to a file according to the pattern
<filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.m&lt;mseq&gt;.heap</filename>,
where <literal>&lt;prefix&gt;</literal> is controlled by the
<link linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
and <link linkend="prof.prefix"><mallctl>prof.prefix</mallctl></link>
options.</para></listitem>
</varlistentry>
<varlistentry id="prof.prefix">
<term>
<mallctl>prof.prefix</mallctl>
(<type>const char *</type>)
<literal>-w</literal>
[<option>--enable-prof</option>]
</term>
<listitem><para>Set the filename prefix for profile dumps. See
<link
linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
option.</para></listitem>
for the default setting. This can be useful to differentiate profile
dumps such as from forked processes.
</para></listitem>
</varlistentry>
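As an editorial aside, a write through this mallctl might look like the following sketch (assuming an unprefixed jemalloc built with --enable-prof; the prefix value is illustrative):

    /* For example, give a forked child its own dump prefix. */
    const char *prefix = "jeprof.child";
    mallctl("prof.prefix", NULL, NULL, (void *)&prefix, sizeof(prefix));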
<varlistentry id="prof.gdump">
@ -2240,8 +2448,9 @@ struct extent_hooks_s {
dumped to files named according to the pattern
<filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.u&lt;useq&gt;.heap</filename>,
where <literal>&lt;prefix&gt;</literal> is controlled by the <link
linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
option.</para></listitem>
linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link> and
<link linkend="prof.prefix"><mallctl>prof.prefix</mallctl></link>
options.</para></listitem>
</varlistentry>
<varlistentry id="prof.reset">
@ -2398,6 +2607,21 @@ struct extent_hooks_s {
</para></listitem>
</varlistentry>
<varlistentry id="stats.zero_reallocs">
<term>
<mallctl>stats.zero_reallocs</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Number of times that <function>realloc()</function>
was called with a non-<constant>NULL</constant> pointer argument and a
<constant>0</constant> size argument. This is a fundamentally unsafe
pattern in portable programs; see <link linkend="opt.zero_realloc">
<mallctl>opt.zero_realloc</mallctl></link> for details.
</para></listitem>
</varlistentry>
<varlistentry id="stats.background_thread.num_threads">
<term>
<mallctl>stats.background_thread.num_threads</mallctl>
@ -2509,6 +2733,30 @@ struct extent_hooks_s {
counters</link>.</para></listitem>
</varlistentry>
<varlistentry id="stats.mutexes.prof_thds_data">
<term>
<mallctl>stats.mutexes.prof_thds_data.{counter}</mallctl>
(<type>counter specific type</type>) <literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Statistics on <varname>prof</varname> threads data mutex
(global scope; profiling related). <mallctl>{counter}</mallctl> is one
of the counters in <link linkend="mutex_counters">mutex profiling
counters</link>.</para></listitem>
</varlistentry>
<varlistentry id="stats.mutexes.prof_dump">
<term>
<mallctl>stats.mutexes.prof_dump.{counter}</mallctl>
(<type>counter specific type</type>) <literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Statistics on <varname>prof</varname> dumping mutex
(global scope; profiling related). <mallctl>{counter}</mallctl> is one
of the counters in <link linkend="mutex_counters">mutex profiling
counters</link>.</para></listitem>
</varlistentry>
<varlistentry id="stats.mutexes.reset">
<term>
<mallctl>stats.mutexes.reset</mallctl>
@ -3250,7 +3498,7 @@ heap_v2/524288
[...]
@ 0x5f86da8 0x5f5a1dc [...] 0x29e4d4e 0xa200316 0xabb2988 [...]
t*: 13: 6688 [0: 0]
t3: 12: 6496 [0: ]
t3: 12: 6496 [0: 0]
t99: 1: 192 [0: 0]
[...]
@ -3261,9 +3509,9 @@ descriptions of the corresponding fields. <programlisting><![CDATA[
<heap_profile_format_version>/<mean_sample_interval>
<aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
[...]
<thread_3_aggregate>: <curobjs>: <curbytes>[<cumobjs>: <cumbytes>]
<thread_3_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
[...]
<thread_99_aggregate>: <curobjs>: <curbytes>[<cumobjs>: <cumbytes>]
<thread_99_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
[...]
@ <top_frame> <frame> [...] <frame> <frame> <frame> [...]
<backtrace_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
@ -3420,8 +3668,10 @@ MAPPED_LIBRARIES:
<listitem><para><parameter>newp</parameter> is not
<constant>NULL</constant>, and <parameter>newlen</parameter> is too
large or too small. Alternatively, <parameter>*oldlenp</parameter>
is too large or too small; in this case as much data as possible
are read despite the error.</para></listitem>
is too large or too small; when this happens, except for a very few
cases explicitly documented otherwise, as much data as possible
are read despite the error, and the amount of data actually read is
recorded in <parameter>*oldlenp</parameter>.</para></listitem>
</varlistentry>
<varlistentry>
<term><errorname>ENOENT</errorname></term>

View File

@ -0,0 +1,127 @@
# jemalloc profiling
This describes the mathematical basis behind jemalloc's profiling implementation, as well as the implementation tricks that make it effective. Historically, the jemalloc profiling design simply copied tcmalloc's. The implementation has since diverged, due to both the desire to record additional information, and to correct some biasing bugs.
Note: this document is markdown with embedded LaTeX; different markdown renderers may not produce the expected output. Viewing with `pandoc -s PROFILING_INTERNALS.md -o PROFILING_INTERNALS.pdf` is recommended.
## Some tricks in our implementation toolbag
### Sampling
Recording our metadata is quite expensive; we need to walk up the stack to get a stack trace. On top of that, we need to allocate storage to record that stack trace, and stick it somewhere where a profile-dumping call can find it. That call might happen on another thread, so we'll probably need to take a lock to do so. These costs are quite large compared to the average cost of an allocation. To manage this, we'll only sample some fraction of allocations. This will miss some of them, so our data will be incomplete, but we'll try to make up for it. We can tune our sampling rate to balance accuracy and performance.
### Fast Bernoulli sampling
Compared to our fast paths, even a `coinflip(p)` function can be quite expensive. Having to do a random-number generation and some floating point operations would be a sizeable relative cost. However (as pointed out in [[Vitter, 1987](https://dl.acm.org/doi/10.1145/23002.23003)]), if we can orchestrate our algorithm so that many of our `coinflip` calls share their parameter value, we can do better. We can sample from the geometric distribution, and initialize a counter with the result. When the counter hits 0, the `coinflip` function returns true (and reinitializes its internal counter).
This can let us do a random-number generation once per (logical) coinflip that comes up heads, rather than once per (logical) coinflip. Since we expect to sample relatively rarely, this can be a large win.
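A minimal sketch of this counter trick, in C (illustrative only; jemalloc's real implementation drives the counter in bytes rather than in individual coinflips, and uses its own PRNG):

```c
#include <math.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

static double sample_prob;   /* p: probability of "heads", assumed in (0, 1) */
static uint64_t wait;        /* coinflips remaining until the next heads */

/* Draw the number of tails before the next heads from a geometric
 * distribution, via inverse-CDF sampling (rand() is crude but fine here). */
static uint64_t geometric_draw(void) {
    double u = (rand() + 1.0) / ((double)RAND_MAX + 2.0);   /* u in (0, 1) */
    return (uint64_t)floor(log(u) / log(1.0 - sample_prob));
}

void coinflip_init(double p) {
    sample_prob = p;
    wait = geometric_draw();
}

bool coinflip(void) {
    if (wait > 0) {
        wait--;                  /* fast path: no RNG, no floating point */
        return false;
    }
    wait = geometric_draw();     /* slow path: once per heads */
    return true;
}
```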
### Fast-path / slow-path thinking
Most programs have a skewed distribution of allocations. Smaller allocations are much more frequent than large ones, but shorter lived and less common as a fraction of program memory. "Small" and "large" are necessarily sort of fuzzy terms, but if we define "small" as "allocations jemalloc puts into slabs" and "large" as the others, then it's not uncommon for small allocations to be hundreds of times more frequent than large ones, but take up around half the amount of heap space as large ones. Moreover, small allocations tend to be much cheaper than large ones (often by a factor of 20-30): they're more likely to hit in thread caches, less likely to have to do an mmap, and cheaper to fill (by the user) once the allocation has been returned.
## An unbiased estimator of space consumption from (almost) arbitrary sampling strategies
Suppose we have a sampling strategy that meets the following criteria:
- One allocation being sampled is independent of other allocations being sampled.
- Each allocation has a non-zero probability of being sampled.
We can then estimate the bytes in live allocations through some particular stack trace as:
$$ \sum_i S_i I_i \frac{1}{\mathrm{E}[I_i]} $$
where the sum ranges over some index variable of live allocations from that stack, $S_i$ is the size of the $i$'th allocation, and $I_i$ is an indicator random variable for whether or not the $i'th$ allocation is sampled. $S_i$ and $\mathrm{E}[I_i]$ are constants (the program allocations are fixed; the random variables are the sampling decisions), so taking the expectation we get
$$ \sum_i S_i \mathrm{E}[I_i] \frac{1}{\mathrm{E}[I_i]}.$$
This is of course $\sum_i S_i$, as we want (and, a similar calculation could be done for allocation counts as well).
This is a fairly general strategy; note that while we require that sampling decisions be independent of one another's outcomes, they don't have to be independent of previous allocations, total bytes allocated, etc. You can imagine strategies that:
- Sample allocations at program startup at a higher rate than subsequent allocations
- Sample even-indexed allocations more frequently than odd-indexed ones (so long as no allocation has zero sampling probability)
- Let threads declare themselves as high-sampling-priority, and sample their allocations at an increased rate.
These can all be fit into this framework to give an unbiased estimator.
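As a concrete (hypothetical) rendering of the estimator, the dump-time computation for one stack trace amounts to summing `size / P(sampled)` over its sampled live allocations; unsampled allocations contribute nothing:

```c
#include <stddef.h>

typedef struct {
    size_t size;          /* S_i: size of a sampled live allocation */
    double sample_prob;   /* E[I_i]: probability that it was sampled */
} sampled_alloc_t;

/* Unbiased estimate of live bytes attributed to one stack trace. */
double estimate_live_bytes(const sampled_alloc_t *samples, size_t nsamples) {
    double est = 0.0;
    for (size_t i = 0; i < nsamples; i++) {
        est += (double)samples[i].size / samples[i].sample_prob;
    }
    return est;
}
```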
## Evaluating sampling strategies
Not all strategies for picking allocations to sample are equally good, of course. Among unbiased estimators, the lower the variance, the lower the mean squared error. Using the estimator above, the variance is:
$$
\begin{aligned}
& \mathrm{Var}[\sum_i S_i I_i \frac{1}{\mathrm{E}[I_i]}] \\
=& \sum_i \mathrm{Var}[S_i I_i \frac{1}{\mathrm{E}[I_i]}] \\
=& \sum_i \frac{S_i^2}{\mathrm{E}[I_i]^2} \mathrm{Var}[I_i] \\
=& \sum_i \frac{S_i^2}{\mathrm{E}[I_i]^2} \mathrm{E}[I_i](1 - \mathrm{E}[I_i]) \\
=& \sum_i S_i^2 \frac{1 - \mathrm{E}[I_i]}{\mathrm{E}[I_i]}.
\end{aligned}
$$
We can use this formula to compare various strategy choices. All else being equal, lower-variance strategies are better.
## Possible sampling strategies
Because of the desire to avoid the fast-path costs, we'd like to use our Bernoulli trick if possible. There are two obvious counters to use: a coinflip per allocation, and a coinflip per byte allocated.
### Bernoulli sampling per-allocation
An obvious strategy is to pick some large $N$, and give each allocation a $1/N$ chance of being sampled. This would let us use our Bernoulli-via-Geometric trick. Using the formula from above, we can compute the variance as:
$$ \sum_i S_i^2 \frac{1 - \frac{1}{N}}{\frac{1}{N}} = (N-1) \sum_i S_i^2.$$
That is, an allocation of size $Z$ contributes a term of $(N-1)Z^2$ to the variance.
### Bernoulli sampling per-byte
Another option we have is to pick some rate $R$, and give each byte a $1/R$ chance of being picked for sampling (at which point we would sample its contained allocation). The chance of an allocation of size $Z$ being sampled, then, is
$$1-(1-\frac{1}{R})^{Z}$$
and an allocation of size $Z$ contributes a term of
$$Z^2 \frac{(1-\frac{1}{R})^{Z}}{1-(1-\frac{1}{R})^{Z}}.$$
In practical settings, $R$ is large, and so this is well-approximated by
$$Z^2 \frac{e^{-Z/R}}{1 - e^{-Z/R}} .$$
Just to get a sense of the dynamics here, let's look at the behavior for various values of $Z$. When $Z$ is small relative to $R$, we can use $e^x \approx 1 + x$, and conclude that the variance contributed by a small-$Z$ allocation is around
$$Z^2 \frac{1-Z/R}{Z/R} \approx RZ.$$
When $Z$ is comparable to $R$, the variance term is near $Z^2$ (we have $\frac{e^{-Z/R}}{1 - e^{-Z/R}} = 1$ when $Z/R = \ln 2 \approx 0.693$). When $Z$ is large relative to $R$, the variance term goes to zero.
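To make these asymptotics concrete, consider an illustrative (non-default) byte rate $R = 64\,\mathrm{KiB}$, and choose $N = R/Z$ so that a small allocation of $Z = 64$ bytes is sampled with the same probability ($\approx 1/1024$) under both schemes. For that small allocation the two variance terms essentially coincide:
$$ (N-1)Z^2 \approx \frac{R}{Z} Z^2 = RZ = 2^{22} \ \mathrm{bytes}^2, $$
but for a $Z = 1\,\mathrm{MiB}$ allocation the per-allocation scheme contributes $(N-1)Z^2 \approx 2^{10} \cdot 2^{40} = 2^{50}\ \mathrm{bytes}^2$, while the per-byte scheme contributes only $Z^2 \frac{e^{-Z/R}}{1-e^{-Z/R}} \approx 2^{40} \cdot e^{-16} \approx 1.2 \times 10^{5}\ \mathrm{bytes}^2$.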
## Picking a sampling strategy
The fast-path/slow-path dynamics of allocation patterns point us towards the per-byte sampling approach:
- The quadratic increase in variance per allocation in the first approach is quite costly when heaps have a non-negligible portion of their bytes in those allocations, which is practically often the case.
- The Bernoulli-per-byte approach shifts more of its samples towards large allocations, which are already a slow-path.
- We drive several tickers (e.g. tcache gc) by bytes allocated, and report bytes-allocated as a user-visible statistic, so we have to do all the necessary bookkeeping anyways.
Indeed, this is the approach we use in jemalloc. Our heap dumps record the size of the allocation and the sampling rate $R$, and jeprof unbiases by dividing by $1 - e^{-Z/R}$. The framework above would suggest dividing by $1-(1-1/R)^Z$; instead, we use the fact that $R$ is large in practical situations, and so $e^{-Z/R}$ is a good approximation (and faster to compute). (Equivalently, we may also see this as the factor that falls out from viewing sampling as a Poisson process directly).
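A sketch of that per-record unbiasing (names are illustrative, not jeprof's internals); note that for $Z \ll R$ the unbiased weight is roughly $R + Z/2$:

```c
#include <math.h>
#include <stddef.h>

/* A sampled allocation of size z, taken under byte sampling rate r, is
 * counted as z / (1 - e^(-z/r)) live bytes. */
double unbiased_bytes(size_t z, double r) {
    double p_sampled = 1.0 - exp(-(double)z / r);
    return (double)z / p_sampled;
}
```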
## Consequences for heap dump consumers
Using this approach means that there are a few things users need to be aware of.
### Stack counts are not proportional to allocation frequencies
If one stack appears twice as often as another, this by itself does not imply that it allocates twice as often. Consider the case in which there are only two types of allocating call stacks in a program. Stack A allocates 8 bytes, and occurs a million times in a program. Stack B allocates 8 MB, and occurs just once in a program. If our sampling rate $R$ is about 1MB, we expect stack A to show up about 8 times, and stack B to show up once. Stack A isn't 8 times more frequent than stack B, though; it's a million times more frequent.
### Aggregation must be done after unbiasing samples
Some tools manually parse heap dump output, and aggregate across stacks (or across program runs) to provide wider-scale data analyses. When doing this aggregation, though, it's important to unbias-and-then-sum, rather than sum-and-then-unbias. Reusing our example from the previous section: suppose we collect heap dumps of the program from a million machines. We then have 8 million occurrences of stack A (each of 8 bytes), and a million occurrences of stack B (each of 8 MB). If we sum first, we'll attribute 64 MB to stack A, and 8 TB to stack B. Unbiasing changes these numbers by an infinitesimal amount, so that sum-then-unbias dramatically underreports the amount of memory allocated by stack A.
## An avenue for future exploration
While the framework we laid out above is pretty general, as an engineering decision we're only interested in fairly simple approaches (i.e. ones for which the chance of an allocation being sampled depends only on its size). Our job is then: for each size class $Z$, pick a probability $p_Z$ that an allocation of that size will be sampled. We made some handwave-y references to statistical distributions to justify our choices, but there's no reason we need to pick them that way. Any set of non-zero probabilities is a valid choice.
The real limiting factor in our ability to reduce estimator variance is the fact that sampling is expensive; we want to make sure we only do it on a small fraction of allocations. Our goal, then, is to pick the $p_Z$ to minimize variance given some maximum sampling rate $P$. If we define $a_Z$ to be the fraction of allocations of size $Z$, and $l_Z$ to be the fraction of allocations of size $Z$ still alive at the time of a heap dump, then we can phrase this as an optimization problem over the choices of $p_Z$:
Minimize
$$ \sum_Z Z^2 l_Z \frac{1-p_Z}{p_Z} $$
subject to
$$ \sum_Z a_Z p_Z \leq P $$
Ignoring a term that doesn't depend on $p_Z$, the objective is minimized whenever
$$ \sum_Z Z^2 l_Z \frac{1}{p_Z} $$
is. For a particular program, $l_Z$ and $a_Z$ are just numbers that can be obtained (exactly) from existing stats introspection facilities, and we have a fairly tractable convex optimization problem (it can be framed as a second-order cone program). It would be interesting to evaluate, for various common allocation patterns, how well our current strategy adapts. Do our actual choices for $p_Z$ closely correspond to the optimal ones? How close is the variance of our choices to the variance of the optimal strategy?
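For what it's worth, if we ignore the caps $p_Z \le 1$ and assume the budget constraint binds, the standard Lagrangian condition gives an interior solution of the form
$$ p_Z \propto Z \sqrt{\frac{l_Z}{a_Z}}, $$
with the constant of proportionality chosen so that $\sum_Z a_Z p_Z = P$, and the caps $p_Z \le 1$ reapplied afterwards.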
You can imagine an implementation that actually goes all the way, and makes $p_Z$ selections a tuning parameter. I don't think this is a good use of development time for the foreseeable future; but I do wonder about the answers to some of these questions.
## Implementation realities
The nice story above is at least partially a lie. Initially, jeprof (copying its logic from pprof) had the sum-then-unbias error described above. The current version of jemalloc does the unbiasing step on a per-allocation basis internally, so that we're always tracking what the unbiased numbers "should" be. The problem is, actually surfacing those unbiased numbers would require a breaking change to jeprof (and the various already-deployed tools that have copied its logic). Instead, we use a little bit more trickery. Since we know at dump time the numbers we want jeprof to report, we simply choose the values we'll output so that the jeprof numbers will match the true numbers. The math is described in `src/prof_data.c` (where the only cleverness is a change of variables that lets the exponentials fall out).
This has the effect of making the output of jeprof (and related tools) correct, while making its inputs incorrect. This can be annoying to human readers of raw profiling dump output.
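The trick described in the last paragraph can be sketched generically (hypothetical names only; the real closed-form change of variables lives in `src/prof_data.c` as noted above): given the true value $v$ we want a downstream tool to end up reporting, and the correction $f$ that tool will apply to whatever it reads from the dump, the dump writer emits an $x$ with $f(x) = v$. A numerically inverted version of that idea, assuming only that the correction is increasing, might look like:

```c
/*
 * Hypothetical illustration only (not jemalloc's actual code): emit a dump
 * value x such that the downstream tool's correction maps it back onto the
 * true, already-unbiased value v.  jemalloc solves this analytically; any
 * increasing correction can also be inverted numerically by bisection.
 */
static double
dump_value_for(double v, double lo, double hi,
    double (*tool_correction)(double x)) {
    for (int i = 0; i < 64; i++) {
        double mid = 0.5 * (lo + hi);
        if (tool_correction(mid) < v) {
            lo = mid;
        } else {
            hi = mid;
        }
    }
    return 0.5 * (lo + hi);
}
```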

File diff suppressed because one or more lines are too long

Binary image changed (after: 16 KiB)

View File

@ -0,0 +1,23 @@
#ifndef JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H
#define JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H
/*
* The callback to be executed "periodically", in response to some amount of
* allocator activity.
*
* This callback need not be computing any sort of peak (although that's the
* intended first use case), but we drive it from the peak counter, so it
* keeps things tidy to keep it here.
*
* The calls to this thunk get driven by the peak_event module.
*/
#define ACTIVITY_CALLBACK_THUNK_INITIALIZER {NULL, NULL}
typedef void (*activity_callback_t)(void *uctx, uint64_t allocated,
uint64_t deallocated);
typedef struct activity_callback_thunk_s activity_callback_thunk_t;
struct activity_callback_thunk_s {
activity_callback_t callback;
void *uctx;
};
#endif /* JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H */

View File

@ -2,59 +2,67 @@
#define JEMALLOC_INTERNAL_ARENA_EXTERNS_H
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/div.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/stats.h"
/*
* When the amount of pages to be purged exceeds this amount, deferred purge
* should happen.
*/
#define ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD UINT64_C(1024)
extern ssize_t opt_dirty_decay_ms;
extern ssize_t opt_muzzy_decay_ms;
extern percpu_arena_mode_t opt_percpu_arena;
extern const char *percpu_arena_mode_names[];
extern const uint64_t h_steps[SMOOTHSTEP_NSTEPS];
extern div_info_t arena_binind_div_info[SC_NBINS];
extern malloc_mutex_t arenas_lock;
extern emap_t arena_emap_global;
extern size_t opt_oversize_threshold;
extern size_t oversize_threshold;
/*
* arena_bin_offsets[binind] is the offset of the first bin shard for size class
* binind.
*/
extern uint32_t arena_bin_offsets[SC_NBINS];
void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms,
ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy);
void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
bin_stats_t *bstats, arena_stats_large_t *lstats,
arena_stats_extents_t *estats);
void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent);
#ifdef JEMALLOC_JET
size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr);
#endif
extent_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
size_t usize, size_t alignment, bool *zero);
bin_stats_data_t *bstats, arena_stats_large_t *lstats,
pac_estats_t *estats, hpa_shard_stats_t *hpastats, sec_stats_t *secstats);
void arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena);
edata_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
size_t usize, size_t alignment, bool zero);
void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
extent_t *extent);
edata_t *edata);
void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
extent_t *extent, size_t oldsize);
edata_t *edata, size_t oldsize);
void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
extent_t *extent, size_t oldsize);
ssize_t arena_dirty_decay_ms_get(arena_t *arena);
bool arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms);
ssize_t arena_muzzy_decay_ms_get(arena_t *arena);
bool arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms);
edata_t *edata, size_t oldsize);
bool arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state,
ssize_t decay_ms);
ssize_t arena_decay_ms_get(arena_t *arena, extent_state_t state);
void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
bool all);
uint64_t arena_time_until_deferred(tsdn_t *tsdn, arena_t *arena);
void arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena);
void arena_reset(tsd_t *tsd, arena_t *arena);
void arena_destroy(tsd_t *tsd, arena_t *arena);
void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info,
bool zero);
typedef void (arena_dalloc_junk_small_t)(void *, const bin_info_t *);
extern arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small;
void arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena,
cache_bin_t *cache_bin, cache_bin_info_t *cache_bin_info, szind_t binind,
const unsigned nfill);
void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
szind_t ind, bool zero);
@ -63,8 +71,12 @@ void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
void arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize);
void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
bool slow_path);
void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
szind_t binind, extent_t *extent, void *ptr);
void arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab);
void arena_dalloc_bin_locked_handle_newly_empty(tsdn_t *tsdn, arena_t *arena,
edata_t *slab, bin_t *bin);
void arena_dalloc_bin_locked_handle_newly_nonempty(tsdn_t *tsdn, arena_t *arena,
edata_t *slab, bin_t *bin);
void arena_dalloc_small(tsdn_t *tsdn, void *ptr);
bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero, size_t *newsize);
@ -72,6 +84,9 @@ void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
size_t size, size_t alignment, bool zero, tcache_t *tcache,
hook_ralloc_args_t *hook_args);
dss_prec_t arena_dss_prec_get(arena_t *arena);
ehooks_t *arena_get_ehooks(arena_t *arena);
extent_hooks_t *arena_set_extent_hooks(tsd_t *tsd, arena_t *arena,
extent_hooks_t *extent_hooks);
bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
ssize_t arena_dirty_decay_ms_default_get(void);
bool arena_dirty_decay_ms_default_set(ssize_t decay_ms);
@ -82,14 +97,15 @@ bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena,
unsigned arena_nthreads_get(arena_t *arena, bool internal);
void arena_nthreads_inc(arena_t *arena, bool internal);
void arena_nthreads_dec(arena_t *arena, bool internal);
size_t arena_extent_sn_next(arena_t *arena);
arena_t *arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
arena_t *arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config);
bool arena_init_huge(void);
bool arena_is_huge(unsigned arena_ind);
arena_t *arena_choose_huge(tsd_t *tsd);
bin_t *arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind,
bin_t *arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind,
unsigned *binshard);
void arena_boot(sc_data_t *sc_data);
size_t arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind,
void **ptrs, size_t nfill, bool zero);
bool arena_boot(sc_data_t *sc_data, base_t *base, bool hpa);
void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
@ -98,6 +114,7 @@ void arena_prefork4(tsdn_t *tsdn, arena_t *arena);
void arena_prefork5(tsdn_t *tsdn, arena_t *arena);
void arena_prefork6(tsdn_t *tsdn, arena_t *arena);
void arena_prefork7(tsdn_t *tsdn, arena_t *arena);
void arena_prefork8(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);

View File

@ -3,7 +3,7 @@
static inline unsigned
arena_ind_get(const arena_t *arena) {
return base_ind_get(arena->base);
return arena->ind;
}
static inline void
@ -21,37 +21,4 @@ arena_internal_get(arena_t *arena) {
return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED);
}
static inline bool
arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) {
cassert(config_prof);
if (likely(prof_interval == 0 || !prof_active_get_unlocked())) {
return false;
}
return prof_accum_add(tsdn, &arena->prof_accum, accumbytes);
}
static inline void
percpu_arena_update(tsd_t *tsd, unsigned cpu) {
assert(have_percpu_arena);
arena_t *oldarena = tsd_arena_get(tsd);
assert(oldarena != NULL);
unsigned oldind = arena_ind_get(oldarena);
if (oldind != cpu) {
unsigned newind = cpu;
arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true);
assert(newarena != NULL);
/* Set new arena/tcache associations. */
arena_migrate(tsd, oldind, newind);
tcache_t *tcache = tcache_get(tsd);
if (tcache != NULL) {
tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
newarena);
}
}
}
#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_A_H */

View File

@ -1,16 +1,20 @@
#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H
#define JEMALLOC_INTERNAL_ARENA_INLINES_B_H
#include "jemalloc/internal/div.h"
#include "jemalloc/internal/emap.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
JEMALLOC_ALWAYS_INLINE bool
arena_has_default_hooks(arena_t *arena) {
return (extent_hooks_get(arena) == &extent_hooks_default);
static inline arena_t *
arena_get_from_edata(edata_t *edata) {
return (arena_t *)atomic_load_p(&arenas[edata_arena_ind_get(edata)],
ATOMIC_RELAXED);
}
JEMALLOC_ALWAYS_INLINE arena_t *
@ -34,127 +38,109 @@ arena_choose_maybe_huge(tsd_t *tsd, arena_t *arena, size_t size) {
return arena_choose(tsd, NULL);
}
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
JEMALLOC_ALWAYS_INLINE void
arena_prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx,
prof_info_t *prof_info, bool reset_recent) {
cassert(config_prof);
assert(ptr != NULL);
assert(prof_info != NULL);
edata_t *edata = NULL;
bool is_slab;
/* Static check. */
if (alloc_ctx == NULL) {
edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
ptr);
is_slab = edata_slab_get(edata);
} else if (unlikely(!(is_slab = alloc_ctx->slab))) {
edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
ptr);
}
if (unlikely(!is_slab)) {
/* edata must have been initialized at this point. */
assert(edata != NULL);
large_prof_info_get(tsd, edata, prof_info, reset_recent);
} else {
prof_info->alloc_tctx = (prof_tctx_t *)(uintptr_t)1U;
/*
* No need to set other fields in prof_info; they will never be
* accessed if (uintptr_t)alloc_tctx == (uintptr_t)1U.
*/
}
}
JEMALLOC_ALWAYS_INLINE void
arena_prof_tctx_reset(tsd_t *tsd, const void *ptr,
emap_alloc_ctx_t *alloc_ctx) {
cassert(config_prof);
assert(ptr != NULL);
/* Static check. */
if (alloc_ctx == NULL) {
const extent_t *extent = iealloc(tsdn, ptr);
if (unlikely(!extent_slab_get(extent))) {
return large_prof_tctx_get(tsdn, extent);
edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd),
&arena_emap_global, ptr);
if (unlikely(!edata_slab_get(edata))) {
large_prof_tctx_reset(edata);
}
} else {
if (unlikely(!alloc_ctx->slab)) {
return large_prof_tctx_get(tsdn, iealloc(tsdn, ptr));
}
}
return (prof_tctx_t *)(uintptr_t)1U;
}
JEMALLOC_ALWAYS_INLINE void
arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
cassert(config_prof);
assert(ptr != NULL);
/* Static check. */
if (alloc_ctx == NULL) {
extent_t *extent = iealloc(tsdn, ptr);
if (unlikely(!extent_slab_get(extent))) {
large_prof_tctx_set(tsdn, extent, tctx);
}
} else {
if (unlikely(!alloc_ctx->slab)) {
large_prof_tctx_set(tsdn, iealloc(tsdn, ptr), tctx);
edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd),
&arena_emap_global, ptr);
large_prof_tctx_reset(edata);
}
}
}
static inline void
arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
JEMALLOC_ALWAYS_INLINE void
arena_prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) {
cassert(config_prof);
assert(ptr != NULL);
extent_t *extent = iealloc(tsdn, ptr);
assert(!extent_slab_get(extent));
edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
ptr);
assert(!edata_slab_get(edata));
large_prof_tctx_reset(tsdn, extent);
}
JEMALLOC_ALWAYS_INLINE nstime_t
arena_prof_alloc_time_get(tsdn_t *tsdn, const void *ptr,
alloc_ctx_t *alloc_ctx) {
cassert(config_prof);
assert(ptr != NULL);
extent_t *extent = iealloc(tsdn, ptr);
/*
* Unlike arena_prof_prof_tctx_{get, set}, we only call this once we're
* sure we have a sampled allocation.
*/
assert(!extent_slab_get(extent));
return large_prof_alloc_time_get(extent);
large_prof_tctx_reset(edata);
}
JEMALLOC_ALWAYS_INLINE void
arena_prof_alloc_time_set(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx,
nstime_t t) {
arena_prof_info_set(tsd_t *tsd, edata_t *edata, prof_tctx_t *tctx,
size_t size) {
cassert(config_prof);
assert(ptr != NULL);
extent_t *extent = iealloc(tsdn, ptr);
assert(!extent_slab_get(extent));
large_prof_alloc_time_set(extent, t);
assert(!edata_slab_get(edata));
large_prof_info_set(edata, tctx, size);
}
JEMALLOC_ALWAYS_INLINE void
arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
tsd_t *tsd;
ticker_t *decay_ticker;
if (unlikely(tsdn_null(tsdn))) {
return;
}
tsd = tsdn_tsd(tsdn);
decay_ticker = decay_ticker_get(tsd, arena_ind_get(arena));
if (unlikely(decay_ticker == NULL)) {
return;
}
if (unlikely(ticker_ticks(decay_ticker, nticks))) {
tsd_t *tsd = tsdn_tsd(tsdn);
/*
* We use the ticker_geom_t to avoid having per-arena state in the tsd.
* Instead of having a countdown-until-decay timer running for every
* arena in every thread, we flip a coin once per tick, whose
* probability of coming up heads is 1/nticks; this is effectively the
* operation of the ticker_geom_t. Each arena has the same chance of a
* coinflip coming up heads (1/ARENA_DECAY_NTICKS_PER_UPDATE), so we can
* use a single ticker for all of them.
*/
ticker_geom_t *decay_ticker = tsd_arena_decay_tickerp_get(tsd);
uint64_t *prng_state = tsd_prng_statep_get(tsd);
if (unlikely(ticker_geom_ticks(decay_ticker, prng_state, nticks))) {
arena_decay(tsdn, arena, false, false);
}
}
JEMALLOC_ALWAYS_INLINE void
arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_assert_not_owner(tsdn, &arena->decay_dirty.mtx);
malloc_mutex_assert_not_owner(tsdn, &arena->decay_muzzy.mtx);
arena_decay_ticks(tsdn, arena, 1);
}
/* Purge a single extent to retained / unmapped directly. */
JEMALLOC_ALWAYS_INLINE void
arena_decay_extent(tsdn_t *tsdn,arena_t *arena, extent_hooks_t **r_extent_hooks,
extent_t *extent) {
size_t extent_size = extent_size_get(extent);
extent_dalloc_wrapper(tsdn, arena,
r_extent_hooks, extent);
if (config_stats) {
/* Update stats accordingly. */
arena_stats_lock(tsdn, &arena->stats);
arena_stats_add_u64(tsdn, &arena->stats,
&arena->decay_dirty.stats->nmadvise, 1);
arena_stats_add_u64(tsdn, &arena->stats,
&arena->decay_dirty.stats->purged, extent_size >> LG_PAGE);
arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
extent_size);
arena_stats_unlock(tsdn, &arena->stats);
}
}
JEMALLOC_ALWAYS_INLINE void *
arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
tcache_t *tcache, bool slow_path) {
@ -178,21 +164,19 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
JEMALLOC_ALWAYS_INLINE arena_t *
arena_aalloc(tsdn_t *tsdn, const void *ptr) {
return extent_arena_get(iealloc(tsdn, ptr));
edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
unsigned arena_ind = edata_arena_ind_get(edata);
return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_RELAXED);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(tsdn_t *tsdn, const void *ptr) {
assert(ptr != NULL);
emap_alloc_ctx_t alloc_ctx;
emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx);
assert(alloc_ctx.szind != SC_NSIZES);
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true);
assert(szind != SC_NSIZES);
return sz_index2size(szind);
return sz_index2size(alloc_ctx.szind);
}
JEMALLOC_ALWAYS_INLINE size_t
@ -206,26 +190,53 @@ arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
* failure.
*/
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
extent_t *extent;
szind_t szind;
if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, false, &extent, &szind)) {
emap_full_alloc_ctx_t full_alloc_ctx;
bool missing = emap_full_alloc_ctx_try_lookup(tsdn, &arena_emap_global,
ptr, &full_alloc_ctx);
if (missing) {
return 0;
}
if (extent == NULL) {
if (full_alloc_ctx.edata == NULL) {
return 0;
}
assert(extent_state_get(extent) == extent_state_active);
assert(edata_state_get(full_alloc_ctx.edata) == extent_state_active);
/* Only slab members should be looked up via interior pointers. */
assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));
assert(edata_addr_get(full_alloc_ctx.edata) == ptr
|| edata_slab_get(full_alloc_ctx.edata));
assert(szind != SC_NSIZES);
assert(full_alloc_ctx.szind != SC_NSIZES);
return sz_index2size(szind);
return sz_index2size(full_alloc_ctx.szind);
}
JEMALLOC_ALWAYS_INLINE bool
large_dalloc_safety_checks(edata_t *edata, void *ptr, szind_t szind) {
if (!config_opt_safety_checks) {
return false;
}
/*
* Eagerly detect double free and sized dealloc bugs for large sizes.
* The cost is low enough (as edata will be accessed anyway) to be
* enabled all the time.
*/
if (unlikely(edata == NULL ||
edata_state_get(edata) != extent_state_active)) {
safety_check_fail("Invalid deallocation detected: "
"pages being freed (%p) not currently active, "
"possibly caused by double free bugs.",
(uintptr_t)edata_addr_get(edata));
return true;
}
size_t input_size = sz_index2size(szind);
if (unlikely(input_size != edata_usize_get(edata))) {
safety_check_fail_sized_dealloc(/* current_dealloc */ true, ptr,
/* true_size */ edata_usize_get(edata), input_size);
return true;
}
return false;
}
static inline void
@ -233,8 +244,13 @@ arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind) {
if (config_prof && unlikely(szind < SC_NBINS)) {
arena_dalloc_promoted(tsdn, ptr, NULL, true);
} else {
extent_t *extent = iealloc(tsdn, ptr);
large_dalloc(tsdn, extent);
edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
ptr);
if (large_dalloc_safety_checks(edata, ptr, szind)) {
/* See the comment in isfree. */
return;
}
large_dalloc(tsdn, edata);
}
}
@ -242,27 +258,22 @@ static inline void
arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
assert(ptr != NULL);
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
szind_t szind;
bool slab;
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
true, &szind, &slab);
emap_alloc_ctx_t alloc_ctx;
emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx);
if (config_debug) {
extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
rtree_ctx, (uintptr_t)ptr, true);
assert(szind == extent_szind_get(extent));
assert(szind < SC_NSIZES);
assert(slab == extent_slab_get(extent));
edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
ptr);
assert(alloc_ctx.szind == edata_szind_get(edata));
assert(alloc_ctx.szind < SC_NSIZES);
assert(alloc_ctx.slab == edata_slab_get(edata));
}
if (likely(slab)) {
if (likely(alloc_ctx.slab)) {
/* Small allocation. */
arena_dalloc_small(tsdn, ptr);
} else {
arena_dalloc_large_no_tcache(tsdn, ptr, szind);
arena_dalloc_large_no_tcache(tsdn, ptr, alloc_ctx.szind);
}
}
@ -277,14 +288,19 @@ arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
slow_path);
}
} else {
extent_t *extent = iealloc(tsdn, ptr);
large_dalloc(tsdn, extent);
edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
ptr);
if (large_dalloc_safety_checks(edata, ptr, szind)) {
/* See the comment in isfree. */
return;
}
large_dalloc(tsdn, edata);
}
}
JEMALLOC_ALWAYS_INLINE void
arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
alloc_ctx_t *alloc_ctx, bool slow_path) {
emap_alloc_ctx_t *caller_alloc_ctx, bool slow_path) {
assert(!tsdn_null(tsdn) || tcache == NULL);
assert(ptr != NULL);
@ -293,34 +309,30 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
return;
}
szind_t szind;
bool slab;
rtree_ctx_t *rtree_ctx;
if (alloc_ctx != NULL) {
szind = alloc_ctx->szind;
slab = alloc_ctx->slab;
assert(szind != SC_NSIZES);
emap_alloc_ctx_t alloc_ctx;
if (caller_alloc_ctx != NULL) {
alloc_ctx = *caller_alloc_ctx;
} else {
rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &szind, &slab);
util_assume(!tsdn_null(tsdn));
emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
&alloc_ctx);
}
if (config_debug) {
rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
rtree_ctx, (uintptr_t)ptr, true);
assert(szind == extent_szind_get(extent));
assert(szind < SC_NSIZES);
assert(slab == extent_slab_get(extent));
edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
ptr);
assert(alloc_ctx.szind == edata_szind_get(edata));
assert(alloc_ctx.szind < SC_NSIZES);
assert(alloc_ctx.slab == edata_slab_get(edata));
}
if (likely(slab)) {
if (likely(alloc_ctx.slab)) {
/* Small allocation. */
tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
slow_path);
tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
alloc_ctx.szind, slow_path);
} else {
arena_dalloc_large(tsdn, ptr, tcache, szind, slow_path);
arena_dalloc_large(tsdn, ptr, tcache, alloc_ctx.szind,
slow_path);
}
}
@ -329,47 +341,43 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
assert(ptr != NULL);
assert(size <= SC_LARGE_MAXCLASS);
szind_t szind;
bool slab;
emap_alloc_ctx_t alloc_ctx;
if (!config_prof || !opt_prof) {
/*
* There is no risk of being confused by a promoted sampled
* object, so base szind and slab on the given size.
*/
szind = sz_size2index(size);
slab = (szind < SC_NBINS);
alloc_ctx.szind = sz_size2index(size);
alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
}
if ((config_prof && opt_prof) || config_debug) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
&rtree_ctx_fallback);
emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
&alloc_ctx);
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &szind, &slab);
assert(szind == sz_size2index(size));
assert((config_prof && opt_prof) || slab == (szind < SC_NBINS));
assert(alloc_ctx.szind == sz_size2index(size));
assert((config_prof && opt_prof)
|| alloc_ctx.slab == (alloc_ctx.szind < SC_NBINS));
if (config_debug) {
extent_t *extent = rtree_extent_read(tsdn,
&extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
assert(szind == extent_szind_get(extent));
assert(slab == extent_slab_get(extent));
edata_t *edata = emap_edata_lookup(tsdn,
&arena_emap_global, ptr);
assert(alloc_ctx.szind == edata_szind_get(edata));
assert(alloc_ctx.slab == edata_slab_get(edata));
}
}
if (likely(slab)) {
if (likely(alloc_ctx.slab)) {
/* Small allocation. */
arena_dalloc_small(tsdn, ptr);
} else {
arena_dalloc_large_no_tcache(tsdn, ptr, szind);
arena_dalloc_large_no_tcache(tsdn, ptr, alloc_ctx.szind);
}
}
JEMALLOC_ALWAYS_INLINE void
arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
alloc_ctx_t *alloc_ctx, bool slow_path) {
emap_alloc_ctx_t *caller_alloc_ctx, bool slow_path) {
assert(!tsdn_null(tsdn) || tcache == NULL);
assert(ptr != NULL);
assert(size <= SC_LARGE_MAXCLASS);
@ -379,49 +387,164 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
return;
}
szind_t szind;
bool slab;
alloc_ctx_t local_ctx;
emap_alloc_ctx_t alloc_ctx;
if (config_prof && opt_prof) {
if (alloc_ctx == NULL) {
if (caller_alloc_ctx == NULL) {
/* Uncommon case and should be a static check. */
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
&rtree_ctx_fallback);
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &local_ctx.szind,
&local_ctx.slab);
assert(local_ctx.szind == sz_size2index(size));
alloc_ctx = &local_ctx;
emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
&alloc_ctx);
assert(alloc_ctx.szind == sz_size2index(size));
} else {
alloc_ctx = *caller_alloc_ctx;
}
slab = alloc_ctx->slab;
szind = alloc_ctx->szind;
} else {
/*
* There is no risk of being confused by a promoted sampled
* object, so base szind and slab on the given size.
*/
szind = sz_size2index(size);
slab = (szind < SC_NBINS);
alloc_ctx.szind = sz_size2index(size);
alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
}
if (config_debug) {
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &szind, &slab);
extent_t *extent = rtree_extent_read(tsdn,
&extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
assert(szind == extent_szind_get(extent));
assert(slab == extent_slab_get(extent));
edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
ptr);
assert(alloc_ctx.szind == edata_szind_get(edata));
assert(alloc_ctx.slab == edata_slab_get(edata));
}
if (likely(slab)) {
if (likely(alloc_ctx.slab)) {
/* Small allocation. */
tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
slow_path);
tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
alloc_ctx.szind, slow_path);
} else {
arena_dalloc_large(tsdn, ptr, tcache, szind, slow_path);
arena_dalloc_large(tsdn, ptr, tcache, alloc_ctx.szind,
slow_path);
}
}
static inline void
arena_cache_oblivious_randomize(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
size_t alignment) {
assert(edata_base_get(edata) == edata_addr_get(edata));
if (alignment < PAGE) {
unsigned lg_range = LG_PAGE -
lg_floor(CACHELINE_CEILING(alignment));
size_t r;
if (!tsdn_null(tsdn)) {
tsd_t *tsd = tsdn_tsd(tsdn);
r = (size_t)prng_lg_range_u64(
tsd_prng_statep_get(tsd), lg_range);
} else {
uint64_t stack_value = (uint64_t)(uintptr_t)&r;
r = (size_t)prng_lg_range_u64(&stack_value, lg_range);
}
uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
lg_range);
edata->e_addr = (void *)((uintptr_t)edata->e_addr +
random_offset);
assert(ALIGNMENT_ADDR2BASE(edata->e_addr, alignment) ==
edata->e_addr);
}
}
/*
* The dalloc bin info contains just the information that the common paths need
* during tcache flushes. By force-inlining these paths, and using local copies
* of data (so that the compiler knows it's constant), we avoid a whole bunch of
* redundant loads and stores by leaving this information in registers.
*/
typedef struct arena_dalloc_bin_locked_info_s arena_dalloc_bin_locked_info_t;
struct arena_dalloc_bin_locked_info_s {
div_info_t div_info;
uint32_t nregs;
uint64_t ndalloc;
};
JEMALLOC_ALWAYS_INLINE size_t
arena_slab_regind(arena_dalloc_bin_locked_info_t *info, szind_t binind,
edata_t *slab, const void *ptr) {
size_t diff, regind;
/* Freeing a pointer outside the slab can cause assertion failure. */
assert((uintptr_t)ptr >= (uintptr_t)edata_addr_get(slab));
assert((uintptr_t)ptr < (uintptr_t)edata_past_get(slab));
/* Freeing an interior pointer can cause assertion failure. */
assert(((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab)) %
(uintptr_t)bin_infos[binind].reg_size == 0);
diff = (size_t)((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab));
/* Avoid doing division with a variable divisor. */
regind = div_compute(&info->div_info, diff);
assert(regind < bin_infos[binind].nregs);
return regind;
}
JEMALLOC_ALWAYS_INLINE void
arena_dalloc_bin_locked_begin(arena_dalloc_bin_locked_info_t *info,
szind_t binind) {
info->div_info = arena_binind_div_info[binind];
info->nregs = bin_infos[binind].nregs;
info->ndalloc = 0;
}
/*
* Does the deallocation work associated with freeing a single pointer (a
* "step") in between a arena_dalloc_bin_locked begin and end call.
*
* Returns true if arena_slab_dalloc must be called on slab. Doesn't do
* stats updates, which happen during finish (this lets running counts get left
* in a register).
*/
JEMALLOC_ALWAYS_INLINE bool
arena_dalloc_bin_locked_step(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
arena_dalloc_bin_locked_info_t *info, szind_t binind, edata_t *slab,
void *ptr) {
const bin_info_t *bin_info = &bin_infos[binind];
size_t regind = arena_slab_regind(info, binind, slab, ptr);
slab_data_t *slab_data = edata_slab_data_get(slab);
assert(edata_nfree_get(slab) < bin_info->nregs);
/* Freeing an unallocated pointer can cause assertion failure. */
assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
edata_nfree_inc(slab);
if (config_stats) {
info->ndalloc++;
}
unsigned nfree = edata_nfree_get(slab);
if (nfree == bin_info->nregs) {
arena_dalloc_bin_locked_handle_newly_empty(tsdn, arena, slab,
bin);
return true;
} else if (nfree == 1 && slab != bin->slabcur) {
arena_dalloc_bin_locked_handle_newly_nonempty(tsdn, arena, slab,
bin);
}
return false;
}
JEMALLOC_ALWAYS_INLINE void
arena_dalloc_bin_locked_finish(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
arena_dalloc_bin_locked_info_t *info) {
if (config_stats) {
bin->stats.ndalloc += info->ndalloc;
assert(bin->stats.curregs >= (size_t)info->ndalloc);
bin->stats.curregs -= (size_t)info->ndalloc;
}
}
static inline bin_t *
arena_get_bin(arena_t *arena, szind_t binind, unsigned binshard) {
bin_t *shard0 = (bin_t *)((uintptr_t)arena + arena_bin_offsets[binind]);
return shard0 + binshard;
}
#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */

View File

@ -2,77 +2,41 @@
#define JEMALLOC_INTERNAL_ARENA_STATS_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/lockedint.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/pa.h"
#include "jemalloc/internal/sc.h"
JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
/*
* In those architectures that support 64-bit atomics, we use atomic updates for
* our 64-bit values. Otherwise, we use a plain uint64_t and synchronize
* externally.
*/
#ifdef JEMALLOC_ATOMIC_U64
typedef atomic_u64_t arena_stats_u64_t;
#else
/* Must hold the arena stats mutex while reading atomically. */
typedef uint64_t arena_stats_u64_t;
#endif
typedef struct arena_stats_large_s arena_stats_large_t;
struct arena_stats_large_s {
/*
* Total number of allocation/deallocation requests served directly by
* the arena.
*/
arena_stats_u64_t nmalloc;
arena_stats_u64_t ndalloc;
locked_u64_t nmalloc;
locked_u64_t ndalloc;
/*
* Number of allocation requests that correspond to this size class.
* This includes requests served by tcache, though tcache only
* periodically merges into this counter.
*/
arena_stats_u64_t nrequests; /* Partially derived. */
locked_u64_t nrequests; /* Partially derived. */
/*
* Number of tcache fills / flushes for large (similarly, periodically
* merged). Note that there is no large tcache batch-fill currently
* (i.e. only fill 1 at a time); however flush may be batched.
*/
arena_stats_u64_t nfills; /* Partially derived. */
arena_stats_u64_t nflushes; /* Partially derived. */
locked_u64_t nfills; /* Partially derived. */
locked_u64_t nflushes; /* Partially derived. */
/* Current number of allocations of this size class. */
size_t curlextents; /* Derived. */
};
typedef struct arena_stats_decay_s arena_stats_decay_t;
struct arena_stats_decay_s {
/* Total number of purge sweeps. */
arena_stats_u64_t npurge;
/* Total number of madvise calls made. */
arena_stats_u64_t nmadvise;
/* Total number of pages purged. */
arena_stats_u64_t purged;
};
typedef struct arena_stats_extents_s arena_stats_extents_t;
struct arena_stats_extents_s {
/*
* Stats for a given index in the range [0, SC_NPSIZES] in an extents_t.
* We track both bytes and # of extents: two extents in the same bucket
* may have different sizes if adjacent size classes differ by more than
* a page, so bytes cannot always be derived from # of extents.
*/
atomic_zu_t ndirty;
atomic_zu_t dirty_bytes;
atomic_zu_t nmuzzy;
atomic_zu_t muzzy_bytes;
atomic_zu_t nretained;
atomic_zu_t retained_bytes;
};
/*
* Arena stats. Note that fields marked "derived" are not directly maintained
* within the arena code; rather their values are derived during stats merge
@ -80,43 +44,36 @@ struct arena_stats_extents_s {
*/
typedef struct arena_stats_s arena_stats_t;
struct arena_stats_s {
#ifndef JEMALLOC_ATOMIC_U64
malloc_mutex_t mtx;
#endif
/* Number of bytes currently mapped, excluding retained memory. */
atomic_zu_t mapped; /* Partially derived. */
LOCKEDINT_MTX_DECLARE(mtx)
/*
* Number of unused virtual memory bytes currently retained. Retained
* bytes are technically mapped (though always decommitted or purged),
* but they are excluded from the mapped statistic (above).
* resident includes the base stats -- that's why it lives here and not
* in pa_shard_stats_t.
*/
atomic_zu_t retained; /* Derived. */
size_t base; /* Derived. */
size_t resident; /* Derived. */
size_t metadata_thp; /* Derived. */
size_t mapped; /* Derived. */
/* Number of extent_t structs allocated by base, but not being used. */
atomic_zu_t extent_avail;
arena_stats_decay_t decay_dirty;
arena_stats_decay_t decay_muzzy;
atomic_zu_t base; /* Derived. */
atomic_zu_t internal;
atomic_zu_t resident; /* Derived. */
atomic_zu_t metadata_thp;
atomic_zu_t allocated_large; /* Derived. */
arena_stats_u64_t nmalloc_large; /* Derived. */
arena_stats_u64_t ndalloc_large; /* Derived. */
arena_stats_u64_t nfills_large; /* Derived. */
arena_stats_u64_t nflushes_large; /* Derived. */
arena_stats_u64_t nrequests_large; /* Derived. */
size_t allocated_large; /* Derived. */
uint64_t nmalloc_large; /* Derived. */
uint64_t ndalloc_large; /* Derived. */
uint64_t nfills_large; /* Derived. */
uint64_t nflushes_large; /* Derived. */
uint64_t nrequests_large; /* Derived. */
/* VM space had to be leaked (undocumented). Normally 0. */
atomic_zu_t abandoned_vm;
/*
* The stats logically owned by the pa_shard in the same arena. This
* lives here only because it's convenient for the purposes of the ctl
* module -- it only knows about the single arena_stats.
*/
pa_shard_stats_t pa_shard_stats;
/* Number of bytes cached in tcache associated with this arena. */
atomic_zu_t tcache_bytes; /* Derived. */
size_t tcache_bytes; /* Derived. */
size_t tcache_stashed_bytes; /* Derived. */
mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];
@ -134,138 +91,24 @@ arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) {
assert(((char *)arena_stats)[i] == 0);
}
}
#ifndef JEMALLOC_ATOMIC_U64
if (malloc_mutex_init(&arena_stats->mtx, "arena_stats",
if (LOCKEDINT_MTX_INIT(arena_stats->mtx, "arena_stats",
WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
return true;
}
#endif
/* Memory is zeroed, so there is no need to clear stats. */
return false;
}
static inline void
arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
#ifndef JEMALLOC_ATOMIC_U64
malloc_mutex_lock(tsdn, &arena_stats->mtx);
#endif
}
static inline void
arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
#ifndef JEMALLOC_ATOMIC_U64
malloc_mutex_unlock(tsdn, &arena_stats->mtx);
#endif
}
static inline uint64_t
arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
arena_stats_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
return atomic_load_u64(p, ATOMIC_RELAXED);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
return *p;
#endif
}
static inline void
arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
*p += x;
#endif
}
static inline void
arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
assert(r - x <= r);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
*p -= x;
assert(*p + x >= *p);
#endif
}
/*
* Non-atomically sets *dst += src. *dst needs external synchronization.
* This lets us avoid the cost of a fetch_add when its unnecessary (note that
* the types here are atomic).
*/
static inline void
arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
#ifdef JEMALLOC_ATOMIC_U64
uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED);
#else
*dst += src;
#endif
}
static inline size_t
arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
atomic_zu_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
return atomic_load_zu(p, ATOMIC_RELAXED);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
return atomic_load_zu(p, ATOMIC_RELAXED);
#endif
}
static inline void
arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
atomic_zu_t *p, size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
atomic_store_zu(p, cur + x, ATOMIC_RELAXED);
#endif
}
static inline void
arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
atomic_zu_t *p, size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
assert(r - x <= r);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
atomic_store_zu(p, cur - x, ATOMIC_RELAXED);
#endif
}
/* Like the _u64 variant, needs an externally synchronized *dst. */
static inline void
arena_stats_accum_zu(atomic_zu_t *dst, size_t src) {
size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED);
}
static inline void
arena_stats_large_flush_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
szind_t szind, uint64_t nrequests) {
arena_stats_lock(tsdn, arena_stats);
LOCKEDINT_MTX_LOCK(tsdn, arena_stats->mtx);
arena_stats_large_t *lstats = &arena_stats->lstats[szind - SC_NBINS];
arena_stats_add_u64(tsdn, arena_stats, &lstats->nrequests, nrequests);
arena_stats_add_u64(tsdn, arena_stats, &lstats->nflushes, 1);
arena_stats_unlock(tsdn, arena_stats);
}
static inline void
arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
arena_stats_lock(tsdn, arena_stats);
arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size);
arena_stats_unlock(tsdn, arena_stats);
locked_inc_u64(tsdn, LOCKEDINT_MTX(arena_stats->mtx),
&lstats->nrequests, nrequests);
locked_inc_u64(tsdn, LOCKEDINT_MTX(arena_stats->mtx),
&lstats->nflushes, 1);
LOCKEDINT_MTX_UNLOCK(tsdn, arena_stats->mtx);
}
#endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */

View File

@ -0,0 +1,101 @@
#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_H
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_H
#include "jemalloc/internal/arena_stats.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/counter.h"
#include "jemalloc/internal/ecache.h"
#include "jemalloc/internal/edata_cache.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/pa.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/ticker.h"
struct arena_s {
/*
* Number of threads currently assigned to this arena. Each thread has
* two distinct assignments, one for application-serving allocation, and
* the other for internal metadata allocation. Internal metadata must
* not be allocated from arenas explicitly created via the arenas.create
* mallctl, because the arena.<i>.reset mallctl indiscriminately
* discards all allocations for the affected arena.
*
* 0: Application allocation.
* 1: Internal metadata allocation.
*
* Synchronization: atomic.
*/
atomic_u_t nthreads[2];
/* Next bin shard for binding new threads. Synchronization: atomic. */
atomic_u_t binshard_next;
/*
* When percpu_arena is enabled, to amortize the cost of reading /
* updating the current CPU id, track the most recent thread accessing
* this arena, and only read CPU if there is a mismatch.
*/
tsdn_t *last_thd;
/* Synchronization: internal. */
arena_stats_t stats;
/*
* Lists of tcaches and cache_bin_array_descriptors for extant threads
* associated with this arena. Stats from these are merged
* incrementally, and at exit if opt_stats_print is enabled.
*
* Synchronization: tcache_ql_mtx.
*/
ql_head(tcache_slow_t) tcache_ql;
ql_head(cache_bin_array_descriptor_t) cache_bin_array_descriptor_ql;
malloc_mutex_t tcache_ql_mtx;
/*
* Represents a dss_prec_t, but atomically.
*
* Synchronization: atomic.
*/
atomic_u_t dss_prec;
/*
* Extant large allocations.
*
* Synchronization: large_mtx.
*/
edata_list_active_t large;
/* Synchronizes all large allocation/update/deallocation. */
malloc_mutex_t large_mtx;
/* The page-level allocator shard this arena uses. */
pa_shard_t pa_shard;
/*
* A cached copy of base->ind. This can get accessed on hot paths;
* looking it up in base requires an extra pointer hop / cache miss.
*/
unsigned ind;
/*
* Base allocator, from which arena metadata are allocated.
*
* Synchronization: internal.
*/
base_t *base;
/* Used to determine uptime. Read-only after initialization. */
nstime_t create_time;
/*
* The arena is allocated alongside its bins; really this is a
* dynamically sized array determined by the binshard settings.
*/
bin_t bins[0];
};
#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_H */

View File

@ -1,11 +0,0 @@
#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H
#include "jemalloc/internal/bitmap.h"
struct arena_slab_data_s {
/* Per region allocated/deallocated bitmap. */
bitmap_t bitmap[BITMAP_GROUPS_MAX];
};
#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H */

View File

@ -1,232 +0,0 @@
#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
#include "jemalloc/internal/arena_stats.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/ticker.h"
struct arena_decay_s {
/* Synchronizes all non-atomic fields. */
malloc_mutex_t mtx;
/*
* True if a thread is currently purging the extents associated with
* this decay structure.
*/
bool purging;
/*
* Approximate time in milliseconds from the creation of a set of unused
* dirty pages until an equivalent set of unused dirty pages is purged
* and/or reused.
*/
atomic_zd_t time_ms;
/* time / SMOOTHSTEP_NSTEPS. */
nstime_t interval;
/*
* Time at which the current decay interval logically started. We do
* not actually advance to a new epoch until sometime after it starts
* because of scheduling and computation delays, and it is even possible
* to completely skip epochs. In all cases, during epoch advancement we
* merge all relevant activity into the most recently recorded epoch.
*/
nstime_t epoch;
/* Deadline randomness generator. */
uint64_t jitter_state;
/*
* Deadline for current epoch. This is the sum of interval and per
* epoch jitter which is a uniform random variable in [0..interval).
* Epochs always advance by precise multiples of interval, but we
* randomize the deadline to reduce the likelihood of arenas purging in
* lockstep.
*/
nstime_t deadline;
/*
* Number of unpurged pages at beginning of current epoch. During epoch
* advancement we use the delta between arena->decay_*.nunpurged and
* extents_npages_get(&arena->extents_*) to determine how many dirty
* pages, if any, were generated.
*/
size_t nunpurged;
/*
* Trailing log of how many unused dirty pages were generated during
* each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
* element is the most recent epoch. Corresponding epoch times are
* relative to epoch.
*/
size_t backlog[SMOOTHSTEP_NSTEPS];
/*
* Pointer to associated stats. These stats are embedded directly in
* the arena's stats due to how stats structures are shared between the
* arena and ctl code.
*
* Synchronization: Same as associated arena's stats field. */
arena_stats_decay_t *stats;
/* Peak number of pages in associated extents. Used for debug only. */
uint64_t ceil_npages;
};
struct arena_s {
/*
* Number of threads currently assigned to this arena. Each thread has
* two distinct assignments, one for application-serving allocation, and
* the other for internal metadata allocation. Internal metadata must
* not be allocated from arenas explicitly created via the arenas.create
* mallctl, because the arena.<i>.reset mallctl indiscriminately
* discards all allocations for the affected arena.
*
* 0: Application allocation.
* 1: Internal metadata allocation.
*
* Synchronization: atomic.
*/
atomic_u_t nthreads[2];
/* Next bin shard for binding new threads. Synchronization: atomic. */
atomic_u_t binshard_next;
/*
* When percpu_arena is enabled, to amortize the cost of reading /
* updating the current CPU id, track the most recent thread accessing
* this arena, and only read CPU if there is a mismatch.
*/
tsdn_t *last_thd;
/* Synchronization: internal. */
arena_stats_t stats;
/*
* Lists of tcaches and cache_bin_array_descriptors for extant threads
* associated with this arena. Stats from these are merged
* incrementally, and at exit if opt_stats_print is enabled.
*
* Synchronization: tcache_ql_mtx.
*/
ql_head(tcache_t) tcache_ql;
ql_head(cache_bin_array_descriptor_t) cache_bin_array_descriptor_ql;
malloc_mutex_t tcache_ql_mtx;
/* Synchronization: internal. */
prof_accum_t prof_accum;
/*
* PRNG state for cache index randomization of large allocation base
* pointers.
*
* Synchronization: atomic.
*/
atomic_zu_t offset_state;
/*
* Extent serial number generator state.
*
* Synchronization: atomic.
*/
atomic_zu_t extent_sn_next;
/*
* Represents a dss_prec_t, but atomically.
*
* Synchronization: atomic.
*/
atomic_u_t dss_prec;
/*
* Number of pages in active extents.
*
* Synchronization: atomic.
*/
atomic_zu_t nactive;
/*
* Extant large allocations.
*
* Synchronization: large_mtx.
*/
extent_list_t large;
/* Synchronizes all large allocation/update/deallocation. */
malloc_mutex_t large_mtx;
/*
* Collections of extents that were previously allocated. These are
* used when allocating extents, in an attempt to re-use address space.
*
* Synchronization: internal.
*/
extents_t extents_dirty;
extents_t extents_muzzy;
extents_t extents_retained;
/*
* Decay-based purging state, responsible for scheduling extent state
* transitions.
*
* Synchronization: internal.
*/
arena_decay_t decay_dirty; /* dirty --> muzzy */
arena_decay_t decay_muzzy; /* muzzy --> retained */
/*
* Next extent size class in a growing series to use when satisfying a
* request via the extent hooks (only if opt_retain). This limits the
* number of disjoint virtual memory ranges so that extent merging can
* be effective even if multiple arenas' extent allocation requests are
* highly interleaved.
*
* retain_grow_limit is the max allowed size ind to expand (unless the
* required size is greater). Default is no limit, and controlled
* through mallctl only.
*
* Synchronization: extent_grow_mtx
*/
pszind_t extent_grow_next;
pszind_t retain_grow_limit;
malloc_mutex_t extent_grow_mtx;
/*
* Available extent structures that were allocated via
* base_alloc_extent().
*
* Synchronization: extent_avail_mtx.
*/
extent_tree_t extent_avail;
atomic_zu_t extent_avail_cnt;
malloc_mutex_t extent_avail_mtx;
/*
* bins is used to store heaps of free regions.
*
* Synchronization: internal.
*/
bins_t bins[SC_NBINS];
/*
* Base allocator, from which arena metadata are allocated.
*
* Synchronization: internal.
*/
base_t *base;
/* Used to determine uptime. Read-only after initialization. */
nstime_t create_time;
};
/* Used in conjunction with tsd for fast arena-related context lookup. */
struct arena_tdata_s {
ticker_t decay_ticker;
};
/* Used to pass rtree lookup context down the path. */
struct alloc_ctx_s {
szind_t szind;
bool slab;
};
#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */

View File

@ -3,21 +3,14 @@
#include "jemalloc/internal/sc.h"
/* Maximum number of regions in one slab. */
#define LG_SLAB_MAXREGS (LG_PAGE - SC_LG_TINY_MIN)
#define SLAB_MAXREGS (1U << LG_SLAB_MAXREGS)
/* Default decay times in milliseconds. */
#define DIRTY_DECAY_MS_DEFAULT ZD(10 * 1000)
#define MUZZY_DECAY_MS_DEFAULT (0)
/* Number of event ticks between time checks. */
#define DECAY_NTICKS_PER_UPDATE 1000
#define ARENA_DECAY_NTICKS_PER_UPDATE 1000
typedef struct arena_slab_data_s arena_slab_data_t;
typedef struct arena_decay_s arena_decay_t;
typedef struct arena_s arena_t;
typedef struct arena_tdata_s arena_tdata_t;
typedef struct alloc_ctx_s alloc_ctx_t;
typedef enum {
percpu_arena_mode_names_base = 0, /* Used for options processing. */
@ -48,4 +41,18 @@ typedef enum {
*/
#define OVERSIZE_THRESHOLD_DEFAULT (8 << 20)
struct arena_config_s {
/* extent hooks to be used for the arena */
extent_hooks_t *extent_hooks;
/*
* Use extent hooks for metadata (base) allocations when true.
*/
bool metadata_use_hooks;
};
typedef struct arena_config_s arena_config_t;
extern const arena_config_t arena_config_default;
#endif /* JEMALLOC_INTERNAL_ARENA_TYPES_H */

View File

@ -51,6 +51,27 @@
#define ATOMIC_ACQ_REL atomic_memory_order_acq_rel
#define ATOMIC_SEQ_CST atomic_memory_order_seq_cst
/*
* Another convenience -- simple atomic helper functions.
*/
#define JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(type, short_type, \
lg_size) \
JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \
ATOMIC_INLINE void \
atomic_load_add_store_##short_type(atomic_##short_type##_t *a, \
type inc) { \
type oldval = atomic_load_##short_type(a, ATOMIC_RELAXED); \
type newval = oldval + inc; \
atomic_store_##short_type(a, newval, ATOMIC_RELAXED); \
} \
ATOMIC_INLINE void \
atomic_load_sub_store_##short_type(atomic_##short_type##_t *a, \
type inc) { \
type oldval = atomic_load_##short_type(a, ATOMIC_RELAXED); \
type newval = oldval - inc; \
atomic_store_##short_type(a, newval, ATOMIC_RELAXED); \
}
/*
* Not all platforms have 64-bit atomics. If we do, this #define exposes that
* fact.
@ -67,18 +88,18 @@ JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR)
*/
JEMALLOC_GENERATE_ATOMICS(bool, b, 0)
JEMALLOC_GENERATE_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT)
JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT)
JEMALLOC_GENERATE_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR)
JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR)
JEMALLOC_GENERATE_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR)
JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR)
JEMALLOC_GENERATE_INT_ATOMICS(uint8_t, u8, 0)
JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint8_t, u8, 0)
JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2)
JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint32_t, u32, 2)
#ifdef JEMALLOC_ATOMIC_U64
JEMALLOC_GENERATE_INT_ATOMICS(uint64_t, u64, 3)
JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint64_t, u64, 3)
#endif
#undef ATOMIC_INLINE

View File

@ -12,8 +12,9 @@ extern background_thread_info_t *background_thread_info;
bool background_thread_create(tsd_t *tsd, unsigned arena_ind);
bool background_threads_enable(tsd_t *tsd);
bool background_threads_disable(tsd_t *tsd);
void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
arena_decay_t *decay, size_t npages_new);
bool background_thread_is_started(background_thread_info_t* info);
void background_thread_wakeup_early(background_thread_info_t *info,
nstime_t *remaining_sleep);
void background_thread_prefork0(tsdn_t *tsdn);
void background_thread_prefork1(tsdn_t *tsdn);
void background_thread_postfork_parent(tsdn_t *tsdn);
@ -27,6 +28,6 @@ extern int pthread_create_wrapper(pthread_t *__restrict, const pthread_attr_t *,
void *(*)(void *), void *__restrict);
#endif
bool background_thread_boot0(void);
bool background_thread_boot1(tsdn_t *tsdn);
bool background_thread_boot1(tsdn_t *tsdn, base_t *base);
#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H */

View File

@ -45,18 +45,4 @@ background_thread_indefinite_sleep(background_thread_info_t *info) {
return atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE);
}
JEMALLOC_ALWAYS_INLINE void
arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena,
bool is_background_thread) {
if (!background_thread_enabled() || is_background_thread) {
return;
}
background_thread_info_t *info =
arena_background_thread_info_get(arena);
if (background_thread_indefinite_sleep(info)) {
background_thread_interval_check(tsdn, arena,
&arena->decay_dirty, 0);
}
}
#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H */

View File

@ -11,6 +11,17 @@
#define MAX_BACKGROUND_THREAD_LIMIT MALLOCX_ARENA_LIMIT
#define DEFAULT_NUM_BACKGROUND_THREAD 4
/*
* These exist only as a transitional state. Eventually, deferral should be
* part of the PAI, and each implementation can indicate wait times with more
* specificity.
*/
#define BACKGROUND_THREAD_HPA_INTERVAL_MAX_UNINITIALIZED (-2)
#define BACKGROUND_THREAD_HPA_INTERVAL_MAX_DEFAULT_WHEN_ENABLED 5000
#define BACKGROUND_THREAD_DEFERRED_MIN UINT64_C(0)
#define BACKGROUND_THREAD_DEFERRED_MAX UINT64_MAX
typedef enum {
background_thread_stopped,
background_thread_started,
@ -48,6 +59,7 @@ struct background_thread_stats_s {
size_t num_threads;
uint64_t num_runs;
nstime_t run_interval;
mutex_prof_data_t max_counter_per_bg_thd;
};
typedef struct background_thread_stats_s background_thread_stats_t;

View File

@ -0,0 +1,110 @@
#ifndef JEMALLOC_INTERNAL_BASE_H
#define JEMALLOC_INTERNAL_BASE_H
#include "jemalloc/internal/edata.h"
#include "jemalloc/internal/ehooks.h"
#include "jemalloc/internal/mutex.h"
enum metadata_thp_mode_e {
metadata_thp_disabled = 0,
/*
* Lazily enable hugepage for metadata. To avoid high RSS caused by THP
* + low usage arena (i.e. THP becomes a significant percentage), the
* "auto" option only starts using THP after a base allocator used up
* the first THP region. Starting from the second hugepage (in a single
* arena), "auto" behaves the same as "always", i.e. madvise hugepage
* right away.
*/
metadata_thp_auto = 1,
metadata_thp_always = 2,
metadata_thp_mode_limit = 3
};
typedef enum metadata_thp_mode_e metadata_thp_mode_t;
#define METADATA_THP_DEFAULT metadata_thp_disabled
extern metadata_thp_mode_t opt_metadata_thp;
extern const char *metadata_thp_mode_names[];
/* Embedded at the beginning of every block of base-managed virtual memory. */
typedef struct base_block_s base_block_t;
struct base_block_s {
/* Total size of block's virtual memory mapping. */
size_t size;
/* Next block in list of base's blocks. */
base_block_t *next;
/* Tracks unused trailing space. */
edata_t edata;
};
typedef struct base_s base_t;
struct base_s {
/*
* User-configurable extent hook functions.
*/
ehooks_t ehooks;
/*
* User-configurable extent hook functions for metadata allocations.
*/
ehooks_t ehooks_base;
/* Protects base_alloc() and base_stats_get() operations. */
malloc_mutex_t mtx;
/* Using THP when true (metadata_thp auto mode). */
bool auto_thp_switched;
/*
* Most recent size class in the series of increasingly large base
* extents. Logarithmic spacing between subsequent allocations ensures
* that the total number of distinct mappings remains small.
*/
pszind_t pind_last;
/* Serial number generation state. */
size_t extent_sn_next;
/* Chain of all blocks associated with base. */
base_block_t *blocks;
/* Heap of extents that track unused trailing space within blocks. */
edata_heap_t avail[SC_NSIZES];
/* Stats, only maintained if config_stats. */
size_t allocated;
size_t resident;
size_t mapped;
/* Number of THP regions touched. */
size_t n_thp;
};
static inline unsigned
base_ind_get(const base_t *base) {
return ehooks_ind_get(&base->ehooks);
}
static inline bool
metadata_thp_enabled(void) {
return (opt_metadata_thp != metadata_thp_disabled);
}
base_t *b0get(void);
base_t *base_new(tsdn_t *tsdn, unsigned ind,
const extent_hooks_t *extent_hooks, bool metadata_use_hooks);
void base_delete(tsdn_t *tsdn, base_t *base);
ehooks_t *base_ehooks_get(base_t *base);
ehooks_t *base_ehooks_get_for_metadata(base_t *base);
extent_hooks_t *base_extent_hooks_set(base_t *base,
extent_hooks_t *extent_hooks);
void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
edata_t *base_alloc_edata(tsdn_t *tsdn, base_t *base);
void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
size_t *resident, size_t *mapped, size_t *n_thp);
void base_prefork(tsdn_t *tsdn, base_t *base);
void base_postfork_parent(tsdn_t *tsdn, base_t *base);
void base_postfork_child(tsdn_t *tsdn, base_t *base);
bool base_boot(tsdn_t *tsdn);
#endif /* JEMALLOC_INTERNAL_BASE_H */

View File

@ -1,22 +0,0 @@
#ifndef JEMALLOC_INTERNAL_BASE_EXTERNS_H
#define JEMALLOC_INTERNAL_BASE_EXTERNS_H
extern metadata_thp_mode_t opt_metadata_thp;
extern const char *metadata_thp_mode_names[];
base_t *b0get(void);
base_t *base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
void base_delete(tsdn_t *tsdn, base_t *base);
extent_hooks_t *base_extent_hooks_get(base_t *base);
extent_hooks_t *base_extent_hooks_set(base_t *base,
extent_hooks_t *extent_hooks);
void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
extent_t *base_alloc_extent(tsdn_t *tsdn, base_t *base);
void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
size_t *resident, size_t *mapped, size_t *n_thp);
void base_prefork(tsdn_t *tsdn, base_t *base);
void base_postfork_parent(tsdn_t *tsdn, base_t *base);
void base_postfork_child(tsdn_t *tsdn, base_t *base);
bool base_boot(tsdn_t *tsdn);
#endif /* JEMALLOC_INTERNAL_BASE_EXTERNS_H */

View File

@ -1,13 +0,0 @@
#ifndef JEMALLOC_INTERNAL_BASE_INLINES_H
#define JEMALLOC_INTERNAL_BASE_INLINES_H
static inline unsigned
base_ind_get(const base_t *base) {
return base->ind;
}
static inline bool
metadata_thp_enabled(void) {
return (opt_metadata_thp != metadata_thp_disabled);
}
#endif /* JEMALLOC_INTERNAL_BASE_INLINES_H */

View File

@ -1,59 +0,0 @@
#ifndef JEMALLOC_INTERNAL_BASE_STRUCTS_H
#define JEMALLOC_INTERNAL_BASE_STRUCTS_H
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sc.h"
/* Embedded at the beginning of every block of base-managed virtual memory. */
struct base_block_s {
/* Total size of block's virtual memory mapping. */
size_t size;
/* Next block in list of base's blocks. */
base_block_t *next;
/* Tracks unused trailing space. */
extent_t extent;
};
struct base_s {
/* Associated arena's index within the arenas array. */
unsigned ind;
/*
* User-configurable extent hook functions. Points to an
* extent_hooks_t.
*/
atomic_p_t extent_hooks;
/* Protects base_alloc() and base_stats_get() operations. */
malloc_mutex_t mtx;
/* Using THP when true (metadata_thp auto mode). */
bool auto_thp_switched;
/*
* Most recent size class in the series of increasingly large base
* extents. Logarithmic spacing between subsequent allocations ensures
* that the total number of distinct mappings remains small.
*/
pszind_t pind_last;
/* Serial number generation state. */
size_t extent_sn_next;
/* Chain of all blocks associated with base. */
base_block_t *blocks;
/* Heap of extents that track unused trailing space within blocks. */
extent_heap_t avail[SC_NSIZES];
/* Stats, only maintained if config_stats. */
size_t allocated;
size_t resident;
size_t mapped;
/* Number of THP regions touched. */
size_t n_thp;
};
#endif /* JEMALLOC_INTERNAL_BASE_STRUCTS_H */

View File

@ -1,33 +0,0 @@
#ifndef JEMALLOC_INTERNAL_BASE_TYPES_H
#define JEMALLOC_INTERNAL_BASE_TYPES_H
typedef struct base_block_s base_block_t;
typedef struct base_s base_t;
#define METADATA_THP_DEFAULT metadata_thp_disabled
/*
* In auto mode, arenas switch to huge pages for the base allocator on the
* second base block. a0 switches to thp on the 5th block (after 20 megabytes
* of metadata), since more metadata (e.g. rtree nodes) come from a0's base.
*/
#define BASE_AUTO_THP_THRESHOLD 2
#define BASE_AUTO_THP_THRESHOLD_A0 5
typedef enum {
metadata_thp_disabled = 0,
/*
* Lazily enable hugepage for metadata. To avoid high RSS caused by THP
* + low usage arena (i.e. THP becomes a significant percentage), the
* "auto" option only starts using THP after a base allocator used up
* the first THP region. Starting from the second hugepage (in a single
* arena), "auto" behaves the same as "always", i.e. madvise hugepage
* right away.
*/
metadata_thp_auto = 1,
metadata_thp_always = 2,
metadata_thp_mode_limit = 3
} metadata_thp_mode_t;
#endif /* JEMALLOC_INTERNAL_BASE_TYPES_H */

View File

@ -3,8 +3,7 @@
#include "jemalloc/internal/bin_stats.h"
#include "jemalloc/internal/bin_types.h"
#include "jemalloc/internal/extent_types.h"
#include "jemalloc/internal/extent_structs.h"
#include "jemalloc/internal/edata.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sc.h"
@ -12,74 +11,34 @@
* A bin contains a set of extents that are currently being used for slab
* allocations.
*/
/*
* Read-only information associated with each element of arena_t's bins array
* is stored separately, partly to reduce memory usage (only one copy, rather
* than one per arena), but mainly to avoid false cacheline sharing.
*
* Each slab has the following layout:
*
* /--------------------\
* | region 0 |
* |--------------------|
* | region 1 |
* |--------------------|
* | ... |
* | ... |
* | ... |
* |--------------------|
* | region nregs-1 |
* \--------------------/
*/
typedef struct bin_info_s bin_info_t;
struct bin_info_s {
/* Size of regions in a slab for this bin's size class. */
size_t reg_size;
/* Total size of a slab for this bin's size class. */
size_t slab_size;
/* Total number of regions in a slab for this bin's size class. */
uint32_t nregs;
/* Number of sharded bins in each arena for this size class. */
uint32_t n_shards;
/*
* Metadata used to manipulate bitmaps for slabs associated with this
* bin.
*/
bitmap_info_t bitmap_info;
};
extern bin_info_t bin_infos[SC_NBINS];
typedef struct bin_s bin_t;
struct bin_s {
/* All operations on bin_t fields require lock ownership. */
malloc_mutex_t lock;
/*
* Bin statistics. These get touched every time the lock is acquired,
* so put them close by in the hopes of getting some cache locality.
*/
bin_stats_t stats;
/*
* Current slab being used to service allocations of this bin's size
* class. slabcur is independent of slabs_{nonfull,full}; whenever
* slabcur is reassigned, the previous slab must be deallocated or
* inserted into slabs_{nonfull,full}.
*/
extent_t *slabcur;
edata_t *slabcur;
/*
* Heap of non-full slabs. This heap is used to assure that new
* allocations come from the non-full slab that is oldest/lowest in
* memory.
*/
extent_heap_t slabs_nonfull;
edata_heap_t slabs_nonfull;
/* List used to track full slabs. */
extent_list_t slabs_full;
/* Bin statistics. */
bin_stats_t stats;
edata_list_active_t slabs_full;
};
/* A set of sharded bins of the same size class. */
@ -92,7 +51,6 @@ struct bins_s {
void bin_shard_sizes_boot(unsigned bin_shards[SC_NBINS]);
bool bin_update_shard_size(unsigned bin_shards[SC_NBINS], size_t start_size,
size_t end_size, size_t nshards);
void bin_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]);
/* Initializes a bin to empty. Returns true on error. */
bool bin_init(bin_t *bin);
@ -104,19 +62,20 @@ void bin_postfork_child(tsdn_t *tsdn, bin_t *bin);
/* Stats. */
static inline void
bin_stats_merge(tsdn_t *tsdn, bin_stats_t *dst_bin_stats, bin_t *bin) {
bin_stats_merge(tsdn_t *tsdn, bin_stats_data_t *dst_bin_stats, bin_t *bin) {
malloc_mutex_lock(tsdn, &bin->lock);
malloc_mutex_prof_accum(tsdn, &dst_bin_stats->mutex_data, &bin->lock);
dst_bin_stats->nmalloc += bin->stats.nmalloc;
dst_bin_stats->ndalloc += bin->stats.ndalloc;
dst_bin_stats->nrequests += bin->stats.nrequests;
dst_bin_stats->curregs += bin->stats.curregs;
dst_bin_stats->nfills += bin->stats.nfills;
dst_bin_stats->nflushes += bin->stats.nflushes;
dst_bin_stats->nslabs += bin->stats.nslabs;
dst_bin_stats->reslabs += bin->stats.reslabs;
dst_bin_stats->curslabs += bin->stats.curslabs;
dst_bin_stats->nonfull_slabs += bin->stats.nonfull_slabs;
bin_stats_t *stats = &dst_bin_stats->stats_data;
stats->nmalloc += bin->stats.nmalloc;
stats->ndalloc += bin->stats.ndalloc;
stats->nrequests += bin->stats.nrequests;
stats->curregs += bin->stats.curregs;
stats->nfills += bin->stats.nfills;
stats->nflushes += bin->stats.nflushes;
stats->nslabs += bin->stats.nslabs;
stats->reslabs += bin->stats.reslabs;
stats->curslabs += bin->stats.curslabs;
stats->nonfull_slabs += bin->stats.nonfull_slabs;
malloc_mutex_unlock(tsdn, &bin->lock);
}

View File

@ -0,0 +1,50 @@
#ifndef JEMALLOC_INTERNAL_BIN_INFO_H
#define JEMALLOC_INTERNAL_BIN_INFO_H
#include "jemalloc/internal/bitmap.h"
/*
* Read-only information associated with each element of arena_t's bins array
* is stored separately, partly to reduce memory usage (only one copy, rather
* than one per arena), but mainly to avoid false cacheline sharing.
*
* Each slab has the following layout:
*
* /--------------------\
* | region 0 |
* |--------------------|
* | region 1 |
* |--------------------|
* | ... |
* | ... |
* | ... |
* |--------------------|
* | region nregs-1 |
* \--------------------/
*/
typedef struct bin_info_s bin_info_t;
struct bin_info_s {
/* Size of regions in a slab for this bin's size class. */
size_t reg_size;
/* Total size of a slab for this bin's size class. */
size_t slab_size;
/* Total number of regions in a slab for this bin's size class. */
uint32_t nregs;
/* Number of sharded bins in each arena for this size class. */
uint32_t n_shards;
/*
* Metadata used to manipulate bitmaps for slabs associated with this
* bin.
*/
bitmap_info_t bitmap_info;
};
extern bin_info_t bin_infos[SC_NBINS];
void bin_info_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]);
#endif /* JEMALLOC_INTERNAL_BIN_INFO_H */
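Since the layout above packs nregs equally sized regions into each slab with no per-slab header, nregs is simply slab_size / reg_size. A hedged example with made-up numbers (real values are filled in by bin_info_boot() from the size-class tables):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static void
bin_info_example(void) {
	size_t reg_size = 192;     /* bytes handed back per allocation */
	size_t slab_size = 12288;  /* three 4 KiB pages */
	uint32_t nregs = (uint32_t)(slab_size / reg_size);
	assert(nregs == 64);       /* regions 0 .. nregs-1 in the diagram */
}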

View File

@ -47,8 +47,11 @@ struct bin_stats_s {
/* Current size of nonfull slabs heap in this bin. */
size_t nonfull_slabs;
mutex_prof_data_t mutex_data;
};
typedef struct bin_stats_data_s bin_stats_data_t;
struct bin_stats_data_s {
bin_stats_t stats_data;
mutex_prof_data_t mutex_data;
};
#endif /* JEMALLOC_INTERNAL_BIN_STATS_H */

View File

@ -3,7 +3,7 @@
#include "jemalloc/internal/sc.h"
#define BIN_SHARDS_MAX (1 << EXTENT_BITS_BINSHARD_WIDTH)
#define BIN_SHARDS_MAX (1 << EDATA_BITS_BINSHARD_WIDTH)
#define N_BIN_SHARDS_DEFAULT 1
/* Used in TSD static initializer only. Real init in arena_bind(). */

View File

@ -3,144 +3,383 @@
#include "jemalloc/internal/assert.h"
#define BIT_UTIL_INLINE static inline
/* Sanity check. */
#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \
|| !defined(JEMALLOC_INTERNAL_FFS)
# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
#endif
BIT_UTIL_INLINE unsigned
ffs_llu(unsigned long long bitmap) {
return JEMALLOC_INTERNAL_FFSLL(bitmap);
/*
* Unlike the builtins and posix ffs functions, our ffs requires a non-zero
* input, and returns the position of the lowest bit set (as opposed to the
* posix versions, which return 1 larger than that position and use a return
* value of zero as a sentinel). This tends to simplify logic in callers, and
* allows for consistency with the builtins we build fls on top of.
*/
static inline unsigned
ffs_llu(unsigned long long x) {
util_assume(x != 0);
return JEMALLOC_INTERNAL_FFSLL(x) - 1;
}
BIT_UTIL_INLINE unsigned
ffs_lu(unsigned long bitmap) {
return JEMALLOC_INTERNAL_FFSL(bitmap);
static inline unsigned
ffs_lu(unsigned long x) {
util_assume(x != 0);
return JEMALLOC_INTERNAL_FFSL(x) - 1;
}
BIT_UTIL_INLINE unsigned
ffs_u(unsigned bitmap) {
return JEMALLOC_INTERNAL_FFS(bitmap);
static inline unsigned
ffs_u(unsigned x) {
util_assume(x != 0);
return JEMALLOC_INTERNAL_FFS(x) - 1;
}
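A quick, hedged illustration of the convention the comment above spells out: the internal helpers are zero-based and require a non-zero argument, whereas POSIX ffs() is one-based and reserves 0 as the "no bit set" sentinel.

#include <assert.h>
#include <strings.h>   /* POSIX ffs() */

/* Assumes ffs_u() as defined above is visible here. */
static void
ffs_convention_example(void) {
	unsigned x = 0x28;       /* bits 3 and 5 set */
	assert(ffs_u(x) == 3);   /* zero-based position of the lowest set bit */
	assert(ffs(x) == 4);     /* POSIX: one larger */
	/* ffs_u(0) is undefined here; POSIX ffs(0) returns 0. */
}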
#ifdef JEMALLOC_INTERNAL_POPCOUNTL
BIT_UTIL_INLINE unsigned
popcount_lu(unsigned long bitmap) {
return JEMALLOC_INTERNAL_POPCOUNTL(bitmap);
#define DO_FLS_SLOW(x, suffix) do { \
util_assume(x != 0); \
x |= (x >> 1); \
x |= (x >> 2); \
x |= (x >> 4); \
x |= (x >> 8); \
x |= (x >> 16); \
if (sizeof(x) > 4) { \
/* \
* If sizeof(x) is 4, then the expression "x >> 32" \
* will generate compiler warnings even if the code \
* never executes. This circumvents the warning, and \
* gets compiled out in optimized builds. \
*/ \
int constant_32 = sizeof(x) * 4; \
x |= (x >> constant_32); \
} \
x++; \
if (x == 0) { \
return 8 * sizeof(x) - 1; \
} \
return ffs_##suffix(x) - 1; \
} while(0)
static inline unsigned
fls_llu_slow(unsigned long long x) {
DO_FLS_SLOW(x, llu);
}
static inline unsigned
fls_lu_slow(unsigned long x) {
DO_FLS_SLOW(x, lu);
}
static inline unsigned
fls_u_slow(unsigned x) {
DO_FLS_SLOW(x, u);
}
#undef DO_FLS_SLOW
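As a sanity check on the portable fallback above, here is a hand trace (a hedged illustration, assuming ffs_u() behaves as defined earlier): smearing propagates the highest set bit into every lower position, the increment rounds up to the next power of two, and the final ffs recovers the exponent.

#include <assert.h>

static void
fls_slow_trace(void) {
	unsigned x = 40;   /* 0b101000, highest set bit is bit 5 */
	x |= (x >> 1);     /* 0b111100 */
	x |= (x >> 2);     /* 0b111111 */
	x |= (x >> 4);     /* unchanged from here on */
	x |= (x >> 8);
	x |= (x >> 16);
	x++;               /* 64 == 1 << 6 */
	assert(x == 64);
	/* ffs_u(64) == 6, so DO_FLS_SLOW returns 6 - 1 == 5 == fls_u_slow(40). */
}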
#ifdef JEMALLOC_HAVE_BUILTIN_CLZ
static inline unsigned
fls_llu(unsigned long long x) {
util_assume(x != 0);
/*
* Note that the xor here is more naturally written as subtraction; the
* last bit set is the number of bits in the type minus the number of
* leading zero bits. But GCC implements that as:
* bsr edi, edi
* mov eax, 31
* xor edi, 31
* sub eax, edi
* If we write it as xor instead, then we get
* bsr eax, edi
* as desired.
*/
return (8 * sizeof(x) - 1) ^ __builtin_clzll(x);
}
static inline unsigned
fls_lu(unsigned long x) {
util_assume(x != 0);
return (8 * sizeof(x) - 1) ^ __builtin_clzl(x);
}
static inline unsigned
fls_u(unsigned x) {
util_assume(x != 0);
return (8 * sizeof(x) - 1) ^ __builtin_clz(x);
}
#elif defined(_MSC_VER)
#if LG_SIZEOF_PTR == 3
#define DO_BSR64(bit, x) _BitScanReverse64(&bit, x)
#else
/*
* This never actually runs; we're just dodging a compiler error for the
* never-taken branch where sizeof(void *) == 8.
*/
#define DO_BSR64(bit, x) bit = 0; unreachable()
#endif
#define DO_FLS(x) do { \
if (x == 0) { \
return 8 * sizeof(x); \
} \
unsigned long bit; \
if (sizeof(x) == 4) { \
_BitScanReverse(&bit, (unsigned)x); \
return (unsigned)bit; \
} \
if (sizeof(x) == 8 && sizeof(void *) == 8) { \
DO_BSR64(bit, x); \
return (unsigned)bit; \
} \
if (sizeof(x) == 8 && sizeof(void *) == 4) { \
/* Dodge a compiler warning, as above. */ \
int constant_32 = sizeof(x) * 4; \
if (_BitScanReverse(&bit, \
(unsigned)(x >> constant_32))) { \
return 32 + (unsigned)bit; \
} else { \
_BitScanReverse(&bit, (unsigned)x); \
return (unsigned)bit; \
} \
} \
unreachable(); \
} while (0)
static inline unsigned
fls_llu(unsigned long long x) {
DO_FLS(x);
}
static inline unsigned
fls_lu(unsigned long x) {
DO_FLS(x);
}
static inline unsigned
fls_u(unsigned x) {
DO_FLS(x);
}
#undef DO_FLS
#undef DO_BSR64
#else
static inline unsigned
fls_llu(unsigned long long x) {
return fls_llu_slow(x);
}
static inline unsigned
fls_lu(unsigned long x) {
return fls_lu_slow(x);
}
static inline unsigned
fls_u(unsigned x) {
return fls_u_slow(x);
}
#endif
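The xor form in the CLZ-based branch above relies on the fact that for a w-bit type, w - 1 is all ones, so subtracting clz(x) from it is the same as xoring with it; a small hedged check (GCC/Clang builtin assumed):

#include <assert.h>

static void
fls_xor_identity(void) {
	unsigned x = 0x90;            /* highest set bit is bit 7 */
	int clz = __builtin_clz(x);   /* 24 for a 32-bit unsigned */
	assert(31 - clz == (31 ^ clz));
	assert((31 ^ clz) == 7);
}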
#if LG_SIZEOF_LONG_LONG > 3
# error "Haven't implemented popcount for 16-byte ints."
#endif
#define DO_POPCOUNT(x, type) do { \
/* \
* Algorithm from an old AMD optimization reference manual. \
* We're putting a little bit more work than you might expect \
* into the no-intrinsic case, since we only support the \
* GCC intrinsics spelling of popcount (for now). Detecting \
* whether or not the popcount builtin is actually usable in \
* MSVC is nontrivial. \
*/ \
\
type bmul = (type)0x0101010101010101ULL; \
\
/* \
* Replace each 2 bits with the sideways sum of the original \
* values. 0x5 = 0b0101. \
* \
* You might expect this to be: \
* x = (x & 0x55...) + ((x >> 1) & 0x55...). \
* That costs an extra mask relative to this, though. \
*/ \
x = x - ((x >> 1) & (0x55U * bmul)); \
/* Replace each 4 bits with their sideways sum. 0x3 = 0b0011. */\
x = (x & (bmul * 0x33U)) + ((x >> 2) & (bmul * 0x33U)); \
/* \
* Replace each 8 bits with their sideways sum. Note that we \
* can't overflow within each 4-bit sum here, so we can skip \
* the initial mask. \
*/ \
x = (x + (x >> 4)) & (bmul * 0x0FU); \
/* \
* None of the partial sums in this multiplication (viewed in \
* base-256) can overflow into the next digit. So the least \
* significant byte of the product will be the least \
* significant byte of the original value, the second least \
* significant byte will be the sum of the two least \
* significant bytes of the original value, and so on. \
* Importantly, the high byte will be the byte-wise sum of all \
* the bytes of the original value. \
*/ \
x = x * bmul; \
x >>= ((sizeof(x) - 1) * 8); \
return (unsigned)x; \
} while(0)
static inline unsigned
popcount_u_slow(unsigned bitmap) {
DO_POPCOUNT(bitmap, unsigned);
}
static inline unsigned
popcount_lu_slow(unsigned long bitmap) {
DO_POPCOUNT(bitmap, unsigned long);
}
static inline unsigned
popcount_llu_slow(unsigned long long bitmap) {
DO_POPCOUNT(bitmap, unsigned long long);
}
#undef DO_POPCOUNT
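A few hedged spot checks of the fallback above (the intrinsic path is taken instead whenever JEMALLOC_INTERNAL_POPCOUNT* is configured); popcount_u_slow() is assumed to be visible as defined earlier:

#include <assert.h>

static void
popcount_example(void) {
	assert(popcount_u_slow(0u) == 0);
	assert(popcount_u_slow(0xF0F0F0F0u) == 16);
	assert(popcount_u_slow(0xFFFFFFFFu) == 32);
}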
static inline unsigned
popcount_u(unsigned bitmap) {
#ifdef JEMALLOC_INTERNAL_POPCOUNT
return JEMALLOC_INTERNAL_POPCOUNT(bitmap);
#else
return popcount_u_slow(bitmap);
#endif
}
static inline unsigned
popcount_lu(unsigned long bitmap) {
#ifdef JEMALLOC_INTERNAL_POPCOUNTL
return JEMALLOC_INTERNAL_POPCOUNTL(bitmap);
#else
return popcount_lu_slow(bitmap);
#endif
}
static inline unsigned
popcount_llu(unsigned long long bitmap) {
#ifdef JEMALLOC_INTERNAL_POPCOUNTLL
return JEMALLOC_INTERNAL_POPCOUNTLL(bitmap);
#else
return popcount_llu_slow(bitmap);
#endif
}
/*
* Clears the first set bit in bitmap, and returns
* the position of that bit. bitmap *must not* be 0.
*/
BIT_UTIL_INLINE size_t
static inline size_t
cfs_lu(unsigned long* bitmap) {
size_t bit = ffs_lu(*bitmap) - 1;
util_assume(*bitmap != 0);
size_t bit = ffs_lu(*bitmap);
*bitmap ^= ZU(1) << bit;
return bit;
}
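A brief, hedged usage sketch of cfs_lu() with the new zero-based ffs, assuming the definition above is visible:

#include <assert.h>
#include <stddef.h>

static void
cfs_example(void) {
	unsigned long map = 0x28;   /* bits 3 and 5 set */
	size_t bit = cfs_lu(&map);
	assert(bit == 3);           /* lowest set bit is returned... */
	assert(map == 0x20);        /* ...and cleared in place */
}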
BIT_UTIL_INLINE unsigned
ffs_zu(size_t bitmap) {
static inline unsigned
ffs_zu(size_t x) {
#if LG_SIZEOF_PTR == LG_SIZEOF_INT
return ffs_u(bitmap);
return ffs_u(x);
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
return ffs_lu(bitmap);
return ffs_lu(x);
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
return ffs_llu(bitmap);
return ffs_llu(x);
#else
#error No implementation for size_t ffs()
#endif
}
BIT_UTIL_INLINE unsigned
ffs_u64(uint64_t bitmap) {
static inline unsigned
fls_zu(size_t x) {
#if LG_SIZEOF_PTR == LG_SIZEOF_INT
return fls_u(x);
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
return fls_lu(x);
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
return fls_llu(x);
#else
#error No implementation for size_t fls()
#endif
}
static inline unsigned
ffs_u64(uint64_t x) {
#if LG_SIZEOF_LONG == 3
return ffs_lu(bitmap);
return ffs_lu(x);
#elif LG_SIZEOF_LONG_LONG == 3
return ffs_llu(bitmap);
return ffs_llu(x);
#else
#error No implementation for 64-bit ffs()
#endif
}
BIT_UTIL_INLINE unsigned
ffs_u32(uint32_t bitmap) {
static inline unsigned
fls_u64(uint64_t x) {
#if LG_SIZEOF_LONG == 3
return fls_lu(x);
#elif LG_SIZEOF_LONG_LONG == 3
return fls_llu(x);
#else
#error No implementation for 64-bit fls()
#endif
}
static inline unsigned
ffs_u32(uint32_t x) {
#if LG_SIZEOF_INT == 2
return ffs_u(bitmap);
return ffs_u(x);
#else
#error No implementation for 32-bit ffs()
#endif
return ffs_u(bitmap);
return ffs_u(x);
}
BIT_UTIL_INLINE uint64_t
static inline unsigned
fls_u32(uint32_t x) {
#if LG_SIZEOF_INT == 2
return fls_u(x);
#else
#error No implementation for 32-bit fls()
#endif
return fls_u(x);
}
static inline uint64_t
pow2_ceil_u64(uint64_t x) {
#if (defined(__amd64__) || defined(__x86_64__) || defined(JEMALLOC_HAVE_BUILTIN_CLZ))
if(unlikely(x <= 1)) {
if (unlikely(x <= 1)) {
return x;
}
size_t msb_on_index;
#if (defined(__amd64__) || defined(__x86_64__))
asm ("bsrq %1, %0"
: "=r"(msb_on_index) // Outputs.
: "r"(x-1) // Inputs.
);
#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
msb_on_index = (63 ^ __builtin_clzll(x - 1));
#endif
size_t msb_on_index = fls_u64(x - 1);
/*
* Range-check; it's on the callers to ensure that the result of this
* call won't overflow.
*/
assert(msb_on_index < 63);
return 1ULL << (msb_on_index + 1);
#else
x--;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
x |= x >> 32;
x++;
return x;
#endif
}
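The fast path now leans on fls_u64() instead of inline asm; a couple of hedged spot checks of the intended behavior, assuming pow2_ceil_u64() as defined above:

#include <assert.h>
#include <stdint.h>

static void
pow2_ceil_example(void) {
	assert(pow2_ceil_u64(1) == 1);
	assert(pow2_ceil_u64(5) == 8);
	assert(pow2_ceil_u64(4096) == 4096);
	assert(pow2_ceil_u64((1ULL << 40) + 1) == (1ULL << 41));
}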
BIT_UTIL_INLINE uint32_t
static inline uint32_t
pow2_ceil_u32(uint32_t x) {
#if ((defined(__i386__) || defined(JEMALLOC_HAVE_BUILTIN_CLZ)) && (!defined(__s390__)))
if(unlikely(x <= 1)) {
return x;
if (unlikely(x <= 1)) {
return x;
}
size_t msb_on_index;
#if (defined(__i386__))
asm ("bsr %1, %0"
: "=r"(msb_on_index) // Outputs.
: "r"(x-1) // Inputs.
);
#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
msb_on_index = (31 ^ __builtin_clz(x - 1));
#endif
size_t msb_on_index = fls_u32(x - 1);
/* As above. */
assert(msb_on_index < 31);
return 1U << (msb_on_index + 1);
#else
x--;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
x++;
return x;
#endif
}
/* Compute the smallest power of 2 that is >= x. */
BIT_UTIL_INLINE size_t
static inline size_t
pow2_ceil_zu(size_t x) {
#if (LG_SIZEOF_PTR == 3)
return pow2_ceil_u64(x);
@ -149,77 +388,21 @@ pow2_ceil_zu(size_t x) {
#endif
}
#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
BIT_UTIL_INLINE unsigned
static inline unsigned
lg_floor(size_t x) {
size_t ret;
assert(x != 0);
asm ("bsr %1, %0"
: "=r"(ret) // Outputs.
: "r"(x) // Inputs.
);
assert(ret < UINT_MAX);
return (unsigned)ret;
}
#elif (defined(_MSC_VER))
BIT_UTIL_INLINE unsigned
lg_floor(size_t x) {
unsigned long ret;
assert(x != 0);
util_assume(x != 0);
#if (LG_SIZEOF_PTR == 3)
_BitScanReverse64(&ret, x);
#elif (LG_SIZEOF_PTR == 2)
_BitScanReverse(&ret, x);
return fls_u64(x);
#else
# error "Unsupported type size for lg_floor()"
#endif
assert(ret < UINT_MAX);
return (unsigned)ret;
}
#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
BIT_UTIL_INLINE unsigned
lg_floor(size_t x) {
assert(x != 0);
#if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x);
#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x);
#else
# error "Unsupported type size for lg_floor()"
return fls_u32(x);
#endif
}
#else
BIT_UTIL_INLINE unsigned
lg_floor(size_t x) {
assert(x != 0);
x |= (x >> 1);
x |= (x >> 2);
x |= (x >> 4);
x |= (x >> 8);
x |= (x >> 16);
#if (LG_SIZEOF_PTR == 3)
x |= (x >> 32);
#endif
if (x == SIZE_T_MAX) {
return (8 << LG_SIZEOF_PTR) - 1;
}
x++;
return ffs_zu(x) - 2;
}
#endif
BIT_UTIL_INLINE unsigned
static inline unsigned
lg_ceil(size_t x) {
return lg_floor(x) + ((x & (x - 1)) == 0 ? 0 : 1);
}
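Hedged spot checks of the simplified lg_floor()/lg_ceil() above, assuming both definitions are visible:

#include <assert.h>
#include <stddef.h>

static void
lg_example(void) {
	assert(lg_floor((size_t)1) == 0);
	assert(lg_floor((size_t)4096) == 12);
	assert(lg_floor((size_t)4097) == 12);   /* floor rounds down */
	assert(lg_ceil((size_t)4096) == 12);    /* exact power of two */
	assert(lg_ceil((size_t)4097) == 13);    /* otherwise rounds up */
}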
#undef BIT_UTIL_INLINE
/* A compile-time version of lg_floor and lg_ceil. */
#define LG_FLOOR_1(x) 0
#define LG_FLOOR_2(x) (x < (1ULL << 1) ? LG_FLOOR_1(x) : 1 + LG_FLOOR_1(x >> 1))

View File

@ -1,7 +1,6 @@
#ifndef JEMALLOC_INTERNAL_BITMAP_H
#define JEMALLOC_INTERNAL_BITMAP_H
#include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/sc.h"
@ -9,9 +8,9 @@ typedef unsigned long bitmap_t;
#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
#if LG_SLAB_MAXREGS > LG_CEIL(SC_NSIZES)
#if SC_LG_SLAB_MAXREGS > LG_CEIL(SC_NSIZES)
/* Maximum bitmap bit count is determined by maximum regions per slab. */
# define LG_BITMAP_MAXBITS LG_SLAB_MAXREGS
# define LG_BITMAP_MAXBITS SC_LG_SLAB_MAXREGS
#else
/* Maximum bitmap bit count is determined by number of extent size classes. */
# define LG_BITMAP_MAXBITS LG_CEIL(SC_NSIZES)
@ -273,7 +272,7 @@ bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
}
return bitmap_ffu(bitmap, binfo, sib_base);
}
bit += ((size_t)(ffs_lu(group_masked) - 1)) <<
bit += ((size_t)ffs_lu(group_masked)) <<
(lg_bits_per_group - LG_BITMAP_GROUP_NBITS);
}
assert(bit >= min_bit);
@ -285,9 +284,9 @@ bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
- 1);
size_t bit;
do {
bit = ffs_lu(g);
if (bit != 0) {
return (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
if (g != 0) {
bit = ffs_lu(g);
return (i << LG_BITMAP_GROUP_NBITS) + bit;
}
i++;
g = bitmap[i];
@ -308,20 +307,20 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
#ifdef BITMAP_USE_TREE
i = binfo->nlevels - 1;
g = bitmap[binfo->levels[i].group_offset];
bit = ffs_lu(g) - 1;
bit = ffs_lu(g);
while (i > 0) {
i--;
g = bitmap[binfo->levels[i].group_offset + bit];
bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1);
bit = (bit << LG_BITMAP_GROUP_NBITS) + ffs_lu(g);
}
#else
i = 0;
g = bitmap[0];
while ((bit = ffs_lu(g)) == 0) {
while (g == 0) {
i++;
g = bitmap[i];
}
bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
bit = (i << LG_BITMAP_GROUP_NBITS) + ffs_lu(g);
#endif
bitmap_set(bitmap, binfo, bit);
return bit;

Some files were not shown because too many files have changed in this diff.