Correct bytes_per_key computation. (#12897)

Change the calculation method of bytes_per_key to make it closer to
the true average key size. The calculation method is as follows:

mh->bytes_per_key = mh->total_keys ? (mh->dataset / mh->total_keys) : 0;
This commit is contained in:
Chen Tianjie 2024-01-12 11:58:53 +08:00 committed by GitHub
parent 964f4a4576
commit 87786342a5
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
1 changed file with 1 addition and 1 deletion

View File

@@ -1274,7 +1274,7 @@ struct redisMemOverhead *getMemoryOverheadData(void) {
     if (zmalloc_used > mh->startup_allocated)
         net_usage = zmalloc_used - mh->startup_allocated;
     mh->dataset_perc = (float)mh->dataset*100/net_usage;
-    mh->bytes_per_key = mh->total_keys ? (net_usage / mh->total_keys) : 0;
+    mh->bytes_per_key = mh->total_keys ? (mh->dataset / mh->total_keys) : 0;

    return mh;
}