Allocate Lua VM code with jemalloc instead of libc, and count it as used memory (#13133)

## Background
1. Currently, Lua memory allocation does not pass through Redis's
zmalloc.c, so Redis maxmemory cannot limit memory problems caused by
users abusing lua, since the lua VM memory is not part of used_memory.

2. jemalloc is much better than libc (in fragmentation and speed), and
we know it and trust it, so we are going to use jemalloc instead of
libc to allocate the lua VM memory and count it as used memory.

## Process
In this PR, we use jemalloc in lua:
1. Create an arena for all lua VMs (script and function), which is
shared, in order to avoid blocking the defragger.
2. Create a bound tcache for each lua VM. The lua VM and the main
thread are by default in the same tcache, and without an isolated
tcache, lua may be handed memory from the tcache that has just been
freed by the main thread, and vice versa. On the other hand, since the
lua VM might be released in a bio thread while tcaches are not
thread-safe, we need to recreate the tcache every time we recreate the
lua VM. (The jemalloc primitives involved are sketched after this list.)
3. Remove lua memory statistics from the memory fragmentation
statistics, to avoid the effects of lua memory fragmentation.
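
For reference, here is a minimal standalone sketch of the jemalloc
primitives involved. This is not the PR's exact code: it assumes a stock
jemalloc with the unprefixed API, while inside Redis the same calls are
`je_`-prefixed and wrapped by the `zmalloc_*_with_flags()` helpers.

```c
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    /* One dedicated arena, created once at startup (cf. luaEnvInit). */
    unsigned arena;
    size_t sz = sizeof(arena);
    if (mallctl("arenas.create", &arena, &sz, NULL, 0)) return 1;

    /* One private tcache per lua VM, recreated together with the VM. */
    unsigned tcache;
    sz = sizeof(tcache);
    if (mallctl("tcache.create", &tcache, &sz, NULL, 0)) return 1;

    /* Every lua allocation carries both flags, so it stays in the lua
     * arena and never mixes with the main thread's tcache. */
    int flags = MALLOCX_ARENA(arena) | MALLOCX_TCACHE(tcache);
    void *p = mallocx(64, flags);
    p = rallocx(p, 128, flags);
    dallocx(p, flags);

    /* When the lua VM is closed, the private tcache is destroyed. */
    mallctl("tcache.destroy", NULL, NULL, &tcache, sizeof(tcache));
    printf("lua arena %u, tcache %u\n", arena, tcache);
    return 0;
}
```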

## Other
Add the following new fields to `INFO DEBUG` (we may promote them to
`INFO MEMORY` some day):
1. allocator_allocated_lua: total number of bytes allocated from the lua arena
2. allocator_active_lua: total number of bytes in active pages allocated
from the lua arena
3. allocator_resident_lua: maximum number of bytes in physically
resident data pages mapped in the lua arena
4. allocator_frag_bytes_lua: fragmentation bytes in the lua arena
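
For context on the last field: `allocator_frag_bytes_lua` is derived the
same way as the global `allocator_frag_bytes`, by summing the unused
regions across all small bins of the lua arena (see the `zmalloc.c` hunk
below), roughly `frag_smallbins_bytes = sum over small bins of
(curslabs * nregs - curregs) * reg_size`.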

This is oranagra's idea, and I got some help from sundb.

This solves the third point in #13102.

---------

Co-authored-by: debing.sun <debing.sun@redis.com>
Co-authored-by: Oran Agra <oran@redislabs.com>
Binbin 2024-04-16 17:43:33 +08:00 committed by GitHub
parent e3550f01dd
commit 804110a487
12 changed files with 276 additions and 65 deletions

View File

@@ -468,9 +468,9 @@ Script
 The script unit is composed of 3 units:
 * `script.c` - integration of scripts with Redis (commands execution, set replication/resp, ...)
-* `script_lua.c` - responsible to execute Lua code, uses script.c to interact with Redis from within the Lua code.
-* `function_lua.c` - contains the Lua engine implementation, uses script_lua.c to execute the Lua code.
-* `functions.c` - contains Redis Functions implementation (FUNCTION command), uses functions_lua.c if the function it wants to invoke needs the Lua engine.
+* `script_lua.c` - responsible to execute Lua code, uses `script.c` to interact with Redis from within the Lua code.
+* `function_lua.c` - contains the Lua engine implementation, uses `script_lua.c` to execute the Lua code.
+* `functions.c` - contains Redis Functions implementation (`FUNCTION` command), uses `functions_lua.c` if the function it wants to invoke needs the Lua engine.
 * `eval.c` - contains the `eval` implementation using `script_lua.c` to invoke the Lua code.

View File

@@ -767,7 +767,16 @@ void defragScanCallback(void *privdata, const dictEntry *de) {
  * without the possibility of getting any results. */
 float getAllocatorFragmentation(size_t *out_frag_bytes) {
     size_t resident, active, allocated, frag_smallbins_bytes;
-    zmalloc_get_allocator_info(&allocated, &active, &resident, NULL, NULL, &frag_smallbins_bytes);
+    zmalloc_get_allocator_info(1, &allocated, &active, &resident, NULL, NULL, &frag_smallbins_bytes);
+    if (server.lua_arena != UINT_MAX) {
+        size_t lua_resident, lua_active, lua_allocated, lua_frag_smallbins_bytes;
+        zmalloc_get_allocator_info_by_arena(server.lua_arena, 0, &lua_allocated, &lua_active, &lua_resident, &lua_frag_smallbins_bytes);
+        resident -= lua_resident;
+        active -= lua_active;
+        allocated -= lua_allocated;
+        frag_smallbins_bytes -= lua_frag_smallbins_bytes;
+    }
     /* Calculate the fragmentation ratio as the proportion of wasted memory in small
      * bins (which are defraggable) relative to the total allocated memory (including large bins).

View File

@@ -17,6 +17,9 @@
 #include <lua.h>
 #include <lauxlib.h>
 #include <lualib.h>
+#if defined(USE_JEMALLOC)
+#include <lstate.h>
+#endif
 #include <ctype.h>
 #include <math.h>
@@ -162,14 +165,18 @@ int luaRedisReplicateCommandsCommand(lua_State *lua) {
  *
  * However it is simpler to just call scriptingReset() that does just that. */
 void scriptingInit(int setup) {
-    lua_State *lua = lua_open();
     if (setup) {
         lctx.lua_client = NULL;
         server.script_disable_deny_script = 0;
         ldbInit();
     }
+    lua_State *lua = createLuaState();
+    if (lua == NULL) {
+        serverLog(LL_WARNING, "Failed creating the lua VM.");
+        exit(1);
+    }
     /* Initialize a dictionary we use to map SHAs to scripts.
      * Initialize a list we use for lua script evictions, it shares the
      * sha with the dictionary, so free fn is not set. */
@@ -252,16 +259,11 @@ void freeLuaScriptsSync(dict *lua_scripts, list *lua_scripts_lru_list, lua_State
     listRelease(lua_scripts_lru_list);
     lua_close(lua);
-#if !defined(USE_LIBC)
-    /* The lua interpreter may hold a lot of memory internally, and lua is
-     * using libc. libc may take a bit longer to return the memory to the OS,
-     * so after lua_close, we call malloc_trim try to purge it earlier.
-     *
-     * We do that only when Redis itself does not use libc. When Lua and Redis
-     * use different allocators, one won't use the fragmentation holes of the
-     * other, and released memory can take a long time until it is returned to
-     * the OS. */
-    zlibc_trim();
+#if defined(USE_JEMALLOC)
+    /* When lua is closed, destroy the previously used private tcache. */
+    void *ud = (global_State*)G(lua)->ud;
+    unsigned int lua_tcache = (unsigned int)(uintptr_t)ud;
+    je_mallctl("tcache.destroy", NULL, NULL, (void *)&lua_tcache, sizeof(unsigned int));
 #endif
 }

View File

@@ -402,7 +402,7 @@ static int luaRegisterFunction(lua_State *lua) {
 /* Initialize Lua engine, should be called once on start. */
 int luaEngineInitEngine(void) {
     luaEngineCtx *lua_engine_ctx = zmalloc(sizeof(*lua_engine_ctx));
-    lua_engine_ctx->lua = lua_open();
+    lua_engine_ctx->lua = createLuaState();
     luaRegisterRedisAPI(lua_engine_ctx->lua);

View File

@@ -1162,10 +1162,15 @@ struct redisMemOverhead *getMemoryOverheadData(void) {
         (float)server.cron_malloc_stats.process_rss / server.cron_malloc_stats.zmalloc_used;
     mh->total_frag_bytes =
         server.cron_malloc_stats.process_rss - server.cron_malloc_stats.zmalloc_used;
-    mh->allocator_frag =
-        (float)server.cron_malloc_stats.allocator_frag_smallbins_bytes / server.cron_malloc_stats.allocator_allocated + 1;
-    mh->allocator_frag_bytes =
-        server.cron_malloc_stats.allocator_frag_smallbins_bytes;
+    /* Starting with redis 8.0, the lua memory is part of the total memory usage
+     * of redis, and that includes RSS and all other memory metrics. We only want
+     * to deduct it from active defrag. */
+    size_t frag_smallbins_bytes =
+        server.cron_malloc_stats.allocator_frag_smallbins_bytes - server.cron_malloc_stats.lua_allocator_frag_smallbins_bytes;
+    size_t allocated =
+        server.cron_malloc_stats.allocator_allocated - server.cron_malloc_stats.lua_allocator_allocated;
+    mh->allocator_frag = (float)frag_smallbins_bytes / allocated + 1;
+    mh->allocator_frag_bytes = frag_smallbins_bytes;
     mh->allocator_rss =
         (float)server.cron_malloc_stats.allocator_resident / server.cron_malloc_stats.allocator_active;
     mh->allocator_rss_bytes =

View File

@@ -10,6 +10,9 @@
 #include "script.h"
 #include "cluster.h"
 #include <lua.h>
+#include <lauxlib.h>
 
 scriptFlag scripts_flags_def[] = {
     {.flag = SCRIPT_FLAG_NO_WRITES, .str = "no-writes"},
     {.flag = SCRIPT_FLAG_ALLOW_OOM, .str = "allow-oom"},
@@ -39,6 +42,63 @@ static void enterScriptTimedoutMode(scriptRunCtx *run_ctx) {
     blockingOperationStarts();
 }
 
+#if defined(USE_JEMALLOC)
+/* When lua uses jemalloc, pass in luaAlloc as a parameter of lua_newstate. */
+static void *luaAlloc(void *ud, void *ptr, size_t osize, size_t nsize) {
+    UNUSED(osize);
+    unsigned int tcache = (unsigned int)(uintptr_t)ud;
+    if (nsize == 0) {
+        zfree_with_flags(ptr, MALLOCX_ARENA(server.lua_arena) | MALLOCX_TCACHE(tcache));
+        return NULL;
+    } else {
+        return zrealloc_with_flags(ptr, nsize, MALLOCX_ARENA(server.lua_arena) | MALLOCX_TCACHE(tcache));
+    }
+}
+
+/* Create a lua interpreter, and use jemalloc as lua memory allocator. */
+lua_State *createLuaState(void) {
+    /* Every time a lua VM is created, a new private tcache is created for use.
+     * This private tcache will be destroyed after the lua VM is closed. */
+    unsigned int tcache;
+    size_t sz = sizeof(unsigned int);
+    int err = je_mallctl("tcache.create", (void *)&tcache, &sz, NULL, 0);
+    if (err) {
+        serverLog(LL_WARNING, "Failed creating the lua jemalloc tcache.");
+        exit(1);
+    }
+
+    /* We pass tcache as ud so that it is not bound to the server. */
+    return lua_newstate(luaAlloc, (void *)(uintptr_t)tcache);
+}
+
+/* Under jemalloc we need to create a new arena for lua to avoid blocking
+ * the defragger. */
+void luaEnvInit(void) {
+    unsigned int arena;
+    size_t sz = sizeof(unsigned int);
+    int err = je_mallctl("arenas.create", (void *)&arena, &sz, NULL, 0);
+    if (err) {
+        serverLog(LL_WARNING, "Failed creating the lua jemalloc arena.");
+        exit(1);
+    }
+    server.lua_arena = arena;
+}
+#else
+/* Create a lua interpreter and use glibc (default) as lua memory allocator. */
+lua_State *createLuaState(void) {
+    return lua_open();
+}
+
+/* There is nothing to set up under glibc. */
+void luaEnvInit(void) {
+    server.lua_arena = UINT_MAX;
+}
+#endif
+
 int scriptIsTimedout(void) {
     return scriptIsRunning() && (curr_run_ctx->flags & SCRIPT_TIMEDOUT);
 }

View File

@@ -72,6 +72,8 @@ typedef struct scriptFlag {
 extern scriptFlag scripts_flags_def[];
 
+void luaEnvInit(void);
+lua_State *createLuaState(void);
 uint64_t scriptFlagsToCmdFlags(uint64_t cmd_flags, uint64_t script_flags);
 int scriptPrepareForRun(scriptRunCtx *r_ctx, client *engine_client, client *caller, const char *funcname, uint64_t script_flags, int ro);
 void scriptResetRun(scriptRunCtx *r_ctx);

View File

@@ -1207,21 +1207,25 @@ void cronUpdateMemoryStats(void) {
          * The fragmentation ratio it'll show is potentially more accurate
          * it excludes other RSS pages such as: shared libraries, LUA and other non-zmalloc
          * allocations, and allocator reserved pages that can be purged (all not actual frag) */
-        zmalloc_get_allocator_info(&server.cron_malloc_stats.allocator_allocated,
+        zmalloc_get_allocator_info(1,
+                                   &server.cron_malloc_stats.allocator_allocated,
                                    &server.cron_malloc_stats.allocator_active,
                                    &server.cron_malloc_stats.allocator_resident,
                                    NULL,
                                    &server.cron_malloc_stats.allocator_muzzy,
                                    &server.cron_malloc_stats.allocator_frag_smallbins_bytes);
+        if (server.lua_arena != UINT_MAX) {
+            zmalloc_get_allocator_info_by_arena(server.lua_arena,
+                                                0,
+                                                &server.cron_malloc_stats.lua_allocator_allocated,
+                                                &server.cron_malloc_stats.lua_allocator_active,
+                                                &server.cron_malloc_stats.lua_allocator_resident,
+                                                &server.cron_malloc_stats.lua_allocator_frag_smallbins_bytes);
+        }
         /* in case the allocator isn't providing these stats, fake them so that
          * fragmentation info still shows some (inaccurate metrics) */
-        if (!server.cron_malloc_stats.allocator_resident) {
-            /* LUA memory isn't part of zmalloc_used, but it is part of the process RSS,
-             * so we must deduct it in order to be able to calculate correct
-             * "allocator fragmentation" ratio */
-            size_t lua_memory = evalMemory();
-            server.cron_malloc_stats.allocator_resident = server.cron_malloc_stats.process_rss - lua_memory;
-        }
+        if (!server.cron_malloc_stats.allocator_resident)
+            server.cron_malloc_stats.allocator_resident = server.cron_malloc_stats.process_rss;
         if (!server.cron_malloc_stats.allocator_active)
             server.cron_malloc_stats.allocator_active = server.cron_malloc_stats.allocator_resident;
         if (!server.cron_malloc_stats.allocator_allocated)
@@ -2756,6 +2760,7 @@ void initServer(void) {
         server.maxmemory_policy = MAXMEMORY_NO_EVICTION;
     }
 
+    luaEnvInit();
    scriptingInit(1);
    if (functionsInit() == C_ERR) {
        serverPanic("Functions initialization failed, check the server logs.");
@@ -6120,7 +6125,11 @@ sds genRedisInfoString(dict *section_dict, int all_sections, int everything) {
             "eventloop_duration_aof_sum:%llu\r\n", server.duration_stats[EL_DURATION_TYPE_AOF].sum,
             "eventloop_duration_cron_sum:%llu\r\n", server.duration_stats[EL_DURATION_TYPE_CRON].sum,
             "eventloop_duration_max:%llu\r\n", server.duration_stats[EL_DURATION_TYPE_EL].max,
-            "eventloop_cmd_per_cycle_max:%lld\r\n", server.el_cmd_cnt_max));
+            "eventloop_cmd_per_cycle_max:%lld\r\n", server.el_cmd_cnt_max,
+            "allocator_allocated_lua:%zu\r\n", server.cron_malloc_stats.lua_allocator_allocated,
+            "allocator_active_lua:%zu\r\n", server.cron_malloc_stats.lua_allocator_active,
+            "allocator_resident_lua:%zu\r\n", server.cron_malloc_stats.lua_allocator_resident,
+            "allocator_frag_bytes_lua:%zu\r\n", server.cron_malloc_stats.lua_allocator_frag_smallbins_bytes));
     }
 
     return info;

View File

@@ -1455,6 +1455,10 @@ struct malloc_stats {
     size_t allocator_resident;
     size_t allocator_muzzy;
     size_t allocator_frag_smallbins_bytes;
+    size_t lua_allocator_allocated;
+    size_t lua_allocator_active;
+    size_t lua_allocator_resident;
+    size_t lua_allocator_frag_smallbins_bytes;
 };
 
 /*-----------------------------------------------------------------------------
@@ -2008,6 +2012,7 @@ struct redisServer {
     int cluster_drop_packet_filter; /* Debug config that allows tactically
                                      * dropping packets of a specific type */
     /* Scripting */
+    unsigned int lua_arena; /* eval lua arena used in jemalloc. */
     mstime_t busy_reply_threshold; /* Script / module timeout in milliseconds */
     int pre_command_oom_state; /* OOM before command (script?) was started */
     int script_disable_deny_script; /* Allow running commands marked "noscript" inside a script. */

View File

@@ -63,6 +63,7 @@ void zlibc_free(void *ptr) {
 #define realloc(ptr,size) je_realloc(ptr,size)
 #define free(ptr) je_free(ptr)
 #define mallocx(size,flags) je_mallocx(size,flags)
+#define rallocx(ptr,size,flags) je_rallocx(ptr,size,flags)
 #define dallocx(ptr,flags) je_dallocx(ptr,flags)
 #endif
@@ -145,6 +146,53 @@ void *zmalloc_usable(size_t size, size_t *usable) {
     return ptr;
 }
 
+#if defined(USE_JEMALLOC)
+void *zmalloc_with_flags(size_t size, int flags) {
+    if (size >= SIZE_MAX/2) zmalloc_oom_handler(size);
+    void *ptr = mallocx(size+PREFIX_SIZE, flags);
+    if (!ptr) zmalloc_oom_handler(size);
+    update_zmalloc_stat_alloc(zmalloc_size(ptr));
+    return ptr;
+}
+
+void *zrealloc_with_flags(void *ptr, size_t size, int flags) {
+    /* Not allocating anything, just redirect to free. */
+    if (size == 0 && ptr != NULL) {
+        zfree_with_flags(ptr, flags);
+        return NULL;
+    }
+
+    /* Not freeing anything, just redirect to malloc. */
+    if (ptr == NULL)
+        return zmalloc_with_flags(size, flags);
+
+    /* Possible overflow, return NULL, so that the caller can panic or handle a failed allocation. */
+    if (size >= SIZE_MAX/2) {
+        zfree_with_flags(ptr, flags);
+        zmalloc_oom_handler(size);
+        return NULL;
+    }
+
+    size_t oldsize = zmalloc_size(ptr);
+    void *newptr = rallocx(ptr, size, flags);
+    if (newptr == NULL) {
+        zmalloc_oom_handler(size);
+        return NULL;
+    }
+
+    update_zmalloc_stat_free(oldsize);
+    size = zmalloc_size(newptr);
+    update_zmalloc_stat_alloc(size);
+    return newptr;
+}
+
+void zfree_with_flags(void *ptr, int flags) {
+    if (ptr == NULL) return;
+    update_zmalloc_stat_free(zmalloc_size(ptr));
+    dallocx(ptr, flags);
+}
+#endif
+
 /* Allocation and free functions that bypass the thread cache
  * and go straight to the allocator arena bins.
  * Currently implemented only for jemalloc. Used for online defragmentation. */
@@ -609,12 +657,12 @@ size_t zmalloc_get_rss(void) {
 #include "redisassert.h"
 
 #define STRINGIFY_(x) #x
 #define STRINGIFY(x) STRINGIFY_(x)
 
 /* Compute the total memory wasted in fragmentation inside small arena bins.
- * Done by summing the memory in unused regs in all slabs of all small bins. */
-size_t zmalloc_get_frag_smallbins(void) {
+ * Done by summing the memory in unused regs in all slabs of all small bins.
+ *
+ * Pass in arena to get the information of the specified arena, otherwise pass
+ * in MALLCTL_ARENAS_ALL to get all. */
+size_t zmalloc_get_frag_smallbins_by_arena(unsigned int arena) {
     unsigned nbins;
     size_t sz, frag = 0;
     char buf[100];
@@ -626,22 +674,22 @@ size_t zmalloc_get_frag_smallbins(void) {
         uint32_t nregs;
 
         /* The size of the current bin */
-        snprintf(buf, sizeof(buf), "arenas.bin.%d.size", j);
+        snprintf(buf, sizeof(buf), "arenas.bin.%u.size", j);
         sz = sizeof(size_t);
         assert(!je_mallctl(buf, &reg_size, &sz, NULL, 0));
 
         /* Number of used regions in the bin */
-        snprintf(buf, sizeof(buf), "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL) ".bins.%d.curregs", j);
+        snprintf(buf, sizeof(buf), "stats.arenas.%u.bins.%u.curregs", arena, j);
         sz = sizeof(size_t);
         assert(!je_mallctl(buf, &curregs, &sz, NULL, 0));
 
         /* Number of regions per slab */
-        snprintf(buf, sizeof(buf), "arenas.bin.%d.nregs", j);
+        snprintf(buf, sizeof(buf), "arenas.bin.%u.nregs", j);
         sz = sizeof(uint32_t);
         assert(!je_mallctl(buf, &nregs, &sz, NULL, 0));
 
         /* Number of current slabs in the bin */
-        snprintf(buf, sizeof(buf), "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL) ".bins.%d.curslabs", j);
+        snprintf(buf, sizeof(buf), "stats.arenas.%u.bins.%u.curslabs", arena, j);
         sz = sizeof(size_t);
         assert(!je_mallctl(buf, &curslabs, &sz, NULL, 0));
@@ -652,15 +700,30 @@ size_t zmalloc_get_frag_smallbins(void) {
     return frag;
 }
 
-int zmalloc_get_allocator_info(size_t *allocated, size_t *active, size_t *resident,
+/* Compute the total memory wasted in fragmentation inside small arena bins.
+ * Done by summing the memory in unused regs in all slabs of all small bins. */
+size_t zmalloc_get_frag_smallbins(void) {
+    return zmalloc_get_frag_smallbins_by_arena(MALLCTL_ARENAS_ALL);
+}
+
+/* Get memory allocation information from the allocator.
+ *
+ * refresh_stats indicates whether to refresh cached statistics.
+ * For the meaning of the other parameters, please refer to the function implementation
+ * and INFO's allocator_* fields in redis-doc. */
+int zmalloc_get_allocator_info(int refresh_stats, size_t *allocated, size_t *active, size_t *resident,
                                size_t *retained, size_t *muzzy, size_t *frag_smallbins_bytes)
 {
-    uint64_t epoch = 1;
     size_t sz;
     *allocated = *resident = *active = 0;
 
     /* Update the statistics cached by mallctl. */
-    sz = sizeof(epoch);
-    je_mallctl("epoch", &epoch, &sz, &epoch, sz);
+    if (refresh_stats) {
+        uint64_t epoch = 1;
+        sz = sizeof(epoch);
+        je_mallctl("epoch", &epoch, &sz, &epoch, sz);
+    }
 
     sz = sizeof(size_t);
     /* Unlike RSS, this does not include RSS from shared libraries and other non
      * heap mappings. */
@@ -683,8 +746,10 @@ int zmalloc_get_allocator_info(size_t *allocated, size_t *active, size_t *reside
     /* Unlike retained, Muzzy represents memory released with `madvise(..., MADV_FREE)`.
      * These pages will show as RSS for the process, until the OS decides to re-use them. */
     if (muzzy) {
+        char buf[100];
         size_t pmuzzy, page;
-        assert(!je_mallctl("stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL) ".pmuzzy", &pmuzzy, &sz, NULL, 0));
+        snprintf(buf, sizeof(buf), "stats.arenas.%u.pmuzzy", MALLCTL_ARENAS_ALL);
+        assert(!je_mallctl(buf, &pmuzzy, &sz, NULL, 0));
         assert(!je_mallctl("arenas.page", &page, &sz, NULL, 0));
         *muzzy = pmuzzy * page;
     }
@@ -694,6 +759,53 @@ int zmalloc_get_allocator_info(size_t *allocated, size_t *active, size_t *reside
     return 1;
 }
 
+/* Get the specified arena's memory allocation information from the allocator.
+ *
+ * refresh_stats indicates whether to refresh cached statistics.
+ * For the meaning of the other parameters, please refer to the function implementation
+ * and INFO's allocator_* fields in redis-doc. */
+int zmalloc_get_allocator_info_by_arena(unsigned int arena, int refresh_stats, size_t *allocated,
+                                        size_t *active, size_t *resident, size_t *frag_smallbins_bytes)
+{
+    char buf[100];
+    size_t sz;
+    *allocated = *resident = *active = 0;
+
+    /* Update the statistics cached by mallctl. */
+    if (refresh_stats) {
+        uint64_t epoch = 1;
+        sz = sizeof(epoch);
+        je_mallctl("epoch", &epoch, &sz, &epoch, sz);
+    }
+
+    sz = sizeof(size_t);
+    /* Unlike RSS, this does not include RSS from shared libraries and other non
+     * heap mappings. */
+    snprintf(buf, sizeof(buf), "stats.arenas.%u.small.resident", arena);
+    je_mallctl(buf, resident, &sz, NULL, 0);
+
+    /* Unlike resident, this doesn't include the pages jemalloc reserves
+     * for re-use (purge will clean that). */
+    size_t pactive, page;
+    snprintf(buf, sizeof(buf), "stats.arenas.%u.pactive", arena);
+    assert(!je_mallctl(buf, &pactive, &sz, NULL, 0));
+    assert(!je_mallctl("arenas.page", &page, &sz, NULL, 0));
+    *active = pactive * page;
+
+    /* Unlike zmalloc_used_memory, this matches the stats.resident by taking
+     * into account all allocations done by this process (not only zmalloc). */
+    size_t small_allocated, large_allocated;
+    snprintf(buf, sizeof(buf), "stats.arenas.%u.small.allocated", arena);
+    assert(!je_mallctl(buf, &small_allocated, &sz, NULL, 0));
+    *allocated += small_allocated;
+    snprintf(buf, sizeof(buf), "stats.arenas.%u.large.allocated", arena);
+    assert(!je_mallctl(buf, &large_allocated, &sz, NULL, 0));
+    *allocated += large_allocated;
+
+    /* Total size of consumed memory in unused regs in small bins (AKA external fragmentation). */
+    *frag_smallbins_bytes = zmalloc_get_frag_smallbins_by_arena(arena);
+    return 1;
+}
+
 void set_jemalloc_bg_thread(int enable) {
     /* let jemalloc do purging asynchronously, required when there's no traffic
      * after flushdb */
@@ -707,7 +819,7 @@ int jemalloc_purge(void) {
     unsigned narenas = 0;
     size_t sz = sizeof(unsigned);
     if (!je_mallctl("arenas.narenas", &narenas, &sz, NULL, 0)) {
-        snprintf(tmp, sizeof(tmp), "arena.%d.purge", narenas);
+        snprintf(tmp, sizeof(tmp), "arena.%u.purge", narenas);
         if (!je_mallctl(tmp, NULL, 0, NULL, 0))
             return 0;
     }
@@ -716,15 +828,26 @@ int jemalloc_purge(void) {
 #else
 
-int zmalloc_get_allocator_info(size_t *allocated, size_t *active, size_t *resident,
+int zmalloc_get_allocator_info(int refresh_stats, size_t *allocated, size_t *active, size_t *resident,
                                size_t *retained, size_t *muzzy, size_t *frag_smallbins_bytes)
 {
+    UNUSED(refresh_stats);
     *allocated = *resident = *active = *frag_smallbins_bytes = 0;
     if (retained) *retained = 0;
     if (muzzy) *muzzy = 0;
     return 1;
 }
 
+int zmalloc_get_allocator_info_by_arena(unsigned int arena, int refresh_stats, size_t *allocated,
+                                        size_t *active, size_t *resident, size_t *frag_smallbins_bytes)
+{
+    UNUSED(arena);
+    UNUSED(refresh_stats);
+    *allocated = *resident = *active = *frag_smallbins_bytes = 0;
+    return 1;
+}
+
 void set_jemalloc_bg_thread(int enable) {
     ((void)(enable));
 }
@@ -735,15 +858,6 @@ int jemalloc_purge(void) {
 #endif
 
-/* This function provides us access to the libc malloc_trim(). */
-void zlibc_trim(void) {
-#if defined(__GLIBC__) && !defined(USE_LIBC)
-    malloc_trim(0);
-#else
-    return;
-#endif
-}
-
 #if defined(__APPLE__)
 /* For proc_pidinfo() used later in zmalloc_get_smap_bytes_by_field().
  * Note that this file cannot be included in zmalloc.h because it includes

View File

@@ -50,7 +50,6 @@
  */
 #ifndef ZMALLOC_LIB
 #define ZMALLOC_LIB "libc"
-#define USE_LIBC 1
 
 #if !defined(NO_MALLOC_USABLE_SIZE) && \
     (defined(__GLIBC__) || defined(__FreeBSD__) || \
@@ -73,11 +72,6 @@
 #endif
 #endif
 
-/* Includes for malloc_trim(), see zlibc_trim(). */
-#if defined(__GLIBC__) && !defined(USE_LIBC)
-#include <malloc.h>
-#endif
-
 /* We can enable the Redis defrag capabilities only if we are using Jemalloc
  * and the version used is our special version modified for Redis having
  * the ability to return per-allocation fragmentation hints. */
@@ -107,17 +101,24 @@ __attribute__((malloc)) char *zstrdup(const char *s);
 size_t zmalloc_used_memory(void);
 void zmalloc_set_oom_handler(void (*oom_handler)(size_t));
 size_t zmalloc_get_rss(void);
-int zmalloc_get_allocator_info(size_t *allocated, size_t *active, size_t *resident,
+int zmalloc_get_allocator_info(int refresh_stats, size_t *allocated, size_t *active, size_t *resident,
                                size_t *retained, size_t *muzzy, size_t *frag_smallbins_bytes);
+int zmalloc_get_allocator_info_by_arena(unsigned int arena, int refresh_stats, size_t *allocated,
+                                        size_t *active, size_t *resident, size_t *frag_smallbins_bytes);
 void set_jemalloc_bg_thread(int enable);
 int jemalloc_purge(void);
 size_t zmalloc_get_private_dirty(long pid);
 size_t zmalloc_get_smap_bytes_by_field(char *field, long pid);
 size_t zmalloc_get_memory_size(void);
 void zlibc_free(void *ptr);
-void zlibc_trim(void);
 void zmadvise_dontneed(void *ptr);
 
+#if defined(USE_JEMALLOC)
+void *zmalloc_with_flags(size_t size, int flags);
+void *zrealloc_with_flags(void *ptr, size_t size, int flags);
+void zfree_with_flags(void *ptr, int flags);
+#endif
+
 #ifdef HAVE_DEFRAG
 void zfree_no_tcache(void *ptr);
 __attribute__((malloc)) void *zmalloc_no_tcache(size_t size);

View File

@@ -585,12 +585,16 @@ run_solo {defrag} {
             }
         }
         if {$::verbose} {
+            puts "used [s allocator_allocated]"
+            puts "rss [s allocator_active]"
+            puts "frag_bytes [s allocator_frag_bytes]"
             puts "frag $frag"
             puts "misses: $misses"
             puts "hits: $hits"
             puts "max latency $max_latency"
             puts [r latency latest]
             puts [r latency history active-defrag-cycle]
+            puts [r memory malloc-stats]
         }
         assert {$frag < 1.1}
         # due to high fragmentation, 100hz, and active-defrag-cycle-max set to 75,
@@ -720,11 +724,11 @@ run_solo {defrag} {
     }
 }
 
-start_cluster 1 0 {tags {"defrag external:skip cluster"} overrides {appendonly yes auto-aof-rewrite-percentage 0 save ""}} {
+start_cluster 1 0 {tags {"defrag external:skip cluster"} overrides {appendonly yes auto-aof-rewrite-percentage 0 save "" loglevel debug}} {
     test_active_defrag "cluster"
 }
 
-start_server {tags {"defrag external:skip standalone"} overrides {appendonly yes auto-aof-rewrite-percentage 0 save ""}} {
+start_server {tags {"defrag external:skip standalone"} overrides {appendonly yes auto-aof-rewrite-percentage 0 save "" loglevel debug}} {
     test_active_defrag "standalone"
 }
 } ;# run_solo