Make atomics a hard-dep. (#12730)

They are used extensively throughout our code base, and building
without them does not produce a thread-safe agent.
vkalintiris 2022-05-02 17:59:40 +03:00 committed by GitHub
parent db9b85a9cb
commit b6d2a3662d
9 changed files with 6 additions and 183 deletions
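
A note on the change, with a minimal C sketch (illustrative only, not code
from this commit): the difference between a racy counter and the __atomic
builtins the agent now requires unconditionally.

    /* Two collector threads bumping a shared counter. */
    #include <stdint.h>

    static uint64_t requests = 0;

    void on_request(void) {
        /* requests++;  -- a separate load/modify/store: a data race */
        __atomic_fetch_add(&requests, 1, __ATOMIC_SEQ_CST); /* indivisible */
    }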

diff --git a/Makefile.am b/Makefile.am

@@ -28,7 +28,6 @@ EXTRA_DIST = \
.eslintrc \
.github/CODEOWNERS \
build/m4/jemalloc.m4 \
-build/m4/ax_c___atomic.m4 \
build/m4/ax_check_enable_debug.m4 \
build/m4/ax_c_mallinfo.m4 \
build/m4/ax_gcc_func_attribute.m4 \
@@ -941,22 +940,15 @@ if ENABLE_ML_TESTS
$(NULL)
endif
-if ENABLE_CXX_LINKER
-netdata_LINK = $(CXXLD) $(CXXFLAGS) $(LDFLAGS) -o $@
-else
-netdata_LINK = $(CCLD) $(CFLAGS) $(LDFLAGS) -o $@
-endif
+netdata_LINK = $(CXXLD) $(CXXFLAGS) $(LDFLAGS) -o $@
sbin_PROGRAMS += netdatacli
netdatacli_SOURCES = $(NETDATACLI_FILES)
netdatacli_LDADD = \
$(NETDATA_COMMON_LIBS) \
$(NULL)
-if ENABLE_CXX_LINKER
-netdatacli_LINK = $(CXXLD) $(CXXFLAGS) $(LDFLAGS) -o $@
-else
-netdatacli_LINK = $(CCLD) $(CFLAGS) $(LDFLAGS) -o $@
-endif
+netdatacli_LINK = $(CXXLD) $(CXXFLAGS) $(LDFLAGS) -o $@
if ENABLE_PLUGIN_APPS
plugins_PROGRAMS += apps.plugin

diff --git a/aclk/aclk_util.c b/aclk/aclk_util.c

@@ -65,17 +65,6 @@ int aclk_env_has_capa(const char *capa)
#ifdef ACLK_LOG_CONVERSATION_DIR
volatile int aclk_conversation_log_counter = 0;
-#if !defined(HAVE_C___ATOMIC)
-netdata_mutex_t aclk_conversation_log_mutex = NETDATA_MUTEX_INITIALIZER;
-int aclk_get_conv_log_next()
-{
-    int ret;
-    netdata_mutex_lock(&aclk_conversation_log_mutex);
-    ret = aclk_conversation_log_counter++;
-    netdata_mutex_unlock(&aclk_conversation_log_mutex);
-    return ret;
-}
-#endif
#endif
#define ACLK_TOPIC_PREFIX "/agent/"

diff --git a/aclk/aclk_util.h b/aclk/aclk_util.h

@@ -99,13 +99,7 @@ void free_topic_cache(void);
#ifdef ACLK_LOG_CONVERSATION_DIR
extern volatile int aclk_conversation_log_counter;
-#if defined(HAVE_C___ATOMIC)
#define ACLK_GET_CONV_LOG_NEXT() __atomic_fetch_add(&aclk_conversation_log_counter, 1, __ATOMIC_SEQ_CST)
-#else
-extern netdata_mutex_t aclk_conversation_log_mutex;
-int aclk_get_conv_log_next();
-#define ACLK_GET_CONV_LOG_NEXT() aclk_get_conv_log_next()
-#endif
#endif
unsigned long int aclk_tbeb_delay(int reset, int base, unsigned long int min, unsigned long int max);
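
With the mutex fallback deleted, the __atomic_fetch_add macro above is the
only implementation left. A hedged usage sketch (the caller name is
hypothetical, not from this commit):

    /* Each call returns a unique, increasing id: __atomic_fetch_add
     * returns the previous value and increments in one indivisible step. */
    static int next_conversation_log_id(void) {
        return ACLK_GET_CONV_LOG_NEXT();
    }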

diff --git a/build/m4/ax_c___atomic.m4 b/build/m4/ax_c___atomic.m4

@@ -1,36 +0,0 @@
-# AC_C___ATOMIC
-# -------------
-# Define HAVE_C___ATOMIC if __atomic works.
-AN_IDENTIFIER([__atomic], [AC_C___ATOMIC])
-AC_DEFUN([AC_C___ATOMIC],
-[AC_CACHE_CHECK([for __atomic], ac_cv_c___atomic,
-  [AC_LINK_IFELSE(
-    [AC_LANG_SOURCE(
-      [[int
-        main (int argc, char **argv)
-        {
-          volatile unsigned long ul1 = 1, ul2 = 0, ul3 = 2;
-          __atomic_load_n(&ul1, __ATOMIC_SEQ_CST);
-          __atomic_compare_exchange(&ul1, &ul2, &ul3, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
-          __atomic_fetch_add(&ul1, 1, __ATOMIC_SEQ_CST);
-          __atomic_fetch_sub(&ul3, 1, __ATOMIC_SEQ_CST);
-          __atomic_or_fetch(&ul1, ul2, __ATOMIC_SEQ_CST);
-          __atomic_and_fetch(&ul1, ul2, __ATOMIC_SEQ_CST);
-          volatile unsigned long long ull1 = 1, ull2 = 0, ull3 = 2;
-          __atomic_load_n(&ull1, __ATOMIC_SEQ_CST);
-          __atomic_compare_exchange(&ull1, &ull2, &ull3, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
-          __atomic_fetch_add(&ull1, 1, __ATOMIC_SEQ_CST);
-          __atomic_fetch_sub(&ull3, 1, __ATOMIC_SEQ_CST);
-          __atomic_or_fetch(&ull1, ull2, __ATOMIC_SEQ_CST);
-          __atomic_and_fetch(&ull1, ull2, __ATOMIC_SEQ_CST);
-          return 0;
-        }
-      ]])],
-    [ac_cv_c___atomic=yes],
-    [ac_cv_c___atomic=no])])
-if test $ac_cv_c___atomic = yes; then
-  AC_DEFINE([HAVE_C___ATOMIC], 1,
-    [Define to 1 if __atomic operations work.])
-fi
-])# AC_C___ATOMIC

diff --git a/configure.ac b/configure.ac

@@ -260,7 +260,6 @@ AC_TYPE_UINT64_T
AC_C_INLINE
AC_FUNC_STRERROR_R
AC_C__GENERIC
-AC_C___ATOMIC
# AC_C_STMT_EXPR
AC_CANONICAL_HOST
AC_HEADER_MAJOR
@@ -1546,14 +1545,6 @@ AC_MSG_RESULT([${enable_lto}])
# -----------------------------------------------------------------------------
if test "${enable_exporting_kinesis}" = "yes" -o \
"${enable_exporting_pubsub}" = "yes" -o \
"${enable_exporting_prometheus_remote_write}" = "yes" -o \
"${new_cloud_protocol}" = "yes" -o \
"${build_ml}" = "yes"; then
enable_cxx_linker="yes"
fi
# Try to unconditionally link with -latomic. If the compiler can satisfy
# all the atomic ops with builtins then, the library will be left unused.
# Otherwise, some ops will be covered by the compiler's intrinsics and some
@@ -1583,8 +1574,6 @@ AC_SUBST([OPTIONAL_ATOMIC_LIBS])
AC_LANG_POP([C++])
-AM_CONDITIONAL([ENABLE_CXX_LINKER], [test "${enable_cxx_linker}" = "yes"])
AC_DEFINE_UNQUOTED([NETDATA_USER], ["${with_user}"], [use this user to drop privileged])
varlibdir="${localstatedir}/lib/netdata"
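
The comment kept in configure.ac states the design choice: always offer
-latomic and let the linker drop it when the compiler inlined every atomic
op. A hedged C sketch (not from this commit) of an operation that can need
the library -- on 32-bit targets, 64-bit atomics often lower to libatomic
calls:

    #include <stdint.h>

    static uint64_t big_counter;

    /* May compile to an inline instruction on x86-64 but to a
     * libatomic call on e.g. 32-bit ARM. */
    uint64_t bump_big_counter(void) {
        return __atomic_add_fetch(&big_counter, 1, __ATOMIC_SEQ_CST);
    }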

diff --git a/daemon/global_statistics.c b/daemon/global_statistics.c

@@ -37,37 +37,10 @@ static struct global_statistics {
.rrdr_result_points_generated = 0,
};
-#if defined(HAVE_C___ATOMIC)
-#else
-netdata_mutex_t global_statistics_mutex = NETDATA_MUTEX_INITIALIZER;
-static inline void global_statistics_lock(void) {
-    netdata_mutex_lock(&global_statistics_mutex);
-}
-static inline void global_statistics_unlock(void) {
-    netdata_mutex_unlock(&global_statistics_mutex);
-}
-#endif
void rrdr_query_completed(uint64_t db_points_read, uint64_t result_points_generated) {
-#if defined(HAVE_C___ATOMIC)
__atomic_fetch_add(&global_statistics.rrdr_queries_made, 1, __ATOMIC_SEQ_CST);
__atomic_fetch_add(&global_statistics.rrdr_db_points_read, db_points_read, __ATOMIC_SEQ_CST);
__atomic_fetch_add(&global_statistics.rrdr_result_points_generated, result_points_generated, __ATOMIC_SEQ_CST);
-#else
-#warning NOT using atomic operations - using locks for global statistics
-    if (web_server_is_multithreaded)
-        global_statistics_lock();
-    global_statistics.rrdr_queries_made++;
-    global_statistics.rrdr_db_points_read += db_points_read;
-    global_statistics.rrdr_result_points_generated += result_points_generated;
-    if (web_server_is_multithreaded)
-        global_statistics_unlock();
-#endif
}
void finished_web_request_statistics(uint64_t dt,
@@ -75,7 +48,6 @@ void finished_web_request_statistics(uint64_t dt,
uint64_t bytes_sent,
uint64_t content_size,
uint64_t compressed_content_size) {
-#if defined(HAVE_C___ATOMIC)
uint64_t old_web_usec_max = global_statistics.web_usec_max;
while(dt > old_web_usec_max)
__atomic_compare_exchange(&global_statistics.web_usec_max, &old_web_usec_max, &dt, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
@@ -86,61 +58,19 @@ void finished_web_request_statistics(uint64_t dt,
__atomic_fetch_add(&global_statistics.bytes_sent, bytes_sent, __ATOMIC_SEQ_CST);
__atomic_fetch_add(&global_statistics.content_size, content_size, __ATOMIC_SEQ_CST);
__atomic_fetch_add(&global_statistics.compressed_content_size, compressed_content_size, __ATOMIC_SEQ_CST);
-#else
-#warning NOT using atomic operations - using locks for global statistics
-    if (web_server_is_multithreaded)
-        global_statistics_lock();
-    if (dt > global_statistics.web_usec_max)
-        global_statistics.web_usec_max = dt;
-    global_statistics.web_requests++;
-    global_statistics.web_usec += dt;
-    global_statistics.bytes_received += bytes_received;
-    global_statistics.bytes_sent += bytes_sent;
-    global_statistics.content_size += content_size;
-    global_statistics.compressed_content_size += compressed_content_size;
-    if (web_server_is_multithreaded)
-        global_statistics_unlock();
-#endif
}
uint64_t web_client_connected(void) {
-#if defined(HAVE_C___ATOMIC)
    __atomic_fetch_add(&global_statistics.connected_clients, 1, __ATOMIC_SEQ_CST);
-    uint64_t id = __atomic_fetch_add(&global_statistics.web_client_count, 1, __ATOMIC_SEQ_CST);
-#else
-    if (web_server_is_multithreaded)
-        global_statistics_lock();
-    global_statistics.connected_clients++;
-    uint64_t id = global_statistics.web_client_count++;
-    if (web_server_is_multithreaded)
-        global_statistics_unlock();
-#endif
-    return id;
+    return __atomic_fetch_add(&global_statistics.web_client_count, 1, __ATOMIC_SEQ_CST);
}
void web_client_disconnected(void) {
-#if defined(HAVE_C___ATOMIC)
    __atomic_fetch_sub(&global_statistics.connected_clients, 1, __ATOMIC_SEQ_CST);
-#else
-    if (web_server_is_multithreaded)
-        global_statistics_lock();
-    global_statistics.connected_clients--;
-    if (web_server_is_multithreaded)
-        global_statistics_unlock();
-#endif
}
static inline void global_statistics_copy(struct global_statistics *gs, uint8_t options) {
-#if defined(HAVE_C___ATOMIC)
gs->connected_clients = __atomic_fetch_add(&global_statistics.connected_clients, 0, __ATOMIC_SEQ_CST);
gs->web_requests = __atomic_fetch_add(&global_statistics.web_requests, 0, __ATOMIC_SEQ_CST);
gs->web_usec = __atomic_fetch_add(&global_statistics.web_usec, 0, __ATOMIC_SEQ_CST);
@@ -160,16 +90,6 @@ static inline void global_statistics_copy(struct global_statistics *gs, uint8_t
__atomic_compare_exchange(&global_statistics.web_usec_max, (uint64_t *) &gs->web_usec_max, &n, 1, __ATOMIC_SEQ_CST,
__ATOMIC_SEQ_CST);
}
-#else
-    global_statistics_lock();
-    memcpy(gs, (const void *)&global_statistics, sizeof(struct global_statistics));
-    if (options & GLOBAL_STATS_RESET_WEB_USEC_MAX)
-        global_statistics.web_usec_max = 0;
-    global_statistics_unlock();
-#endif
}
static void global_statistics_charts(void) {
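
The compare-exchange loop retained in finished_web_request_statistics() is
the standard lock-free running maximum. Restated as a standalone hedged
sketch (names are illustrative, not from this commit):

    #include <stdint.h>

    static uint64_t web_usec_max;

    static void update_max(uint64_t dt) {
        uint64_t old = __atomic_load_n(&web_usec_max, __ATOMIC_SEQ_CST);
        /* On failure the builtin refreshes 'old' with the current value,
         * so the loop re-tests dt against fresh data; it exits once dt is
         * installed or a concurrent thread stored something larger. */
        while (dt > old &&
               !__atomic_compare_exchange(&web_usec_max, &old, &dt, 1,
                                          __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
            ;
    }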

diff --git a/database/rrd.h b/database/rrd.h

@@ -172,15 +172,9 @@ typedef enum rrddim_flags {
RRDDIM_FLAG_PENDING_FOREACH_ALARM = (1 << 5), // set when foreach alarm has not been initialized yet
} RRDDIM_FLAGS;
-#ifdef HAVE_C___ATOMIC
#define rrddim_flag_check(rd, flag) (__atomic_load_n(&((rd)->flags), __ATOMIC_SEQ_CST) & (flag))
#define rrddim_flag_set(rd, flag) __atomic_or_fetch(&((rd)->flags), (flag), __ATOMIC_SEQ_CST)
#define rrddim_flag_clear(rd, flag) __atomic_and_fetch(&((rd)->flags), ~(flag), __ATOMIC_SEQ_CST)
-#else
-#define rrddim_flag_check(rd, flag) ((rd)->flags & (flag))
-#define rrddim_flag_set(rd, flag) (rd)->flags |= (flag)
-#define rrddim_flag_clear(rd, flag) (rd)->flags &= ~(flag)
-#endif
typedef enum label_source {
LABEL_SOURCE_AUTO = 0,
@@ -483,16 +477,9 @@ typedef enum rrdset_flags {
RRDSET_FLAG_ANOMALY_DETECTION = 1 << 18 // flag to identify anomaly detection charts.
} RRDSET_FLAGS;
-#ifdef HAVE_C___ATOMIC
#define rrdset_flag_check(st, flag) (__atomic_load_n(&((st)->flags), __ATOMIC_SEQ_CST) & (flag))
#define rrdset_flag_set(st, flag) __atomic_or_fetch(&((st)->flags), flag, __ATOMIC_SEQ_CST)
#define rrdset_flag_clear(st, flag) __atomic_and_fetch(&((st)->flags), ~flag, __ATOMIC_SEQ_CST)
-#else
-#define rrdset_flag_check(st, flag) ((st)->flags & (flag))
-#define rrdset_flag_set(st, flag) (st)->flags |= (flag)
-#define rrdset_flag_clear(st, flag) (st)->flags &= ~(flag)
-#endif
-#define rrdset_flag_check_noatomic(st, flag) ((st)->flags & (flag))
struct rrdset {
// ------------------------------------------------------------------------
@@ -642,15 +629,9 @@ typedef enum rrdhost_flags {
RRDHOST_FLAG_PENDING_FOREACH_ALARMS = 1 << 7, // contains dims with uninitialized foreach alarms
} RRDHOST_FLAGS;
-#ifdef HAVE_C___ATOMIC
#define rrdhost_flag_check(host, flag) (__atomic_load_n(&((host)->flags), __ATOMIC_SEQ_CST) & (flag))
#define rrdhost_flag_set(host, flag) __atomic_or_fetch(&((host)->flags), flag, __ATOMIC_SEQ_CST)
#define rrdhost_flag_clear(host, flag) __atomic_and_fetch(&((host)->flags), ~flag, __ATOMIC_SEQ_CST)
-#else
-#define rrdhost_flag_check(host, flag) ((host)->flags & (flag))
-#define rrdhost_flag_set(host, flag) (host)->flags |= (flag)
-#define rrdhost_flag_clear(host, flag) (host)->flags &= ~(flag)
-#endif
#ifdef NETDATA_INTERNAL_CHECKS
#define rrdset_debug(st, fmt, args...) do { if(unlikely(debug_flags & D_RRD_STATS && rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) \
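
The surviving flag macros all follow one pattern: or-fetch to set bits,
and-fetch with the complement to clear them, an atomic load to test them.
A self-contained hedged sketch of that pattern (names are illustrative):

    #include <stdint.h>

    typedef uint32_t FLAGS;
    static FLAGS flags;

    /* Safe even when collectors and queries share the same flags word. */
    #define flag_set(f)   __atomic_or_fetch(&flags, (f), __ATOMIC_SEQ_CST)
    #define flag_clear(f) __atomic_and_fetch(&flags, ~(f), __ATOMIC_SEQ_CST)
    #define flag_check(f) (__atomic_load_n(&flags, __ATOMIC_SEQ_CST) & (f))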

diff --git a/database/rrdset.c b/database/rrdset.c

@@ -953,7 +953,7 @@ RRDSET *rrdset_create_custom(
// RRDSET - data collection iteration control
inline void rrdset_next_usec_unfiltered(RRDSET *st, usec_t microseconds) {
-    if(unlikely(!st->last_collected_time.tv_sec || !microseconds || (rrdset_flag_check_noatomic(st, RRDSET_FLAG_SYNC_CLOCK)))) {
+    if(unlikely(!st->last_collected_time.tv_sec || !microseconds || (rrdset_flag_check(st, RRDSET_FLAG_SYNC_CLOCK)))) {
// call the full next_usec() function
rrdset_next_usec(st, microseconds);
return;
@@ -971,7 +971,7 @@ inline void rrdset_next_usec(RRDSET *st, usec_t microseconds) {
usec_t discarded = microseconds;
#endif
-    if(unlikely(rrdset_flag_check_noatomic(st, RRDSET_FLAG_SYNC_CLOCK))) {
+    if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_SYNC_CLOCK))) {
// the chart needs to be re-synced to current time
rrdset_flag_clear(st, RRDSET_FLAG_SYNC_CLOCK);

diff --git a/web/server/web_client.h b/web/server/web_client.h

@@ -68,15 +68,9 @@ typedef enum web_client_flags {
WEB_CLIENT_CHUNKED_TRANSFER = 1 << 10, // chunked transfer (used with zlib compression)
} WEB_CLIENT_FLAGS;
-//#ifdef HAVE_C___ATOMIC
-//#define web_client_flag_check(w, flag) (__atomic_load_n(&((w)->flags), __ATOMIC_SEQ_CST) & flag)
-//#define web_client_flag_set(w, flag) __atomic_or_fetch(&((w)->flags), flag, __ATOMIC_SEQ_CST)
-//#define web_client_flag_clear(w, flag) __atomic_and_fetch(&((w)->flags), ~flag, __ATOMIC_SEQ_CST)
-//#else
#define web_client_flag_check(w, flag) ((w)->flags & (flag))
#define web_client_flag_set(w, flag) (w)->flags |= flag
#define web_client_flag_clear(w, flag) (w)->flags &= ~flag
-//#endif
#define WEB_CLIENT_IS_DEAD(w) web_client_flag_set(w, WEB_CLIENT_FLAG_DEAD)
#define web_client_check_dead(w) web_client_flag_check(w, WEB_CLIENT_FLAG_DEAD)