Explicitly init some std::atomic.

GitOrigin-RevId: bfbe95bd1aaed9c3ade31d9a7222b223bfece347
This commit is contained in:
levlam 2019-01-24 06:18:14 +03:00
parent 0ac8c2d389
commit 2edc069583
15 changed files with 42 additions and 46 deletions

View File

@@ -110,7 +110,7 @@ BENCH(TdRandFast, "td_rand_fast") {
#if !TD_THREAD_UNSUPPORTED
BENCH(SslRand, "ssl_rand_int32") {
std::vector<td::thread> v;
std::atomic<td::uint32> sum;
std::atomic<td::uint32> sum{0};
for (int i = 0; i < 3; i++) {
v.push_back(td::thread([&] {
td::int32 res = 0;

View File

@@ -28,12 +28,6 @@
#include <sys/eventfd.h>
#endif
using std::atomic;
using std::vector;
using td::int32;
using td::uint32;
#define MODE std::memory_order_relaxed
// void set_affinity(int mask) {
@@ -100,7 +94,7 @@ class Backoff {
};
class VarQueue {
atomic<qvalue_t> data;
std::atomic<qvalue_t> data{0};
public:
void init() {
@@ -218,17 +212,17 @@ const int queue_buf_size = 1 << 10;
class BufferQueue {
struct node {
qvalue_t val;
char pad[64 - sizeof(atomic<qvalue_t>)];
char pad[64 - sizeof(std::atomic<qvalue_t>)];
};
node q[queue_buf_size];
struct Position {
atomic<uint32> i;
char pad[64 - sizeof(atomic<uint32>)];
std::atomic<td::uint32> i{0};
char pad[64 - sizeof(std::atomic<td::uint32>)];
uint32 local_read_i;
uint32 local_write_i;
char pad2[64 - sizeof(uint32) * 2];
td::uint32 local_read_i;
td::uint32 local_write_i;
char pad2[64 - sizeof(td::uint32) * 2];
void init() {
i = 0;
@@ -342,7 +336,7 @@ class BufferQueue {
#if TD_LINUX
class BufferedFdQueue {
int fd;
atomic<int> wait_flag;
std::atomic<int> wait_flag{0};
BufferQueue q;
char pad[64];
@@ -440,7 +434,7 @@ class BufferedFdQueue {
class FdQueue {
int fd;
atomic<int> wait_flag;
std::atomic<int> wait_flag{0};
VarQueue q;
char pad[64];
@@ -572,8 +566,8 @@ class QueueBenchmark2 : public td::Benchmark {
int server_active_connections;
int client_active_connections;
vector<td::int64> server_conn;
vector<td::int64> client_conn;
std::vector<td::int64> server_conn;
std::vector<td::int64> client_conn;
public:
explicit QueueBenchmark2(int connections_n = 1) : connections_n(connections_n) {
@@ -615,7 +609,7 @@ class QueueBenchmark2 : public td::Benchmark {
}
void *server_run(void *) {
server_conn = vector<td::int64>(connections_n);
server_conn = std::vector<td::int64>(connections_n);
server_active_connections = connections_n;
while (server_active_connections > 0) {
@@ -656,7 +650,7 @@ class QueueBenchmark2 : public td::Benchmark {
}
void *client_run(void *) {
client_conn = vector<td::int64>(connections_n);
client_conn = std::vector<td::int64>(connections_n);
client_active_connections = connections_n;
if (queries_n >= (1 << 24)) {
std::fprintf(stderr, "Too big queries_n\n");
@@ -732,7 +726,7 @@ class QueueBenchmark : public td::Benchmark {
}
void *server_run(void *) {
vector<td::int64> conn(connections_n);
std::vector<td::int64> conn(connections_n);
int active_connections = connections_n;
while (active_connections > 0) {
qvalue_t value = server.get();
@@ -756,7 +750,7 @@ class QueueBenchmark : public td::Benchmark {
}
void *client_run(void *) {
vector<td::int64> conn(connections_n);
std::vector<td::int64> conn(connections_n);
if (queries_n >= (1 << 24)) {
std::fprintf(stderr, "Too big queries_n\n");
std::exit(0);
@@ -789,7 +783,7 @@ class QueueBenchmark : public td::Benchmark {
}
void *client_run2(void *) {
vector<td::int64> conn(connections_n);
std::vector<td::int64> conn(connections_n);
if (queries_n >= (1 << 24)) {
std::fprintf(stderr, "Too big queries_n\n");
std::exit(0);

View File

@@ -72,7 +72,8 @@ static int fast_backtrace(void **buffer, int size) {
return i;
}
static std::atomic<std::size_t> fast_backtrace_failed_cnt, backtrace_total_cnt;
static std::atomic<std::size_t> fast_backtrace_failed_cnt;
static std::atomic<std::size_t> backtrace_total_cnt;
double get_fast_backtrace_success_rate() {
return 1 - static_cast<double>(fast_backtrace_failed_cnt.load(std::memory_order_relaxed)) /
static_cast<double>(std::max(std::size_t(1), backtrace_total_cnt.load(std::memory_order_relaxed)));

View File

@@ -378,8 +378,8 @@ class Global : public ActorContext {
int32 gc_scheduler_id_;
int32 slow_net_scheduler_id_;
std::atomic<double> server_time_difference_;
std::atomic<bool> server_time_difference_was_updated_;
std::atomic<double> server_time_difference_{0.0};
std::atomic<bool> server_time_difference_was_updated_{false};
std::atomic<bool> close_flag_{false};
std::vector<std::shared_ptr<NetStatsCallback>> net_stats_file_callbacks_;

View File

@@ -86,7 +86,7 @@ class ConcurrentScheduler : private Scheduler::Callback {
enum class State { Start, Run };
State state_ = State::Start;
std::vector<unique_ptr<Scheduler>> schedulers_;
std::atomic<bool> is_finished_;
std::atomic<bool> is_finished_{false};
std::mutex at_finish_mutex_;
std::vector<std::function<void()>> at_finish_;
#if !TD_THREAD_UNSUPPORTED && !TD_EVENTFD_UNSUPPORTED

View File

@@ -63,7 +63,7 @@ class ConcurrentBinlog : public BinlogInterface {
ActorOwn<detail::BinlogActor> binlog_actor_;
string path_;
std::atomic<uint64> last_id_;
std::atomic<uint64> last_id_{0};
};
} // namespace td

View File

@@ -20,7 +20,7 @@ class HazardPointers {
explicit HazardPointers(size_t threads_n) : threads_(threads_n) {
for (auto &data : threads_) {
for (auto &ptr : data.hazard) {
ptr = nullptr;
std::atomic_init(&ptr, nullptr);
}
}
}

View File

@@ -77,7 +77,7 @@ class MemoryLog : public LogInterface {
private:
char buffer_[buffer_size];
std::atomic<uint32> pos_;
std::atomic<uint32> pos_{0};
};
} // namespace td

View File

@@ -306,9 +306,9 @@ class MpmcQueueOld {
MpmcQueueBlock<T> block;
//Got pad in MpmcQueueBlock
};
std::atomic<Node *> write_pos_;
std::atomic<Node *> write_pos_{nullptr};
char pad[TD_CONCURRENCY_PAD - sizeof(std::atomic<Node *>)];
std::atomic<Node *> read_pos_;
std::atomic<Node *> read_pos_{nullptr};
char pad2[TD_CONCURRENCY_PAD - sizeof(std::atomic<Node *>)];
size_t block_size_;
HazardPointers<Node, 1> hazard_pointers_;
@@ -438,9 +438,9 @@ class MpmcQueue {
char pad[TD_CONCURRENCY_PAD - sizeof(std::atomic<Node *>)];
//Got pad in MpmcQueueBlock
};
std::atomic<Node *> write_pos_;
std::atomic<Node *> write_pos_{nullptr};
char pad[TD_CONCURRENCY_PAD - sizeof(std::atomic<Node *>)];
std::atomic<Node *> read_pos_;
std::atomic<Node *> read_pos_{nullptr};
char pad2[TD_CONCURRENCY_PAD - sizeof(std::atomic<Node *>)];
HazardPointers<Node, 1> hazard_pointers_;
//Got pad in HazardPointers

View File

@@ -34,7 +34,7 @@ class AtomicRefCnt {
}
private:
std::atomic<uint64> cnt_;
std::atomic<uint64> cnt_{0};
};
template <class DataT, class DeleterT>

View File

@@ -103,13 +103,14 @@ BufferRaw *BufferAllocator::create_buffer_raw(size_t size) {
new (buffer_raw) BufferRaw();
buffer_raw->data_size_ = size;
buffer_raw->begin_ = 0;
buffer_raw->end_ = 0;
std::atomic_init(&buffer_raw->end_, 0);
buffer_raw->ref_cnt_.store(1, std::memory_order_relaxed);
buffer_raw->has_writer_.store(true, std::memory_order_relaxed);
std::atomic_init(&buffer_raw->ref_cnt_, 1);
std::atomic_init(&buffer_raw->has_writer_, true);
buffer_raw->was_reader_ = false;
return buffer_raw;
}
void BufferBuilder::append(BufferSlice slice) {
if (append_inplace(slice.as_slice())) {
return;

View File

@@ -67,7 +67,7 @@ class SPSCBlockQueue {
}
struct Position {
std::atomic<uint32> i;
std::atomic<uint32> i{0};
char pad[64 - sizeof(std::atomic<uint32>)];
uint32 local_writer_i;
char pad2[64 - sizeof(uint32)];
@@ -252,7 +252,7 @@ class SPSCChainQueue {
private:
struct Node {
BlockQueueT q_;
std::atomic<bool> is_closed_;
std::atomic<bool> is_closed_{false};
Node *next_;
void init() {
@@ -399,7 +399,7 @@ class PollQueue : public QueueT {
private:
EventFd event_fd_;
std::atomic<int> wait_state_;
std::atomic<int> wait_state_{0};
int writer_wait_state_;
int get_wait_state() {

View File

@@ -15,7 +15,7 @@
#if !TD_THREAD_UNSUPPORTED
TEST(HazardPointers, stress) {
struct Node {
std::atomic<std::string *> name_;
std::atomic<std::string *> name_{nullptr};
char pad[64];
};
int threads_n = 10;

View File

@@ -18,7 +18,7 @@ TEST(MpmcWaiter, stress_one_one) {
td::Stage check;
std::vector<td::thread> threads;
std::atomic<size_t> value;
std::atomic<size_t> value{0};
size_t write_cnt = 10;
td::unique_ptr<td::MpmcWaiter> waiter;
size_t threads_n = 2;
@@ -64,8 +64,8 @@ TEST(MpmcWaiter, stress) {
std::vector<td::thread> threads;
size_t write_n;
size_t read_n;
std::atomic<size_t> write_pos;
std::atomic<size_t> read_pos;
std::atomic<size_t> write_pos{0};
std::atomic<size_t> read_pos{0};
size_t end_pos;
size_t write_cnt;
size_t threads_n = 20;

View File

@@ -96,7 +96,7 @@ TEST(Misc, errno_tls_bug) {
#if !TD_THREAD_UNSUPPORTED && !TD_EVENTFD_UNSUPPORTED
EventFd test_event_fd;
test_event_fd.init();
std::atomic<int> s(0);
std::atomic<int> s{0};
s = 1;
td::thread th([&] {
while (s != 1) {