// Copyright 2020 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/flags/internal/sequence_lock.h"
#include <algorithm>
#include <atomic>
#include <thread> // NOLINT(build/c++11)
#include <tuple>
#include <vector>
#include "gtest/gtest.h"
#include "absl/base/internal/sysinfo.h"
#include "absl/container/fixed_array.h"
#include "absl/time/clock.h"

namespace {

namespace flags = absl::flags_internal;

class ConcurrentSequenceLockTest
: public testing::TestWithParam<std::tuple<int, int>> {
public:
ConcurrentSequenceLockTest()
: buf_bytes_(std::get<0>(GetParam())),
num_threads_(std::get<1>(GetParam())) {}
protected:
const int buf_bytes_;
const int num_threads_;
};

TEST_P(ConcurrentSequenceLockTest, ReadAndWrite) {
const int buf_words =
flags::AlignUp(buf_bytes_, sizeof(uint64_t)) / sizeof(uint64_t);
// The buffer that will be protected by the SequenceLock.
absl::FixedArray<std::atomic<uint64_t>> protected_buf(buf_words);
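  // Pre-fill the protected buffer with an all-ones sentinel. Readers should
  // never observe this value: TryRead() fails until MarkInitialized() is
  // called below.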
for (auto& v : protected_buf) v = -1;
flags::SequenceLock seq_lock;
std::atomic<bool> stop{false};
std::atomic<int64_t> bad_reads{0};
std::atomic<int64_t> good_reads{0};
std::atomic<int64_t> unsuccessful_reads{0};
// Start a bunch of threads which read 'protected_buf' under the sequence
// lock. The main thread will concurrently update 'protected_buf'. The updates
// always consist of an array of identical integers. The reader ensures that
// any data it reads matches that pattern (i.e. the reads are not "torn").
std::vector<std::thread> threads;
for (int i = 0; i < num_threads_; i++) {
threads.emplace_back([&]() {
absl::FixedArray<char> local_buf(buf_bytes_);
while (!stop.load(std::memory_order_relaxed)) {
if (seq_lock.TryRead(local_buf.data(), protected_buf.data(),
buf_bytes_)) {
bool good = true;
for (const auto& v : local_buf) {
if (v != local_buf[0]) good = false;
}
if (good) {
good_reads.fetch_add(1, std::memory_order_relaxed);
} else {
bad_reads.fetch_add(1, std::memory_order_relaxed);
}
} else {
unsuccessful_reads.fetch_add(1, std::memory_order_relaxed);
}
}
});
}
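  // Until MarkInitialized() is called, every TryRead() is expected to fail.
  // Wait until the readers have collectively recorded at least num_threads_
  // failed reads before initializing, so that path is exercised.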
while (unsuccessful_reads.load(std::memory_order_relaxed) < num_threads_) {
absl::SleepFor(absl::Milliseconds(1));
}
seq_lock.MarkInitialized();
// Run a maximum of 5 seconds. On Windows, the scheduler behavior seems
// somewhat unfair and without an explicit timeout for this loop, the tests
// can run a long time.
absl::Time deadline = absl::Now() + absl::Seconds(5);
for (int i = 0; i < 100 && absl::Now() < deadline; i++) {
absl::FixedArray<char> writer_buf(buf_bytes_);
for (auto& v : writer_buf) v = i;
seq_lock.Write(protected_buf.data(), writer_buf.data(), buf_bytes_);
absl::SleepFor(absl::Microseconds(10));
}
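  // Shut down the readers and verify that no torn ("bad") reads occurred.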
stop.store(true, std::memory_order_relaxed);
for (auto& t : threads) t.join();
ASSERT_GE(good_reads, 0);
ASSERT_EQ(bad_reads, 0);
}

// Simple helper for generating a range of thread counts.
// Generates {low, low*scale, low*scale^2, ...}, always ending with high
// itself, even when high is not low times a power of scale.
// For example, MultiplicativeRange(1, 10, 2) yields {1, 2, 4, 8, 10}.
std::vector<int> MultiplicativeRange(int low, int high, int scale) {
std::vector<int> result;
for (int current = low; current < high; current *= scale) {
result.push_back(current);
}
result.push_back(high);
return result;
}

#ifndef ABSL_HAVE_THREAD_SANITIZER
const int kMaxThreads = absl::base_internal::NumCPUs();
#else
// With TSAN, a lot of threads contending for atomic access on the sequence
// lock make this test run too slowly.
const int kMaxThreads = std::min(absl::base_internal::NumCPUs(), 4);
#endif

INSTANTIATE_TEST_SUITE_P(
TestManyByteSizes, ConcurrentSequenceLockTest,
testing::Combine(
// Buffer size (bytes).
testing::Range(1, 128),
// Number of reader threads.
testing::ValuesIn(MultiplicativeRange(1, kMaxThreads, 2))));

// Simple single-threaded test, parameterized by the size of the buffer to be
// protected.
class SequenceLockTest : public testing::TestWithParam<int> {};

TEST_P(SequenceLockTest, SingleThreaded) {
const int size = GetParam();
absl::FixedArray<std::atomic<uint64_t>> protected_buf(
flags::AlignUp(size, sizeof(uint64_t)) / sizeof(uint64_t));
flags::SequenceLock seq_lock;
seq_lock.MarkInitialized();
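  // Round-trip a buffer of 'x' bytes through the lock and check that the
  // copy read back is byte-for-byte identical.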
std::vector<char> src_buf(size, 'x');
seq_lock.Write(protected_buf.data(), src_buf.data(), size);
std::vector<char> dst_buf(size, '0');
ASSERT_TRUE(seq_lock.TryRead(dst_buf.data(), protected_buf.data(), size));
ASSERT_EQ(src_buf, dst_buf);
}

INSTANTIATE_TEST_SUITE_P(TestManyByteSizes, SequenceLockTest,
// Buffer size (bytes).
testing::Range(1, 128));

}  // namespace