mitigate clang build warnings -Wconversion #1763

Merged: 6 commits, Mar 7, 2024
19 changes: 18 additions & 1 deletion BUILD.bazel
@@ -1,5 +1,22 @@
licenses(["notice"])

COPTS = [
"-pedantic",
"-pedantic-errors",
"-std=c++11",
"-Wall",
"-Wconversion",
"-Wextra",
"-Wshadow",
# "-Wshorten-64-to-32",
"-Wfloat-equal",
"-fstrict-aliasing",
## assert() are used a lot in tests upstream, which may be optimised out leading to
## unused-variable warning.
"-Wno-unused-variable",
"-Werror=old-style-cast",
]

config_setting(
name = "qnx",
constraint_values = ["@platforms//os:qnx"],
@@ -47,7 +64,7 @@ cc_library(
],
copts = select({
":windows": [],
"//conditions:default": ["-Werror=old-style-cast"],
"//conditions:default": COPTS,
}),
defines = [
"BENCHMARK_STATIC_DEFINE",
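A minimal sketch, not taken from this PR, of the two conversions clang's -Wconversion most often flags in this codebase and how a static_cast silences them; the file name and compile line are assumptions:

// clang++ -std=c++11 -Wall -Wconversion -c example.cc
#include <cstddef>
#include <cstdint>

int Narrow(std::int64_t wide) {
  // return wide;                 // warning: 'int64_t' to 'int' may lose precision
  return static_cast<int>(wide);  // explicit narrowing, no warning
}

std::size_t ToUnsigned(int count) {
  // return count;                // warning: implicit conversion changes signedness
  return static_cast<std::size_t>(count);
}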
1 change: 1 addition & 0 deletions CMakeLists.txt
@@ -190,6 +190,7 @@ else()
add_cxx_compiler_flag(-Wshadow)
add_cxx_compiler_flag(-Wfloat-equal)
add_cxx_compiler_flag(-Wold-style-cast)
add_cxx_compiler_flag(-Wconversion)
if(BENCHMARK_ENABLE_WERROR)
add_cxx_compiler_flag(-Werror)
endif()
3 changes: 2 additions & 1 deletion src/benchmark.cc
@@ -407,7 +407,8 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
benchmarks_with_threads += (benchmark.threads() > 1);
runners.emplace_back(benchmark, &perfcounters, reports_for_family);
int num_repeats_of_this_instance = runners.back().GetNumRepeats();
num_repetitions_total += num_repeats_of_this_instance;
num_repetitions_total +=
static_cast<size_t>(num_repeats_of_this_instance);
if (reports_for_family)
reports_for_family->num_runs_total += num_repeats_of_this_instance;
}
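A small sketch, with hypothetical names, of the pattern behind this change: a compound += between an unsigned total and a signed per-instance count is a sign conversion under -Wconversion, so the signed value is cast once it is known to be non-negative:

#include <cstddef>

std::size_t AddRepeats(std::size_t total, int repeats) {
  // total += repeats;  // warning: implicit conversion changes signedness
  if (repeats > 0) {
    total += static_cast<std::size_t>(repeats);  // intent made explicit
  }
  return total;
}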
5 changes: 3 additions & 2 deletions src/benchmark_register.cc
@@ -482,8 +482,9 @@ int Benchmark::ArgsCnt() const {

const char* Benchmark::GetArgName(int arg) const {
BM_CHECK_GE(arg, 0);
BM_CHECK_LT(arg, static_cast<int>(arg_names_.size()));
return arg_names_[arg].c_str();
size_t uarg = static_cast<size_t>(arg);
BM_CHECK_LT(uarg, arg_names_.size());
return arg_names_[uarg].c_str();
}

TimeUnit Benchmark::GetTimeUnit() const {
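A sketch of the convert-once idiom used in this hunk, under the assumption that the index has already been checked to be non-negative; the names here are illustrative, not the library's:

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

const char* NameAt(const std::vector<std::string>& names, int arg) {
  assert(arg >= 0);
  std::size_t uarg = static_cast<std::size_t>(arg);  // one cast, reused below
  assert(uarg < names.size());
  return names[uarg].c_str();
}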
4 changes: 2 additions & 2 deletions src/benchmark_register.h
@@ -24,7 +24,7 @@ typename std::vector<T>::iterator AddPowers(std::vector<T>* dst, T lo, T hi,
static const T kmax = std::numeric_limits<T>::max();

// Space out the values in multiples of "mult"
Collaborator (review comment): I'm somewhat surprised that this is needed, but okay, I guess?

for (T i = static_cast<T>(1); i <= hi; i *= static_cast<T>(mult)) {
for (T i = static_cast<T>(1); i <= hi; i = static_cast<T>(i * mult)) {
if (i >= lo) {
dst->push_back(i);
}
@@ -52,7 +52,7 @@ void AddNegatedPowers(std::vector<T>* dst, T lo, T hi, int mult) {

const auto it = AddPowers(dst, hi_complement, lo_complement, mult);

std::for_each(it, dst->end(), [](T& t) { t *= -1; });
std::for_each(it, dst->end(), [](T& t) { t = static_cast<T>(t * -1); });
std::reverse(it, dst->end());
}

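The compound assignments above were rewritten because of integer promotion: for a T narrower than int, the arithmetic is done in int, and assigning that int result back to T is an implicit narrowing that -Wconversion reports. A minimal sketch with int8_t:

#include <cstdint>

std::int8_t NextPower(std::int8_t i, int mult) {
  // i *= mult;                             // warning: 'int' to 'int8_t' may lose precision
  i = static_cast<std::int8_t>(i * mult);   // narrowing written out
  return i;
}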
2 changes: 1 addition & 1 deletion src/benchmark_runner.cc
@@ -235,7 +235,7 @@ BenchmarkRunner::BenchmarkRunner(
has_explicit_iteration_count(b.iterations() != 0 ||
parsed_benchtime_flag.tag ==
BenchTimeType::ITERS),
pool(b.threads() - 1),
pool(static_cast<size_t>(b.threads() - 1)),
iters(has_explicit_iteration_count
? ComputeIters(b_, parsed_benchtime_flag)
: 1),
4 changes: 2 additions & 2 deletions src/cycleclock.h
@@ -70,7 +70,7 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
// frequency scaling). Also note that when the Mac sleeps, this
// counter pauses; it does not continue counting, nor does it
// reset to zero.
return mach_absolute_time();
return static_cast<int64_t>(mach_absolute_time());
#elif defined(BENCHMARK_OS_EMSCRIPTEN)
// this goes above x86-specific code because old versions of Emscripten
// define __x86_64__, although they have nothing to do with it.
@@ -82,7 +82,7 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
#elif defined(__x86_64__) || defined(__amd64__)
uint64_t low, high;
__asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
return (high << 32) | low;
return static_cast<int64_t>((high << 32) | low);
#elif defined(__powerpc__) || defined(__ppc__)
// This returns a time-base, which is not always precisely a cycle-count.
#if defined(__powerpc64__) || defined(__ppc64__)
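A sketch of the x86 branch above with the inline asm omitted: the rdtsc halves are unsigned 64-bit values while Now() returns int64_t, so the combined value gets an explicit cast:

#include <cstdint>

std::int64_t CombineTsc(std::uint64_t low, std::uint64_t high) {
  // return (high << 32) | low;   // warning: 'uint64_t' to 'int64_t'
  return static_cast<std::int64_t>((high << 32) | low);
}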
4 changes: 2 additions & 2 deletions src/statistics.cc
@@ -97,7 +97,7 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
auto error_count = std::count_if(reports.begin(), reports.end(),
[](Run const& run) { return run.skipped; });

if (reports.size() - error_count < 2) {
if (reports.size() - static_cast<size_t>(error_count) < 2) {
// We don't report aggregated data if there was a single run.
return results;
}
@@ -179,7 +179,7 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
// Similarly, if there are N repetitions with 1 iterations each,
// an aggregate will be computed over N measurements, not 1.
// Thus it is best to simply use the count of separate reports.
data.iterations = reports.size();
data.iterations = static_cast<IterationCount>(reports.size());

data.real_accumulated_time = Stat.compute_(real_accumulated_time_stat);
data.cpu_accumulated_time = Stat.compute_(cpu_accumulated_time_stat);
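A sketch of why the first cast in this file is needed: std::count_if returns the iterator's signed difference_type, so subtracting it from a size_t is a sign conversion; the count is non-negative by construction, which makes the cast safe. The names here are illustrative:

#include <algorithm>
#include <cstddef>
#include <vector>

bool HasEnoughGoodRuns(const std::vector<bool>& skipped_flags) {
  auto skipped = std::count_if(skipped_flags.begin(), skipped_flags.end(),
                               [](bool s) { return s; });  // signed result
  return skipped_flags.size() - static_cast<std::size_t>(skipped) >= 2;
}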
2 changes: 1 addition & 1 deletion src/string_util.cc
@@ -56,7 +56,7 @@ void ToExponentAndMantissa(double val, int precision, double one_k,
scaled /= one_k;
if (scaled <= big_threshold) {
mantissa_stream << scaled;
*exponent = i + 1;
*exponent = static_cast<int64_t>(i + 1);
*mantissa = mantissa_stream.str();
return;
}
9 changes: 4 additions & 5 deletions src/sysinfo.cc
@@ -350,7 +350,7 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizesWindows() {
CPUInfo::CacheInfo C;
C.num_sharing = static_cast<int>(b.count());
C.level = cache.Level;
C.size = cache.Size;
C.size = static_cast<int>(cache.Size);
C.type = "Unknown";
switch (cache.Type) {
case CacheUnified:
@@ -485,9 +485,8 @@ int GetNumCPUsImpl() {
// positives.
std::memset(&sysinfo, 0, sizeof(SYSTEM_INFO));
GetSystemInfo(&sysinfo);
return sysinfo.dwNumberOfProcessors; // number of logical
// processors in the current
// group
// number of logical processors in the current group
return static_cast<int>(sysinfo.dwNumberOfProcessors);
#elif defined(BENCHMARK_OS_SOLARIS)
// Returns -1 in case of a failure.
long num_cpu = sysconf(_SC_NPROCESSORS_ONLN);
@@ -837,7 +836,7 @@ std::vector<double> GetLoadAvg() {
!(defined(__ANDROID__) && __ANDROID_API__ < 29)
static constexpr int kMaxSamples = 3;
std::vector<double> res(kMaxSamples, 0.0);
const int nelem = getloadavg(res.data(), kMaxSamples);
const size_t nelem = static_cast<size_t>(getloadavg(res.data(), kMaxSamples));
if (nelem < 1) {
res.clear();
} else {
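For reference, a hedged sketch, assuming a POSIX-style getloadavg(), of handling the signed return before going unsigned: getloadavg() reports failure with -1, so the check is done on the int result and only a non-negative count is converted for resize():

#include <cstddef>
#include <cstdlib>
#include <vector>

std::vector<double> LoadAverages() {
  std::vector<double> res(3, 0.0);
  const int nelem = getloadavg(res.data(), 3);  // -1 on failure
  if (nelem < 1) {
    res.clear();
  } else {
    res.resize(static_cast<std::size_t>(nelem));  // cast only when non-negative
  }
  return res;
}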
4 changes: 2 additions & 2 deletions src/timers.cc
@@ -245,9 +245,9 @@ std::string LocalDateTimeString() {
tz_offset_sign = '-';
}

tz_len =
tz_len = static_cast<size_t>(
::snprintf(tz_offset, sizeof(tz_offset), "%c%02li:%02li",
tz_offset_sign, offset_minutes / 100, offset_minutes % 100);
tz_offset_sign, offset_minutes / 100, offset_minutes % 100));
BM_CHECK(tz_len == kTzOffsetLen);
((void)tz_len); // Prevent unused variable warning in optimized build.
} else {
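A sketch of the snprintf pattern in this hunk: ::snprintf returns an int (negative on an encoding error), so storing the length in a size_t takes an explicit cast. The helper below is hypothetical and assumes the buffer is large enough:

#include <cstddef>
#include <cstdio>

std::size_t FormatTzOffset(char* buf, std::size_t len, char sign, long minutes) {
  int written = ::snprintf(buf, len, "%c%02li:%02li", sign,
                           minutes / 100, minutes % 100);
  // Convert only a non-negative result; report an empty length otherwise.
  return written < 0 ? 0 : static_cast<std::size_t>(written);
}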
1 change: 1 addition & 0 deletions test/BUILD
@@ -21,6 +21,7 @@ TEST_COPTS = [
## assert() are used a lot in tests upstream, which may be optimised out leading to
## unused-variable warning.
"-Wno-unused-variable",
"-Werror=old-style-cast",
]

# Some of the issues with DoNotOptimize only occur when optimization is enabled
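A small sketch, not taken from the tests, of what -Werror=old-style-cast turns into a hard error: C-style casts fail the build, the named C++ casts do not:

#include <cstdint>

double AsDouble(std::int64_t n) {
  // return (double)n;              // error: use of old-style cast
  return static_cast<double>(n);    // accepted
}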
2 changes: 1 addition & 1 deletion test/benchmark_gtest.cc
@@ -38,7 +38,7 @@ TEST(AddRangeTest, Advanced64) {

TEST(AddRangeTest, FullRange8) {
std::vector<int8_t> dst;
AddRange(&dst, int8_t{1}, std::numeric_limits<int8_t>::max(), int8_t{8});
AddRange(&dst, int8_t{1}, std::numeric_limits<int8_t>::max(), 8);
EXPECT_THAT(
dst, testing::ElementsAre(int8_t{1}, int8_t{8}, int8_t{64}, int8_t{127}));
}
24 changes: 12 additions & 12 deletions test/complexity_test.cc
@@ -71,11 +71,11 @@ void BM_Complexity_O1(benchmark::State &state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
double tmp = state.iterations();
double tmp = static_cast<double>(state.iterations());
benchmark::DoNotOptimize(tmp);
for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) {
benchmark::DoNotOptimize(state.iterations());
tmp *= state.iterations();
tmp *= static_cast<double>(state.iterations());
benchmark::DoNotOptimize(tmp);
}

@@ -120,16 +120,16 @@ void BM_Complexity_O_N(benchmark::State &state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
double tmp = state.iterations();
double tmp = static_cast<double>(state.iterations());
benchmark::DoNotOptimize(tmp);
for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) {
benchmark::DoNotOptimize(state.iterations());
tmp *= state.iterations();
tmp *= static_cast<double>(state.iterations());
benchmark::DoNotOptimize(tmp);
}

// 1ns per iteration per entry
state.SetIterationTime(state.range(0) * 42 * 1e-9);
state.SetIterationTime(static_cast<double>(state.range(0)) * 42 * 1e-9);
}
state.SetComplexityN(state.range(0));
}
@@ -178,16 +178,16 @@ static void BM_Complexity_O_N_log_N(benchmark::State &state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
double tmp = state.iterations();
double tmp = static_cast<double>(state.iterations());
benchmark::DoNotOptimize(tmp);
for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) {
benchmark::DoNotOptimize(state.iterations());
tmp *= state.iterations();
tmp *= static_cast<double>(state.iterations());
benchmark::DoNotOptimize(tmp);
}

state.SetIterationTime(state.range(0) * kLog2E * std::log(state.range(0)) *
42 * 1e-9);
state.SetIterationTime(static_cast<double>(state.range(0)) * kLog2E *
std::log(state.range(0)) * 42 * 1e-9);
}
state.SetComplexityN(state.range(0));
}
@@ -238,15 +238,15 @@ void BM_ComplexityCaptureArgs(benchmark::State &state, int n) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
double tmp = state.iterations();
double tmp = static_cast<double>(state.iterations());
benchmark::DoNotOptimize(tmp);
for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) {
benchmark::DoNotOptimize(state.iterations());
tmp *= state.iterations();
tmp *= static_cast<double>(state.iterations());
benchmark::DoNotOptimize(tmp);
}

state.SetIterationTime(state.range(0) * 42 * 1e-9);
state.SetIterationTime(static_cast<double>(state.range(0)) * 42 * 1e-9);
}
state.SetComplexityN(n);
}
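A sketch of the conversion repeated throughout this test: state.iterations() and state.range(0) are 64-bit integers, and converting them to double implicitly can lose precision under -Wconversion, so the casts are spelled out. The helper below is illustrative only:

#include <cstdint>

double ScaledIterationTime(std::int64_t n, double ns_per_entry) {
  // return n * ns_per_entry;                    // warning: 'int64_t' to 'double'
  return static_cast<double>(n) * ns_per_entry;  // explicit, no warning
}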