Modify the tests so that all clang warnings can be turned up to the highest level.
Fix all places flagged by -Wconversion.
Fix a few unused variables not marked with UNUSED.
For the memtag testing, only compile some tests for 64-bit targets, since
compiling them for 32-bit leads to warnings/errors. All of those tests
are already skipped on 32-bit OSes, so this does not affect any real
test coverage.
Reviewed By: Chia-hungDuan
Differential Revision: https://reviews.llvm.org/D155749
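For reference, the three fix patterns applied throughout the hunks below look
roughly like this (a minimal sketch, not lifted verbatim from the patch; the
names reuse identifiers that appear in the diff, and UNUSED is scudo's
unused-attribute macro):

  // 1. -Wconversion: make sign/width conversions explicit. std::rand()
  //    returns int, so cast it before mixing it with unsigned arithmetic.
  const scudo::uptr Size = static_cast<scudo::uptr>(std::rand()) % 4096U;

  // 2. Unused parameters: keep the name for readability and silence the
  //    warning with the UNUSED attribute macro.
  static void callback(uintptr_t Base, UNUSED size_t Size, UNUSED void *Arg);

  // 3. Memtag tests: compile the body only for 64 bit; the tests are
  //    already skipped at runtime everywhere else.
  #if defined(__LP64__)
  // ... test body ...
  #endif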
for (scudo::uptr AlignLog = MinAlignLog; AlignLog <= 16U; AlignLog++) {
const scudo::uptr Align = 1U << AlignLog;
for (scudo::sptr Delta = -32; Delta <= 32; Delta++) {
- if (static_cast<scudo::sptr>(1U << SizeLog) + Delta < 0)
+ if ((1LL << SizeLog) + Delta < 0)
continue;
- const scudo::uptr Size = (1U << SizeLog) + Delta;
+ const scudo::uptr Size =
+ static_cast<scudo::uptr>((1LL << SizeLog) + Delta);
void *P = Allocator->allocate(Size, Origin, Align);
EXPECT_NE(P, nullptr);
EXPECT_TRUE(Allocator->isOwned(P));
- const char Marker = 0xab;
+ const char Marker = static_cast<char>(0xab);
memset(P, Marker, ReallocSize);
for (scudo::sptr Delta = -32; Delta < 32; Delta += 8) {
- const scudo::uptr NewSize = ReallocSize + Delta;
+ const scudo::uptr NewSize =
+ static_cast<scudo::uptr>(static_cast<scudo::sptr>(ReallocSize) + Delta);
void *NewP = Allocator->reallocate(P, NewSize);
EXPECT_EQ(NewP, P);
for (scudo::uptr I = 0; I < ReallocSize - 32; I++)
std::vector<void *> V;
for (scudo::uptr I = 0; I < 64U; I++)
V.push_back(Allocator->allocate(
- rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin));
+ static_cast<scudo::uptr>(std::rand()) %
+ (TypeParam::Primary::SizeClassMap::MaxSize / 2U),
+ Origin));
Allocator->disable();
Allocator->iterateOverChunks(
0U, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1),
- [](uintptr_t Base, size_t Size, void *Arg) {
+ [](uintptr_t Base, UNUSED size_t Size, void *Arg) {
std::vector<void *> *V = reinterpret_cast<std::vector<void *> *>(Arg);
void *P = reinterpret_cast<void *>(Base);
EXPECT_NE(std::find(V->begin(), V->end(), P), V->end());
std::vector<void *> V;
for (scudo::uptr I = 0; I < 64U; I++)
V.push_back(Allocator->allocate(
- rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin));
+ static_cast<scudo::uptr>(std::rand()) %
+ (TypeParam::Primary::SizeClassMap::MaxSize / 2U),
+ Origin));
for (auto P : V)
Allocator->deallocate(P, Origin);
std::vector<void *> V;
for (scudo::uptr I = 0; I < 64U; I++)
V.push_back(Allocator->allocate(
- rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin));
+ static_cast<scudo::uptr>(std::rand()) %
+ (TypeParam::Primary::SizeClassMap::MaxSize / 2U),
+ Origin));
for (auto P : V)
Allocator->deallocate(P, Origin);
}
std::vector<std::pair<void *, scudo::uptr>> V;
for (scudo::uptr I = 0; I < 256U; I++) {
- const scudo::uptr Size = std::rand() % 4096U;
+ const scudo::uptr Size = static_cast<scudo::uptr>(std::rand()) % 4096U;
void *P = Allocator->allocate(Size, Origin);
// A region could have run out of memory, resulting in a null P.
if (P)
// Regression test: make realloc-in-place happen at the very right end of a
// mapped region.
- constexpr int nPtrs = 10000;
- for (int i = 1; i < 32; ++i) {
+ constexpr size_t nPtrs = 10000;
+ for (scudo::uptr i = 1; i < 32; ++i) {
scudo::uptr Size = 16 * i - 1;
std::vector<void *> Ptrs;
- for (int i = 0; i < nPtrs; ++i) {
+ for (size_t i = 0; i < nPtrs; ++i) {
void *P = Allocator->allocate(Size, Origin);
P = Allocator->reallocate(P, Size + 1);
Ptrs.push_back(P);
}
- for (int i = 0; i < nPtrs; ++i)
+ for (size_t i = 0; i < nPtrs; ++i)
Allocator->deallocate(Ptrs[i], Origin);
}
}
}
TEST_F(MemtagTest, ExtractTag) {
+// The test is already skipped on anything other than 64 bit. But
+// compiling on 32 bit leads to warnings/errors, so skip compiling the test.
+#if defined(__LP64__)
uptr Tags = 0;
// Try all values for the top byte and check that the tag values are in
// the expected range.
for (u64 Top = 0; Top < 0x100; ++Top)
Tags = Tags | (1u << extractTag(Addr | (Top << 56)));
EXPECT_EQ(0xffffull, Tags);
+#endif
}
TEST_F(MemtagDeathTest, AddFixedTag) {
}
TEST_F(MemtagTest, SelectRandomTagWithMask) {
+// The test is already skipped on anything other than 64 bit. But
+// compiling on 32 bit leads to warnings/errors, so skip compiling the test.
+#if defined(__LP64__)
for (uptr j = 0; j < 32; ++j) {
for (uptr i = 0; i < 1000; ++i)
EXPECT_NE(j, extractTag(selectRandomTag(Addr, 1ull << j)));
}
+#endif
}
TEST_F(MemtagDeathTest, SKIP_NO_DEBUG(LoadStoreTagUnaligned)) {
}
TEST_F(MemtagTest, StoreTags) {
+// The test is already skipped on anything other than 64 bit. But
+// compiling on 32 bit leads to warnings/errors, so skip compiling the test.
+#if defined(__LP64__)
const uptr MaxTaggedSize = 4 * archMemoryTagGranuleSize();
for (uptr Size = 0; Size <= MaxTaggedSize; ++Size) {
uptr NoTagBegin = Addr + archMemoryTagGranuleSize();
// Reset tags without using StoreTags.
MemMap.releasePagesToOS(Addr, BufferSize);
}
+#endif
}
} // namespace scudo
Cache.init(nullptr, Allocator.get());
std::vector<std::pair<scudo::uptr, void *>> V;
for (scudo::uptr I = 0; I < 64U; I++) {
- const scudo::uptr Size = std::rand() % Primary::SizeClassMap::MaxSize;
+ const scudo::uptr Size =
+ static_cast<scudo::uptr>(std::rand()) % Primary::SizeClassMap::MaxSize;
const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
void *P = Cache.allocate(ClassId);
V.push_back(std::make_pair(ClassId, P));
Cv.wait(Lock);
}
for (scudo::uptr I = 0; I < 256U; I++) {
- const scudo::uptr Size =
- std::rand() % Primary::SizeClassMap::MaxSize / 4;
+ const scudo::uptr Size = static_cast<scudo::uptr>(std::rand()) %
+ Primary::SizeClassMap::MaxSize / 4;
const scudo::uptr ClassId =
Primary::SizeClassMap::getClassIdBySize(Size);
void *P = Cache.allocate(ClassId);
// Strip trailing '.'-pages before comparing the results as they are not
// going to be reported to range_recorder anyway.
const char *LastX = strrchr(TestCase, 'x');
- std::string Expected(TestCase,
- LastX == nullptr ? 0 : (LastX - TestCase + 1));
+ std::string Expected(
+ TestCase,
+ LastX == nullptr ? 0U : static_cast<size_t>(LastX - TestCase + 1));
EXPECT_STREQ(Expected.c_str(), Recorder.ReportedPages.c_str());
}
}
AlignLog++) {
const scudo::uptr Align = 1U << AlignLog;
for (scudo::sptr Delta = -128; Delta <= 128; Delta += 8) {
- if (static_cast<scudo::sptr>(1U << SizeLog) + Delta <= 0)
+ if ((1LL << SizeLog) + Delta <= 0)
continue;
- const scudo::uptr UserSize =
- scudo::roundUp((1U << SizeLog) + Delta, MinAlign);
+ const scudo::uptr UserSize = scudo::roundUp(
+ static_cast<scudo::uptr>((1LL << SizeLog) + Delta), MinAlign);
const scudo::uptr Size =
HeaderSize + UserSize + (Align > MinAlign ? Align - HeaderSize : 0);
void *P = Allocator->allocate(Options, Size, Align);
std::vector<void *> V;
const scudo::uptr PageSize = scudo::getPageSizeCached();
for (scudo::uptr I = 0; I < 32U; I++)
- V.push_back(Allocator->allocate(Options, (std::rand() % 16) * PageSize));
+ V.push_back(Allocator->allocate(
+ Options, (static_cast<scudo::uptr>(std::rand()) % 16U) * PageSize));
auto Lambda = [&V](scudo::uptr Block) {
EXPECT_NE(std::find(V.begin(), V.end(), reinterpret_cast<void *>(Block)),
V.end());
}
for (scudo::uptr I = 0; I < 128U; I++) {
// Deallocate 75% of the blocks.
- const bool Deallocate = (rand() & 3) != 0;
- void *P = Allocator->allocate(Options, (std::rand() % 16) * PageSize);
+ const bool Deallocate = (std::rand() & 3) != 0;
+ void *P = Allocator->allocate(
+ Options, (static_cast<scudo::uptr>(std::rand()) % 16U) * PageSize);
if (Deallocate)
Allocator->deallocate(Options, P);
else
void unmapTestOnly() { TSDRegistry.unmapTestOnly(this); }
void initCache(CacheT *Cache) { *Cache = {}; }
- void commitBack(scudo::TSD<MockAllocator> *TSD) {}
+ void commitBack(UNUSED scudo::TSD<MockAllocator> *TSD) {}
TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
void callPostInitCallback() {}
TEST(ScudoWrappersCTest, OtherAlloc) {
#if HAVE_PVALLOC
- const size_t PageSize = sysconf(_SC_PAGESIZE);
+ const size_t PageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
void *P = pvalloc(Size);
EXPECT_NE(P, nullptr);
static uintptr_t BoundaryP;
static size_t Count;
-static void callback(uintptr_t Base, size_t Size, void *Arg) {
+static void callback(uintptr_t Base, UNUSED size_t Size, UNUSED void *Arg) {
if (scudo::archSupportsMemoryTagging()) {
Base = scudo::untagPointer(Base);
BoundaryP = scudo::untagPointer(BoundaryP);
// aligned on a page, then run the malloc_iterate on both the pages that the
// block is a boundary for. It must only be seen once by the callback function.
TEST(ScudoWrappersCTest, MallocIterateBoundary) {
- const size_t PageSize = sysconf(_SC_PAGESIZE);
+ const size_t PageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
#if SCUDO_ANDROID
// Android uses a 16 byte alignment for both 32 bit and 64 bit.
const size_t BlockDelta = 16U;
static pthread_cond_t Conditional = PTHREAD_COND_INITIALIZER;
static bool Ready;
-static void *enableMalloc(void *Unused) {
+static void *enableMalloc(UNUSED void *Unused) {
// Initialize the allocator for this thread.
void *P = malloc(Size);
EXPECT_NE(P, nullptr);
Cv.wait(Lock);
}
for (size_t I = 0; I < 256U; I++) {
- const size_t N = std::rand() % 128U;
+ const size_t N = static_cast<size_t>(std::rand()) % 128U;
uintptr_t *P = new uintptr_t[N];
if (P) {
memset(P, 0x42, sizeof(uintptr_t) * N);