// NOTE(review): extraction fragment — interior lines of the original file are
// missing between the numbered fragments below; comments only, code untouched.
//
// Fuzz harness for PoolResource<MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>:
// allocates, fills, checks and frees blocks while tracking every live
// allocation in m_entries.
19 template <std::
size_t MAX_BLOCK_SIZE_BYTES, std::
size_t ALIGN_BYTES>
20 class PoolResourceFuzzer
// Monotonic counter used to seed the deterministic fill pattern of each
// allocation (incremented once per Allocate; see m_sequence++ below).
24 uint64_t m_sequence{0};
// Running sum of the sizes of all live allocations; bounded in the
// fuzzer-driven Allocate() overload.
25 size_t m_total_allocated{};
// One live allocation: its memory span, the alignment it was requested
// with, and the seed its fill pattern was derived from.
32 Entry(
Span<std::byte> s,
size_t a, uint64_t se) : span(s), alignment(a), seed(se) {}
// All currently outstanding allocations.
35 std::vector<Entry> m_entries;
// Construct the resource under test with a fuzzer-chosen chunk size in
// [MAX_BLOCK_SIZE_BYTES, 262144].
39 : m_provider{provider},
40 m_test_resource{provider.ConsumeIntegralInRange<size_t>(MAX_BLOCK_SIZE_BYTES, 262144)}
// Allocate `size` bytes with `alignment` from the resource under test,
// verify the returned pointer satisfies the alignment, record the
// allocation and fill it with a reproducible pattern.
44 void Allocate(
size_t size,
size_t alignment)
// Preconditions: alignment is a power of two, and size is a multiple of
// the alignment.
48 assert((alignment & (alignment - 1)) == 0);
49 assert((size & (alignment - 1)) == 0);
51 auto span =
Span(
static_cast<std::byte*
>(m_test_resource.
Allocate(size, alignment)), size);
52 m_total_allocated += size;
// The pointer returned by the pool must honor the requested alignment.
54 auto ptr_val =
reinterpret_cast<std::uintptr_t
>(span.data());
55 assert((ptr_val & (alignment - 1)) == 0);
// Each allocation gets a fresh seed so its contents can be re-derived and
// verified on deallocation (see RandomContentCheck).
57 uint64_t seed = m_sequence++;
58 RandomContentFill(m_entries.emplace_back(span, alignment, seed));
// Fuzzer-driven allocation: pick a power-of-two alignment in [1, 128] and
// a size that is a multiple of it, then delegate to Allocate(size, alignment).
// Bail out once total live memory exceeds 16 MiB (0x1000000) to keep the
// harness's memory use bounded.
64 if (m_total_allocated > 0x1000000)
return;
65 size_t alignment_bits = m_provider.ConsumeIntegralInRange<
size_t>(0, 7);
66 size_t alignment = 1 << alignment_bits;
67 size_t size_bits = m_provider.ConsumeIntegralInRange<
size_t>(0, 16 - alignment_bits);
// size is drawn from [2^size_bits, 2^(size_bits+1)) and then shifted left
// by alignment_bits, so (size & (alignment - 1)) == 0 holds, satisfying
// the assert in Allocate(size, alignment).
68 size_t size = m_provider.ConsumeIntegralInRange<
size_t>(1U << size_bits, (1U << (size_bits + 1)) - 1U) << alignment_bits;
69 Allocate(size, alignment);
// Fill the entry's memory with a deterministic pseudorandom pattern derived
// from entry.seed, so RandomContentCheck can later regenerate the same
// pattern and detect any corruption. (The RNG setup and the loop headers —
// original lines 73-74, 77-79, 81-85 — are missing from this fragment;
// presumably full 8-byte words are written first, then a < 8-byte tail.)
72 void RandomContentFill(Entry& entry)
75 auto ptr = entry.span.data();
76 auto size = entry.span.size();
// Write one full 8-byte word of the pattern.
80 std::memcpy(ptr, &r, 8);
// Write the remaining tail bytes (size here is the residual, < 8).
86 std::memcpy(ptr, &r, size);
// Regenerate the deterministic pattern from entry.seed into a scratch
// buffer and compare it against the entry's memory; any mismatch means the
// pool handed out overlapping blocks or otherwise corrupted the data.
// (RNG setup and loop headers are missing from this fragment, mirroring
// RandomContentFill.)
90 void RandomContentCheck(
const Entry& entry)
93 auto ptr = entry.span.data();
94 auto size = entry.span.size();
// Compare one full 8-byte word of the expected pattern.
99 std::memcpy(buf, &r, 8);
100 assert(std::memcmp(buf, ptr, 8) == 0);
// Compare the remaining tail bytes (size here is the residual, < 8).
106 std::memcpy(buf, &r, size);
107 assert(std::memcmp(buf, ptr, size) == 0);
// Return one allocation to the resource: re-verify pointer alignment and
// content integrity, update the live-byte counter, then free the block
// with the exact size/alignment it was allocated with.
111 void Deallocate(
const Entry& entry)
113 auto ptr_val =
reinterpret_cast<std::uintptr_t
>(entry.span.data());
114 assert((ptr_val & (entry.alignment - 1)) == 0);
// The fill pattern must have survived — detects overlapping allocations
// or pool bookkeeping corrupting user data.
115 RandomContentCheck(entry);
116 m_total_allocated -= entry.span.size();
117 m_test_resource.
Deallocate(entry.span.data(), entry.span.size(), entry.alignment);
// Deallocate a fuzzer-chosen live entry; a no-op when none exist.
// Removal uses the swap-with-back idiom so erasure is O(1) and the order
// of m_entries is irrelevant.
122 if (m_entries.empty()) {
126 size_t idx = m_provider.ConsumeIntegralInRange<
size_t>(0, m_entries.size() - 1);
127 Deallocate(m_entries[idx]);
// Move the last entry into the freed slot (skipped when the freed entry
// already is the last one), then drop the tail.
128 if (idx != m_entries.size() - 1) {
129 m_entries[idx] = std::move(m_entries.back());
131 m_entries.pop_back();
// Fuzz driver fragments: after the CallOneOf action loop (its other
// branches — original lines 137-149 — are missing from this fragment),
// drain every remaining allocation so the resource's final consistency
// check can account for all memory.
136 while (!m_entries.empty()) {
150 [&] { Deallocate(); });
// FUZZ_TARGET dispatch: exercise PoolResource with a spread of
// <MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES> template parameter combinations —
// varying alignments for a fixed 128-byte block size, minimal resources
// (8/8, 16/16), and larger 256-byte configurations.
164 [&] { PoolResourceFuzzer<128, 1>{provider}.Fuzz(); },
165 [&] { PoolResourceFuzzer<128, 2>{provider}.Fuzz(); },
166 [&] { PoolResourceFuzzer<128, 4>{provider}.Fuzz(); },
167 [&] { PoolResourceFuzzer<128, 8>{provider}.Fuzz(); },
169 [&] { PoolResourceFuzzer<8, 8>{provider}.Fuzz(); },
170 [&] { PoolResourceFuzzer<16, 16>{provider}.Fuzz(); },
// alignof(max_align_t) covers the platform's strictest fundamental alignment.
172 [&] { PoolResourceFuzzer<256,
alignof(max_align_t)>{provider}.Fuzz(); },
173 [&] { PoolResourceFuzzer<256, 64>{provider}.Fuzz(); });
A memory resource similar to std::pmr::unsynchronized_pool_resource, but optimized for node-based containers.
void Deallocate(void *p, std::size_t bytes, std::size_t alignment) noexcept
Returns a block to the freelists, or deletes the block when it did not come from the chunks.
void * Allocate(std::size_t bytes, std::size_t alignment)
Allocates a block of bytes.
static void CheckAllDataAccountedFor(const PoolResource< MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES > &resource)
Once all blocks are given back to the resource, tests that the freelists are consistent.
A Span is an object that can refer to a contiguous sequence of objects.
#define LIMITED_WHILE(condition, limit)
Can be used to limit a theoretically unbounded loop.
FUZZ_TARGET(pool_resource)
Span(T *, EndOrSize) -> Span< T >
size_t CallOneOf(FuzzedDataProvider &fuzzed_data_provider, Callables... callables)