|
//===--- LayoutFieldRandomizer.cpp - Randstruct Implementation ---*- C++ -*-===//
| 3 | +// |
| 4 | +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 5 | +// See https://llvm.org/LICENSE.txt for license information. |
| 6 | +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 7 | +// |
| 8 | +//===----------------------------------------------------------------------===// |
| 9 | +// |
| 10 | +// Cache line best-effort field randomization |
| 11 | +// |
| 12 | +//===----------------------------------------------------------------------===// |
| 13 | + |
#include "LayoutFieldRandomizer.h"

#include "llvm/ADT/SmallVector.h"

#include <algorithm>
#include <cstdint>
#include <memory>
#include <random>
#include <utility>
#include <vector>
| 21 | + |
| 22 | +namespace clang { |
| 23 | + |
| 24 | +/// Bucket to store fields up to size of a cache line during randomization. |
| 25 | +class Bucket { |
| 26 | +public: |
| 27 | + virtual ~Bucket() = default; |
| 28 | + /// Returns a randomized version of the bucket. |
| 29 | + virtual SmallVector<FieldDecl *, 64> randomize(); |
| 30 | + /// Checks if an added element would fit in a cache line. |
| 31 | + virtual bool canFit(size_t size) const; |
| 32 | + /// Adds a field to the bucket. |
| 33 | + void add(FieldDecl *field, size_t size); |
| 34 | + /// Is this bucket for bitfields? |
| 35 | + virtual bool isBitfieldRun() const; |
| 36 | + /// Is this bucket full? |
| 37 | + bool full() const; |
| 38 | + bool empty() const; |
| 39 | + |
| 40 | +protected: |
| 41 | + size_t size; |
| 42 | + SmallVector<FieldDecl *, 64> fields; |
| 43 | +}; |
| 44 | + |
| 45 | +/// BitfieldRun is a bucket for storing adjacent bitfields that may |
| 46 | +/// exceed the size of a cache line. |
| 47 | +class BitfieldRun : public Bucket { |
| 48 | +public: |
| 49 | + virtual SmallVector<FieldDecl *, 64> randomize() override; |
| 50 | + virtual bool canFit(size_t size) const override; |
| 51 | + virtual bool isBitfieldRun() const override; |
| 52 | +}; |
| 53 | + |
/// Target bucket size: the assumed size of a cache line.
// TODO: Is there a way to detect this? (i.e. on 32bit system vs 64?)
// Hard-coded for now; ideally derived from the target.
constexpr size_t CACHE_LINE = 64;
| 56 | + |
| 57 | +SmallVector<FieldDecl *, 64> Bucket::randomize() { |
| 58 | + // TODO use seed |
| 59 | + auto rng = std::default_random_engine{}; |
| 60 | + std::shuffle(std::begin(fields), std::end(fields), rng); |
| 61 | + return fields; |
| 62 | +} |
| 63 | + |
| 64 | +bool Bucket::canFit(size_t size) const { |
| 65 | + // We will say we can fit any size if the bucket is empty |
| 66 | + // because there are many instances where a field is much |
| 67 | + // larger than 64 bits (i.e., an array, a structure, etc) |
| 68 | + // but it still must be placed into a bucket. |
| 69 | + // |
| 70 | + // Otherwise, if the bucket has elements and we're still |
| 71 | + // trying to create a cache-line sized grouping, we cannot |
| 72 | + // fit a larger field in here. |
| 73 | + return empty() || this->size + size <= CACHE_LINE; |
| 74 | +} |
| 75 | + |
| 76 | +void Bucket::add(FieldDecl *field, size_t size) { |
| 77 | + fields.push_back(field); |
| 78 | + this->size += size; |
| 79 | +} |
| 80 | + |
| 81 | +bool Bucket::isBitfieldRun() const { |
| 82 | + // The normal bucket is not a bitfieldrun. This is to avoid RTTI. |
| 83 | + return false; |
| 84 | +} |
| 85 | + |
| 86 | +bool Bucket::full() const { |
| 87 | + // We're full if our size is a cache line. |
| 88 | + return size >= CACHE_LINE; |
| 89 | +} |
| 90 | + |
| 91 | +bool Bucket::empty() const { return size == 0; } |
| 92 | + |
| 93 | +SmallVector<FieldDecl *, 64> BitfieldRun::randomize() { |
| 94 | + // Keep bit fields adjacent, we will not scramble them. |
| 95 | + return fields; |
| 96 | +} |
| 97 | + |
| 98 | +bool BitfieldRun::canFit(size_t size) const { |
| 99 | + // We can always fit another adjacent bitfield. |
| 100 | + return true; |
| 101 | +} |
| 102 | + |
| 103 | +bool BitfieldRun::isBitfieldRun() const { |
| 104 | + // Yes. |
| 105 | + return true; |
| 106 | +} |
| 107 | + |
| 108 | +SmallVector<Decl *, 64> randomize(SmallVector<Decl *, 64> fields) { |
| 109 | + auto rng = std::default_random_engine{}; |
| 110 | + std::shuffle(std::begin(fields), std::end(fields), rng); |
| 111 | + return fields; |
| 112 | +} |
| 113 | + |
| 114 | +SmallVector<Decl *, 64> perfrandomize(const ASTContext &ctx, |
| 115 | + SmallVector<Decl *, 64> fields) { |
| 116 | + // All of the buckets produced by best-effort cache-line algorithm. |
| 117 | + std::vector<std::unique_ptr<Bucket>> buckets; |
| 118 | + |
| 119 | + // The current bucket of fields that we are trying to fill to a cache-line. |
| 120 | + std::unique_ptr<Bucket> currentBucket = nullptr; |
| 121 | + // The current bucket containing the run of adjacent bitfields to ensure |
| 122 | + // they remain adjacent. |
| 123 | + std::unique_ptr<Bucket> currentBitfieldRun = nullptr; |
| 124 | + |
| 125 | + // Tracks the number of fields that we failed to fit to the current bucket, |
| 126 | + // and thus still need to be added later. |
| 127 | + size_t skipped = 0; |
| 128 | + |
| 129 | + while (!fields.empty()) { |
| 130 | + // If we've skipped more fields than we have remaining to place, |
| 131 | + // that means that they can't fit in our current bucket, and we |
| 132 | + // need to start a new one. |
| 133 | + if (skipped >= fields.size()) { |
| 134 | + skipped = 0; |
| 135 | + buckets.push_back(std::move(currentBucket)); |
| 136 | + } |
| 137 | + |
| 138 | + // Take the first field that needs to be put in a bucket. |
| 139 | + auto field = fields.begin(); |
| 140 | + auto *f = llvm::cast<FieldDecl>(*field); |
| 141 | + |
| 142 | + if (f->isBitField()) { |
| 143 | + // Start a bitfield run if this is the first bitfield |
| 144 | + // we have found. |
| 145 | + if (!currentBitfieldRun) { |
| 146 | + currentBitfieldRun = llvm::make_unique<BitfieldRun>(); |
| 147 | + } |
| 148 | + |
| 149 | + // We've placed the field, and can remove it from the |
| 150 | + // "awaiting buckets" vector called "fields" |
| 151 | + currentBitfieldRun->add(f, 1); |
| 152 | + fields.erase(field); |
| 153 | + } else { |
| 154 | + // Else, current field is not a bitfield |
| 155 | + // If we were previously in a bitfield run, end it. |
| 156 | + if (currentBitfieldRun) { |
| 157 | + buckets.push_back(std::move(currentBitfieldRun)); |
| 158 | + } |
| 159 | + // If we don't have a bucket, make one. |
| 160 | + if (!currentBucket) { |
| 161 | + currentBucket = llvm::make_unique<Bucket>(); |
| 162 | + } |
| 163 | + |
| 164 | + // FIXME get access to AST Context |
| 165 | + auto width = ctx.getTypeInfo(f->getType()).Width; |
| 166 | + |
| 167 | + // If we can fit, add it. |
| 168 | + if (currentBucket->canFit(width)) { |
| 169 | + currentBucket->add(f, width); |
| 170 | + fields.erase(field); |
| 171 | + |
| 172 | + // If it's now full, tie off the bucket. |
| 173 | + if (currentBucket->full()) { |
| 174 | + skipped = 0; |
| 175 | + buckets.push_back(std::move(currentBucket)); |
| 176 | + } |
| 177 | + } else { |
| 178 | + // We can't fit it in our current bucket. |
| 179 | + // Move to the end for processing later. |
| 180 | + ++skipped; // Mark it skipped. |
| 181 | + fields.push_back(f); |
| 182 | + fields.erase(field); |
| 183 | + } |
| 184 | + } |
| 185 | + } |
| 186 | + |
| 187 | + // Done processing the fields awaiting a bucket. |
| 188 | + |
| 189 | + // If we were filling a bucket, tie it off. |
| 190 | + if (currentBucket) { |
| 191 | + buckets.push_back(std::move(currentBucket)); |
| 192 | + } |
| 193 | + |
| 194 | + // If we were processing a bitfield run bucket, tie it off. |
| 195 | + if (currentBitfieldRun) { |
| 196 | + buckets.push_back(std::move(currentBitfieldRun)); |
| 197 | + } |
| 198 | + |
| 199 | + auto rng = std::default_random_engine{}; |
| 200 | + std::shuffle(std::begin(buckets), std::end(buckets), rng); |
| 201 | + |
| 202 | + // Produce the new ordering of the elements from our buckets. |
| 203 | + SmallVector<Decl *, 64> finalOrder; |
| 204 | + for (auto &bucket : buckets) { |
| 205 | + auto randomized = bucket->randomize(); |
| 206 | + finalOrder.insert(finalOrder.end(), randomized.begin(), randomized.end()); |
| 207 | + } |
| 208 | + |
| 209 | + return finalOrder; |
| 210 | +} |
| 211 | + |
| 212 | +SmallVector<Decl *, 64> rearrange(const ASTContext &ctx, |
| 213 | + SmallVector<Decl *, 64> fields) { |
| 214 | + return perfrandomize(ctx, fields); |
| 215 | +} |
| 216 | + |
| 217 | +} // namespace clang |
0 commit comments