[AMDGPU] Utilities to asan instrument memory instructions. #98863
Conversation
@llvm/pr-subscribers-backend-amdgpu
Author: Chaitanya (skc7)
Changes: This PR adds the utilities required to asan instrument memory instructions. These utility APIs are picked from AddressSanitizer.cpp.
Full diff: https://github.com/llvm/llvm-project/pull/98863.diff
3 Files Affected:
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUAsanInstrumentation.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUAsanInstrumentation.cpp
new file mode 100644
index 0000000000000..09d674f42d67b
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUAsanInstrumentation.cpp
@@ -0,0 +1,282 @@
+#include "AMDGPUAsanInstrumentation.h"
+
+#define DEBUG_TYPE "amdgpu-asan-instrumentation"
+
+using namespace llvm;
+
+namespace llvm {
+namespace AMDGPU {
+
+const char kAMDGPUBallotName[] = "llvm.amdgcn.ballot.i64";
+const char kAMDGPUUnreachableName[] = "llvm.amdgcn.unreachable";
+const char kAMDGPULDSKernelId[] = "llvm.amdgcn.lds.kernel.id";
+
+static const uint64_t kSmallX86_64ShadowOffsetBase = 0x7FFFFFFF;
+static const uint64_t kSmallX86_64ShadowOffsetAlignMask = ~0xFFFULL;
+
+static uint64_t getRedzoneSizeForScale(int AsanScale) {
+ // Redzone used for stack and globals is at least 32 bytes.
+ // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
+ return std::max(32U, 1U << AsanScale);
+}
+
+static uint64_t getMinRedzoneSizeForGlobal(int AsanScale) {
+ return getRedzoneSizeForScale(AsanScale);
+}
+
+uint64_t getRedzoneSizeForGlobal(int AsanScale, uint64_t SizeInBytes) {
+ constexpr uint64_t kMaxRZ = 1 << 18;
+ const uint64_t MinRZ = getMinRedzoneSizeForGlobal(AsanScale);
+
+ uint64_t RZ = 0;
+ if (SizeInBytes <= MinRZ / 2) {
+ // Reduce redzone size for small size objects, e.g. int, char[1]. MinRZ is
+ // at least 32 bytes, optimize when SizeInBytes is less than or equal to
+ // half of MinRZ.
+ RZ = MinRZ - SizeInBytes;
+ } else {
+ // Calculate RZ, where MinRZ <= RZ <= MaxRZ, and RZ ~ 1/4 * SizeInBytes.
+ RZ = std::clamp((SizeInBytes / MinRZ / 4) * MinRZ, MinRZ, kMaxRZ);
+
+ // Round up to multiple of MinRZ.
+ if (SizeInBytes % MinRZ)
+ RZ += MinRZ - (SizeInBytes % MinRZ);
+ }
+
+ assert((RZ + SizeInBytes) % MinRZ == 0);
+
+ return RZ;
+}
+
+static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize) {
+ size_t Res = llvm::countr_zero(TypeSize / 8);
+ return Res;
+}
+
+static Instruction *genAMDGPUReportBlock(Module &M, IRBuilder<> &IRB,
+ Value *Cond, bool Recover) {
+ Value *ReportCond = Cond;
+ if (!Recover) {
+ auto Ballot = M.getOrInsertFunction(kAMDGPUBallotName, IRB.getInt64Ty(),
+ IRB.getInt1Ty());
+ ReportCond = IRB.CreateIsNotNull(IRB.CreateCall(Ballot, {Cond}));
+ }
+
+ auto *Trm = SplitBlockAndInsertIfThen(
+ ReportCond, &*IRB.GetInsertPoint(), false,
+ MDBuilder(M.getContext()).createBranchWeights(1, 100000));
+ Trm->getParent()->setName("asan.report");
+
+ if (Recover)
+ return Trm;
+
+ Trm = SplitBlockAndInsertIfThen(Cond, Trm, false);
+ IRB.SetInsertPoint(Trm);
+ return IRB.CreateCall(
+ M.getOrInsertFunction(kAMDGPUUnreachableName, IRB.getVoidTy()), {});
+}
+
+static Value *createSlowPathCmp(Module &M, IRBuilder<> &IRB, Value *AddrLong,
+ Value *ShadowValue, uint32_t TypeStoreSize,
+ int AsanScale) {
+
+ unsigned int LongSize = M.getDataLayout().getPointerSizeInBits();
+ IntegerType *IntptrTy = Type::getIntNTy(M.getContext(), LongSize);
+ size_t Granularity = static_cast<size_t>(1) << AsanScale;
+ // Addr & (Granularity - 1)
+ Value *LastAccessedByte =
+ IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
+ // (Addr & (Granularity - 1)) + size - 1
+ if (TypeStoreSize / 8 > 1)
+ LastAccessedByte = IRB.CreateAdd(
+ LastAccessedByte, ConstantInt::get(IntptrTy, TypeStoreSize / 8 - 1));
+ // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
+ LastAccessedByte =
+ IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
+ // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
+ return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
+}
+
+static Instruction *generateCrashCode(Module &M, IRBuilder<> &IRB,
+ Instruction *InsertBefore, Value *Addr,
+ bool IsWrite, size_t AccessSizeIndex,
+ Value *SizeArgument, bool Recover) {
+ IRB.SetInsertPoint(InsertBefore);
+ CallInst *Call = nullptr;
+ int LongSize = M.getDataLayout().getPointerSizeInBits();
+ Type *IntptrTy = Type::getIntNTy(M.getContext(), LongSize);
+ const char kAsanReportErrorTemplate[] = "__asan_report_";
+ const std::string TypeStr = IsWrite ? "store" : "load";
+ const std::string EndingStr = Recover ? "_noabort" : "";
+ SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy};
+ AttributeList AL2;
+ FunctionCallee AsanErrorCallbackSized = M.getOrInsertFunction(
+ kAsanReportErrorTemplate + TypeStr + "_n" + EndingStr,
+ FunctionType::get(IRB.getVoidTy(), Args2, false), AL2);
+ const std::string Suffix = TypeStr + llvm::itostr(1ULL << AccessSizeIndex);
+ SmallVector<Type *, 2> Args1{1, IntptrTy};
+ AttributeList AL1;
+ FunctionCallee AsanErrorCallback = M.getOrInsertFunction(
+ kAsanReportErrorTemplate + Suffix + EndingStr,
+ FunctionType::get(IRB.getVoidTy(), Args1, false), AL1);
+ if (SizeArgument) {
+ Call = IRB.CreateCall(AsanErrorCallbackSized, {Addr, SizeArgument});
+ } else {
+ Call = IRB.CreateCall(AsanErrorCallback, Addr);
+ }
+
+ Call->setCannotMerge();
+ return Call;
+}
+
+static Value *memToShadow(Module &M, IRBuilder<> &IRB, Value *Shadow,
+ int AsanScale, uint32_t AsanOffset) {
+ int LongSize = M.getDataLayout().getPointerSizeInBits();
+ Type *IntptrTy = Type::getIntNTy(M.getContext(), LongSize);
+ // Shadow >> scale
+ Shadow = IRB.CreateLShr(Shadow, AsanScale);
+ if (AsanOffset == 0)
+ return Shadow;
+ // (Shadow >> scale) | offset
+ Value *ShadowBase = ConstantInt::get(IntptrTy, AsanOffset);
+ return IRB.CreateAdd(Shadow, ShadowBase);
+}
+
+void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns,
+ Instruction *InsertBefore, Value *Addr,
+ MaybeAlign Alignment, uint32_t TypeStoreSize,
+ bool IsWrite, Value *SizeArgument, bool UseCalls,
+ bool Recover, int AsanScale, int AsanOffset) {
+ int LongSize = M.getDataLayout().getPointerSizeInBits();
+ Type *IntptrTy = Type::getIntNTy(M.getContext(), LongSize);
+ IRB.SetInsertPoint(InsertBefore);
+ size_t AccessSizeIndex = TypeStoreSizeToSizeIndex(TypeStoreSize);
+ Type *ShadowTy = IntegerType::get(M.getContext(),
+ std::max(8U, TypeStoreSize >> AsanScale));
+ Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
+ Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
+ Value *ShadowPtr = memToShadow(M, IRB, AddrLong, AsanScale, AsanOffset);
+ const uint64_t ShadowAlign =
+ std::max<uint64_t>(Alignment.valueOrOne().value() >> AsanScale, 1);
+ Value *ShadowValue = IRB.CreateAlignedLoad(
+ ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy), Align(ShadowAlign));
+ Value *Cmp = IRB.CreateIsNotNull(ShadowValue);
+ auto *Cmp2 = createSlowPathCmp(M, IRB, AddrLong, ShadowValue, TypeStoreSize,
+ AsanScale);
+ Cmp = IRB.CreateAnd(Cmp, Cmp2);
+ Instruction *CrashTerm = genAMDGPUReportBlock(M, IRB, Cmp, Recover);
+ Instruction *Crash =
+ generateCrashCode(M, IRB, CrashTerm, AddrLong, IsWrite, AccessSizeIndex,
+ SizeArgument, Recover);
+ if (OrigIns->getDebugLoc())
+ Crash->setDebugLoc(OrigIns->getDebugLoc());
+ return;
+}
+
+void getInterestingMemoryOperands(
+ Module &M, Instruction *I,
+ SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
+ const DataLayout &DL = M.getDataLayout();
+ unsigned int LongSize = M.getDataLayout().getPointerSizeInBits();
+ Type *IntptrTy = Type::getIntNTy(M.getContext(), LongSize);
+ if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
+ Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
+ LI->getType(), LI->getAlign());
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
+ Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
+ SI->getValueOperand()->getType(), SI->getAlign());
+ } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
+ Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
+ RMW->getValOperand()->getType(), std::nullopt);
+ } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
+ Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
+ XCHG->getCompareOperand()->getType(),
+ std::nullopt);
+ } else if (auto CI = dyn_cast<CallInst>(I)) {
+ switch (CI->getIntrinsicID()) {
+ case Intrinsic::masked_load:
+ case Intrinsic::masked_store:
+ case Intrinsic::masked_gather:
+ case Intrinsic::masked_scatter: {
+ bool IsWrite = CI->getType()->isVoidTy();
+ // Masked store has an initial operand for the value.
+ unsigned OpOffset = IsWrite ? 1 : 0;
+ Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
+ MaybeAlign Alignment = Align(1);
+ // Otherwise no alignment guarantees. We probably got Undef.
+ if (auto *Op = dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
+ Alignment = Op->getMaybeAlignValue();
+ Value *Mask = CI->getOperand(2 + OpOffset);
+ Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask);
+ break;
+ }
+ case Intrinsic::masked_expandload:
+ case Intrinsic::masked_compressstore: {
+ bool IsWrite = CI->getIntrinsicID() == Intrinsic::masked_compressstore;
+ unsigned OpOffset = IsWrite ? 1 : 0;
+ auto BasePtr = CI->getOperand(OpOffset);
+ MaybeAlign Alignment = BasePtr->getPointerAlignment(DL);
+ Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
+ IRBuilder<> IB(I);
+ Value *Mask = CI->getOperand(1 + OpOffset);
+ // Use the popcount of Mask as the effective vector length.
+ Type *ExtTy = VectorType::get(IntptrTy, cast<VectorType>(Ty));
+ Value *ExtMask = IB.CreateZExt(Mask, ExtTy);
+ Value *EVL = IB.CreateAddReduce(ExtMask);
+ Value *TrueMask = ConstantInt::get(Mask->getType(), 1);
+ Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, TrueMask,
+ EVL);
+ break;
+ }
+ case Intrinsic::vp_load:
+ case Intrinsic::vp_store:
+ case Intrinsic::experimental_vp_strided_load:
+ case Intrinsic::experimental_vp_strided_store: {
+ auto *VPI = cast<VPIntrinsic>(CI);
+ unsigned IID = CI->getIntrinsicID();
+ bool IsWrite = CI->getType()->isVoidTy();
+ unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
+ Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
+ MaybeAlign Alignment = VPI->getOperand(PtrOpNo)->getPointerAlignment(DL);
+ Value *Stride = nullptr;
+ if (IID == Intrinsic::experimental_vp_strided_store ||
+ IID == Intrinsic::experimental_vp_strided_load) {
+ Stride = VPI->getOperand(PtrOpNo + 1);
+ // Use the pointer alignment as the element alignment if the stride is a
+ // multiple of the pointer alignment. Otherwise, the element alignment
+ // should be Align(1).
+ unsigned PointerAlign = Alignment.valueOrOne().value();
+ if (!isa<ConstantInt>(Stride) ||
+ cast<ConstantInt>(Stride)->getZExtValue() % PointerAlign != 0)
+ Alignment = Align(1);
+ }
+ Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
+ VPI->getMaskParam(), VPI->getVectorLengthParam(),
+ Stride);
+ break;
+ }
+ case Intrinsic::vp_gather:
+ case Intrinsic::vp_scatter: {
+ auto *VPI = cast<VPIntrinsic>(CI);
+ unsigned IID = CI->getIntrinsicID();
+ bool IsWrite = IID == Intrinsic::vp_scatter;
+ unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
+ Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
+ MaybeAlign Alignment = VPI->getPointerAlignment();
+ Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
+ VPI->getMaskParam(),
+ VPI->getVectorLengthParam());
+ break;
+ }
+ default:
+ for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
+ if (!CI->isByValArgument(ArgNo))
+ continue;
+ Type *Ty = CI->getParamByValType(ArgNo);
+ Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
+ }
+ }
+ }
+}
+} // end namespace AMDGPU
+} // end namespace llvm
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUAsanInstrumentation.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUAsanInstrumentation.h
new file mode 100644
index 0000000000000..fa417bf97205b
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUAsanInstrumentation.h
@@ -0,0 +1,60 @@
+//===- AMDGPUAsanInstrumentation.h - ASan instrumentation utils -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AMDGPU_UTILS_AMDGPU_ASAN_INSTRUMENTATION_H
+#define LLVM_LIB_TARGET_AMDGPU_UTILS_AMDGPU_ASAN_INSTRUMENTATION_H
+
+#include "AMDGPU.h"
+#include "AMDGPUBaseInfo.h"
+#include "AMDGPUMemoryUtils.h"
+#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/OptimizedStructLayout.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
+#include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/ModuleUtils.h"
+
+namespace llvm {
+namespace AMDGPU {
+
+/// Given SizeInBytes of the Value to be instrumented,
+/// returns the redzone size corresponding to it.
+uint64_t getRedzoneSizeForGlobal(int Scale, uint64_t SizeInBytes);
+
+/// Instrument the memory operand Addr.
+/// Generates report blocks that catch the addressing errors.
+void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns,
+ Instruction *InsertBefore, Value *Addr,
+ MaybeAlign Alignment, uint32_t TypeStoreSize,
+ bool IsWrite, Value *SizeArgument, bool UseCalls,
+ bool Recover, int Scale, int Offset);
+
+/// Get all the memory operands of the instruction
+/// that need to be instrumented.
+void getInterestingMemoryOperands(
+ Module &M, Instruction *I,
+ SmallVectorImpl<InterestingMemoryOperand> &Interesting);
+
+} // end namespace AMDGPU
+} // end namespace llvm
+
+#endif // LLVM_LIB_TARGET_AMDGPU_UTILS_AMDGPU_ASAN_INSTRUMENTATION_H
diff --git a/llvm/lib/Target/AMDGPU/Utils/CMakeLists.txt b/llvm/lib/Target/AMDGPU/Utils/CMakeLists.txt
index 09b8da9f5dd48..4d69fb67db860 100644
--- a/llvm/lib/Target/AMDGPU/Utils/CMakeLists.txt
+++ b/llvm/lib/Target/AMDGPU/Utils/CMakeLists.txt
@@ -1,4 +1,5 @@
add_llvm_component_library(LLVMAMDGPUUtils
+ AMDGPUAsanInstrumentation.cpp
AMDGPUAsmUtils.cpp
AMDGPUBaseInfo.cpp
AMDGPUDelayedMCExpr.cpp
These utility APIs are picked from AddressSanitizer.cpp. Instead of copying them, can we move them somewhere common?
#include "AMDGPUAsanInstrumentation.h"
Missing file header
const char kAMDGPUBallotName[] = "llvm.amdgcn.ballot.i64";
const char kAMDGPUUnreachableName[] = "llvm.amdgcn.unreachable";
const char kAMDGPULDSKernelId[] = "llvm.amdgcn.lds.kernel.id";
StringLiteral?
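A minimal sketch of that suggestion (illustrative only, not the committed code):

// StringLiteral is a constexpr StringRef whose length is known at compile
// time, so each use avoids a runtime strlen.
constexpr StringLiteral kAMDGPUBallotName("llvm.amdgcn.ballot.i64");
constexpr StringLiteral kAMDGPUUnreachableName("llvm.amdgcn.unreachable");
constexpr StringLiteral kAMDGPULDSKernelId("llvm.amdgcn.lds.kernel.id");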
These should also really just use getIntrinsic with the enum ID instead of hardcoding the string names
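For the ballot call, the enum-ID route could look roughly like this (a sketch; amdgcn_ballot is overloaded on its result type, so the concrete type is passed explicitly):

// Resolve the intrinsic by enum ID instead of a hardcoded name string.
Function *Ballot =
    Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_ballot, {IRB.getInt64Ty()});
ReportCond = IRB.CreateIsNotNull(IRB.CreateCall(Ballot, {Cond}));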
unsigned int LongSize = M.getDataLayout().getPointerSizeInBits();
IntegerType *IntptrTy = Type::getIntNTy(M.getContext(), LongSize);
size_t Granularity = static_cast<size_t>(1) << AsanScale;
This should not be a host-dependent size_t, but uint64_t.
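That is, roughly:

// Shadow granularity is a property of the target mapping, not the host,
// so use a fixed-width type.
uint64_t Granularity = static_cast<uint64_t>(1) << AsanScale;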
Value *ShadowValue, uint32_t TypeStoreSize,
int AsanScale) {

unsigned int LongSize = M.getDataLayout().getPointerSizeInBits();
Query specific address space
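For example (a sketch; Addr is the pointer being instrumented):

// Pointer widths differ per address space on AMDGPU, so query the width
// for the address space of the access rather than the default AS 0.
unsigned AS = Addr->getType()->getPointerAddressSpace();
unsigned LongSize = M.getDataLayout().getPointerSizeInBits(AS);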
LastAccessedByte = IRB.CreateAdd(
    LastAccessedByte, ConstantInt::get(IntptrTy, TypeStoreSize / 8 - 1));
Use llvm.ptrmask?
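For context, a hedged sketch of the shape an llvm.ptrmask call takes (whether it fits this slow-path check is questioned in the reply below):

// llvm.ptrmask clears low pointer bits without a ptrtoint/inttoptr round
// trip; the result stays a pointer rather than an integer offset.
Value *GranuleBase = IRB.CreateIntrinsic(
    Intrinsic::ptrmask, {Addr->getType(), IntptrTy},
    {Addr, ConstantInt::get(IntptrTy, ~(uint64_t)(Granularity - 1))});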
The createSlowPathCmp function creates a compare that checks whether the last accessed byte is >= the shadow value. llvm.ptrmask masks bits of a pointer; I'm not sure how it can be used here.
SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
const DataLayout &DL = M.getDataLayout();
unsigned int LongSize = M.getDataLayout().getPointerSizeInBits();
Type *IntptrTy = Type::getIntNTy(M.getContext(), LongSize);
You can directly query getIntPtrTy from the DataLayout
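That is, something like (a sketch; either form should be equivalent):

// DataLayout already knows the pointer-sized integer type.
Type *IntptrTy = DL.getIntPtrType(M.getContext());
// Or, via the builder:
Type *IntptrTy2 = IRB.getIntPtrTy(DL);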
return;
}

void getInterestingMemoryOperands(
I'm pretty sure there's a utility to grab all pointer operands from recognized memory instructions
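One such helper is getLoadStorePointerOperand from llvm/IR/Instructions.h (hedged: it covers plain loads and stores, not the masked/VP intrinsics special-cased above):

// Returns the pointer operand of a load or store, or nullptr otherwise.
if (Value *Ptr = getLoadStorePointerOperand(I)) {
  // ... record Ptr without switching on the instruction kind ...
}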
VPI->getMaskParam(),
VPI->getVectorLengthParam());
break;
}
Missing all target intrinsics. There's a TLI hook to query pointer arguments
I have added the intrinsics I could find in the backend that perform memory operations. I haven't found a TLI hook that fits here.
}
default:
for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
if (!CI->isByValArgument(ArgNo))
We are more interested in byref than byval
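A hedged sketch of what explicit byref handling might look like (accessor names are illustrative, and as noted further down this would need tests):

// On AMDGPU, aggregate kernel arguments are passed byref(T), so byref is
// the attribute that actually implies a memory access at this call.
for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ++ArgNo) {
  if (!CI->paramHasAttr(ArgNo, Attribute::ByRef))
    continue;
  Type *Ty = CI->getParamAttr(ArgNo, Attribute::ByRef).getValueAsType();
  Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
}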
//===- AMDGPUAsanInstrumentation.h - Address Sanitizer related helper functions
//-*- C++ -*----===//
Bad line wrap
SmallString<128> AsanErrorCallbackString;
raw_svector_ostream AsanErrorCallbackOS(AsanErrorCallbackString);
AsanErrorCallbackOS << kAsanReportErrorTemplate << TypeStr
                    << llvm::itostr(1ULL << AccessSizeIndex) << EndingStr;
use format for this?
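Assuming "format" means llvm::formatv, the streaming could collapse into one call (a sketch):

// Build the report-callback name in a single formatting step.
std::string Name = llvm::formatv("__asan_report_{0}{1}{2}", TypeStr,
                                 1ULL << AccessSizeIndex, EndingStr)
                       .str();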
int LongSize = M.getDataLayout().getPointerSizeInBits();
Type *IntptrTy = Type::getIntNTy(M.getContext(), LongSize);
Type *AddrTy = Addr->getType();
assert(AddrTy->isPointerTy() && "Address should be pointer type.");
This is redundant with the assert in getPointerAddressSpace
if (Arg->getType()->isPointerTy()) {
  Type *Ty = Arg->getType();
  Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
This handling doesn't make sense. You can't just take the pointer type and use it the same way as the byval type. This needs testing, and explicit byref support.
Force-pushed from b288a88 to ab03947.
…lvm#98863)" This reverts commit ddb75ca.
Why did these go in utils?
Moved these files out of utils in #100323. Under review.
#98863 merged the AMDGPUAsanInstrumentation module but missed linking TransformUtils into AMDGPUUtils. This PR moves the AMDGPUAsanInstrumentation files out of the utils folder and adds them to the AMDGPUCodegen lib.
Summary: This change adds the utilities required to asan instrument memory instructions. In the "amdgpu-sw-lower-lds" pass #87265, during lowering from LDS to global memory, new instructions in global memory are created which need to be asan instrumented. Differential Revision: https://phabricator.intern.facebook.com/D60250756
Summary: #98863 merged the AMDGPUAsanInstrumentation module but missed linking TransformUtils into AMDGPUUtils. This PR moves the AMDGPUAsanInstrumentation files out of the utils folder and adds them to the AMDGPUCodegen lib. Differential Revision: https://phabricator.intern.facebook.com/D60250608
This PR adds the utilities required to asan instrument memory instructions.
In "amdgpu-sw-lower-lds" pass #87265, during lowering from LDS to global memory, new instructions in global memory would be created. These need to be asan instrumented.
These utility APIs are picked from AddressSanitizer.cpp.