LLVM: lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp Source File (original) (raw)

1

2

3

4

5

6

7

8

10

11#define DEBUG_TYPE "amdgpu-asan-instrumentation"

12

13using namespace llvm;

14

15namespace llvm {

17

19

20

21 return std::max(32U, 1U << AsanScale);

22}

23

27

29 constexpr uint64_t kMaxRZ = 1 << 18;

31

33 if (SizeInBytes <= MinRZ / 2) {

34

35

36

37 RZ = MinRZ - SizeInBytes;

38 } else {

39

40 RZ = std::clamp((SizeInBytes / MinRZ / 4) * MinRZ, MinRZ, kMaxRZ);

41

42

43 if (SizeInBytes % MinRZ)

44 RZ += MinRZ - (SizeInBytes % MinRZ);

45 }

46

47 assert((RZ + SizeInBytes) % MinRZ == 0);

48

49 return RZ;

50}

51

56

60 if (!Recover) {

61 auto *Ballot =

64 }

65

69 Trm->getParent()->setName("asan.report");

70

71 if (Recover)

72 return Trm;

73

76 return IRB.CreateIntrinsic(Intrinsic::amdgcn_unreachable, {});

77}

78

81 uint32_t TypeStoreSize, int AsanScale) {

83

84 Value *LastAccessedByte =

85 IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));

86

87 if (TypeStoreSize / 8 > 1)

89 LastAccessedByte, ConstantInt::get(IntptrTy, TypeStoreSize / 8 - 1));

90

91 LastAccessedByte =

93

94 return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);

95}

96

99 Value *Addr, bool IsWrite,

100 size_t AccessSizeIndex,

101 Value *SizeArgument, bool Recover) {

107

109 raw_svector_ostream AsanErrorCallbackSizedOS(AsanErrorCallbackSizedString);

111 << EndingStr;

112

114 AttributeList AL2;

115 FunctionCallee AsanErrorCallbackSized = M.getOrInsertFunction(

116 AsanErrorCallbackSizedOS.str(),

119 AttributeList AL1;

120

124 << (1ULL << AccessSizeIndex) << EndingStr;

125

126 FunctionCallee AsanErrorCallback = M.getOrInsertFunction(

127 AsanErrorCallbackOS.str(),

129 if (SizeArgument) {

130 Call = IRB.CreateCall(AsanErrorCallbackSized, {Addr, SizeArgument});

131 } else {

133 }

134

135 Call->setCannotMerge();

137}

138

140 Value *Shadow, int AsanScale, uint32_t AsanOffset) {

141

142 Shadow = IRB.CreateLShr(Shadow, AsanScale);

143 if (AsanOffset == 0)

144 return Shadow;

145

146 Value *ShadowBase = ConstantInt::get(IntptrTy, AsanOffset);

147 return IRB.CreateAdd(Shadow, ShadowBase);

148}

149

154 bool IsWrite, Value *SizeArgument,

155 bool UseCalls, bool Recover, int AsanScale,

156 int AsanOffset) {

158 Type *IntptrTy = M.getDataLayout().getIntPtrType(

163 std::max(8U, TypeStoreSize >> AsanScale));

166 Value *ShadowPtr =

167 memToShadow(M, IRB, IntptrTy, AddrLong, AsanScale, AsanOffset);

169 std::max<uint64_t>(Alignment.value() >> AsanScale, 1);

171 ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy), Align(ShadowAlign));

173 auto *Cmp2 = createSlowPathCmp(M, IRB, IntptrTy, AddrLong, ShadowValue,

174 TypeStoreSize, AsanScale);

178 generateCrashCode(M, IRB, IntptrTy, CrashTerm, AddrLong, IsWrite,

179 AccessSizeIndex, SizeArgument, Recover);

181}

182

185 TypeSize TypeStoreSize, bool IsWrite,

186 Value *SizeArgument, bool UseCalls, bool Recover,

187 int AsanScale, int AsanOffset) {

189 unsigned Granularity = 1 << AsanScale;

190 const auto FixedSize = TypeStoreSize.getFixedValue();

191 switch (FixedSize) {

192 case 8:

193 case 16:

194 case 32:

195 case 64:

196 case 128:

197 if (Alignment.value() >= Granularity ||

198 Alignment.value() >= FixedSize / 8)

200 M, IRB, OrigIns, InsertBefore, Addr, Alignment, FixedSize, IsWrite,

201 SizeArgument, UseCalls, Recover, AsanScale, AsanOffset);

202 }

203 }

204

207 Type *IntptrTy = M.getDataLayout().getIntPtrType(AddrTy);

211 Value *SizeMinusOne = IRB.CreateAdd(Size, ConstantInt::get(IntptrTy, -1));

212 Value *LastByte =

215 SizeArgument, UseCalls, Recover, AsanScale, AsanOffset);

217 SizeArgument, UseCalls, Recover, AsanScale, AsanOffset);

218}

219

225 Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,

226 LI->getType(), LI->getAlign());

228 Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,

229 SI->getValueOperand()->getType(), SI->getAlign());

231 Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,

232 RMW->getValOperand()->getType(), std::nullopt);

234 Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,

235 XCHG->getCompareOperand()->getType(),

236 std::nullopt);

238 switch (CI->getIntrinsicID()) {

239 case Intrinsic::masked_load:

240 case Intrinsic::masked_store:

241 case Intrinsic::masked_gather:

242 case Intrinsic::masked_scatter: {

243 bool IsWrite = CI->getType()->isVoidTy();

244

245 unsigned OpOffset = IsWrite ? 1 : 0;

246 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();

247 MaybeAlign Alignment = CI->getParamAlign(OpOffset);

248 Value *Mask = CI->getOperand(1 + OpOffset);

249 Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask);

250 break;

251 }

252 case Intrinsic::masked_expandload:

253 case Intrinsic::masked_compressstore: {

254 bool IsWrite = CI->getIntrinsicID() == Intrinsic::masked_compressstore;

255 unsigned OpOffset = IsWrite ? 1 : 0;

256 auto *BasePtr = CI->getOperand(OpOffset);

257 MaybeAlign Alignment = BasePtr->getPointerAlignment(DL);

258 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();

260 Value *Mask = CI->getOperand(1 + OpOffset);

261 Type *IntptrTy = M.getDataLayout().getIntPtrType(

262 M.getContext(), BasePtr->getType()->getPointerAddressSpace());

263

265 Value *ExtMask = IB.CreateZExt(Mask, ExtTy);

266 Value *EVL = IB.CreateAddReduce(ExtMask);

267 Value *TrueMask = ConstantInt::get(Mask->getType(), 1);

268 Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, TrueMask,

269 EVL);

270 break;

271 }

272 case Intrinsic::vp_load:

273 case Intrinsic::vp_store:

274 case Intrinsic::experimental_vp_strided_load:

275 case Intrinsic::experimental_vp_strided_store: {

277 unsigned IID = CI->getIntrinsicID();

278 bool IsWrite = CI->getType()->isVoidTy();

279 unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);

280 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();

281 MaybeAlign Alignment = VPI->getOperand(PtrOpNo)->getPointerAlignment(DL);

282 Value *Stride = nullptr;

283 if (IID == Intrinsic::experimental_vp_strided_store ||

284 IID == Intrinsic::experimental_vp_strided_load) {

285 Stride = VPI->getOperand(PtrOpNo + 1);

286

287

288

292 Alignment = Align(1);

293 }

294 Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,

295 VPI->getMaskParam(), VPI->getVectorLengthParam(),

296 Stride);

297 break;

298 }

299 case Intrinsic::vp_gather:

300 case Intrinsic::vp_scatter: {

302 unsigned IID = CI->getIntrinsicID();

303 bool IsWrite = IID == Intrinsic::vp_scatter;

304 unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);

305 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();

306 MaybeAlign Alignment = VPI->getPointerAlignment();

307 Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,

308 VPI->getMaskParam(),

309 VPI->getVectorLengthParam());

310 break;

311 }

312 case Intrinsic::amdgcn_raw_buffer_load:

313 case Intrinsic::amdgcn_raw_ptr_buffer_load:

314 case Intrinsic::amdgcn_raw_buffer_load_format:

315 case Intrinsic::amdgcn_raw_ptr_buffer_load_format:

316 case Intrinsic::amdgcn_raw_tbuffer_load:

317 case Intrinsic::amdgcn_raw_ptr_tbuffer_load:

318 case Intrinsic::amdgcn_struct_buffer_load:

319 case Intrinsic::amdgcn_struct_ptr_buffer_load:

320 case Intrinsic::amdgcn_struct_buffer_load_format:

321 case Intrinsic::amdgcn_struct_ptr_buffer_load_format:

322 case Intrinsic::amdgcn_struct_tbuffer_load:

323 case Intrinsic::amdgcn_struct_ptr_tbuffer_load:

324 case Intrinsic::amdgcn_s_buffer_load:

325 case Intrinsic::amdgcn_global_load_tr_b64:

326 case Intrinsic::amdgcn_global_load_tr_b128: {

327 unsigned PtrOpNo = 0;

328 bool IsWrite = false;

329 Type *Ty = CI->getType();

330 Value *Ptr = CI->getArgOperand(PtrOpNo);

332 Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment);

333 break;

334 }

335 case Intrinsic::amdgcn_raw_tbuffer_store:

336 case Intrinsic::amdgcn_raw_ptr_tbuffer_store:

337 case Intrinsic::amdgcn_raw_buffer_store:

338 case Intrinsic::amdgcn_raw_ptr_buffer_store:

339 case Intrinsic::amdgcn_raw_buffer_store_format:

340 case Intrinsic::amdgcn_raw_ptr_buffer_store_format:

341 case Intrinsic::amdgcn_struct_buffer_store:

342 case Intrinsic::amdgcn_struct_ptr_buffer_store:

343 case Intrinsic::amdgcn_struct_buffer_store_format:

344 case Intrinsic::amdgcn_struct_ptr_buffer_store_format:

345 case Intrinsic::amdgcn_struct_tbuffer_store:

346 case Intrinsic::amdgcn_struct_ptr_tbuffer_store: {

347 unsigned PtrOpNo = 1;

348 bool IsWrite = true;

349 Value *Ptr = CI->getArgOperand(PtrOpNo);

352 Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment);

353 break;

354 }

355 default:

356 for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {

357 if (Type *Ty = CI->getParamByRefType(ArgNo)) {

359 } else if (Type *Ty = CI->getParamByValType(ArgNo)) {

361 }

362 }

363 }

364 }

365}

366}

367}

assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")

MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL

const char kAsanReportErrorTemplate[]

const SmallVectorImpl< MachineOperand > & Cond

An instruction that atomically checks whether a specified value is in a memory location,...

an instruction that atomically reads a memory location, combines it with another value,...

This class represents a function call, abstracting a target machine's calling convention.

A parsed version of the target data layout string in and methods for querying it.

A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single entity.

static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)

This static method is the primary way of constructing a FunctionType.

LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)

Value * CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name="")

BasicBlock::iterator GetInsertPoint() const

Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")

Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)

IntegerType * getInt64Ty()

Fetch the type representing a 64-bit integer.

LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")

Create a call to intrinsic ID with Args, mangled using Types.

Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")

Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)

Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")

Value * CreateIsNotNull(Value *Arg, const Twine &Name="")

Return a boolean value testing if Arg != 0.

CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)

LLVM_ABI Value * CreateTypeSize(Type *Ty, TypeSize Size)

Create an expression which evaluates to the number of units in Size at runtime.

Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")

void SetInsertPoint(BasicBlock *TheBB)

This specifies that created instructions should be appended to the end of the specified block.

Type * getVoidTy()

Fetch the type representing void.

This provides a uniform API for creating instructions and inserting them into a basic block: either a...

const DebugLoc & getDebugLoc() const

Return the debug location for this node as a DebugLoc.

void setDebugLoc(DebugLoc Loc)

Set the debug location information for this instruction.

static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)

This static method is the primary way of constructing an IntegerType.

An instruction for reading from memory.

LLVM_ABI MDNode * createUnlikelyBranchWeights()

Return metadata containing two branch weights, with significant bias towards false destination.

A Module instance is used to store all the information related to an LLVM module.

static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)

This constructs a pointer to an object of the specified type in a numbered address space.

SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...

This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.

reference emplace_back(ArgTypes &&... Args)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

An instruction for storing to memory.

The instances of the Type class are immutable: once they are created, they are never changed.

LLVM_ABI unsigned getPointerAddressSpace() const

Get the address space of this pointer or pointer vector type.

LLVM Value Representation.

Type * getType() const

All values are typed, get the type of this value.

LLVM_ABI Align getPointerAlignment(const DataLayout &DL) const

Returns an alignment of the pointer value.

static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)

This static method is the primary way to construct an VectorType.

constexpr ScalarTy getFixedValue() const

constexpr bool isScalable() const

Returns whether the quantity is scaled by a runtime quantity (vscale).

A raw_ostream that writes to an SmallVector or SmallString.

StringRef str() const

Return a StringRef for the vector contents.

static uint64_t getMinRedzoneSizeForGlobal(int AsanScale)

Definition AMDGPUAsanInstrumentation.cpp:24

static Value * memToShadow(Module &M, IRBuilder<> &IRB, Type *IntptrTy, Value *Shadow, int AsanScale, uint32_t AsanOffset)

Definition AMDGPUAsanInstrumentation.cpp:139

void getInterestingMemoryOperands(Module &M, Instruction *I, SmallVectorImpl< InterestingMemoryOperand > &Interesting)

Get all the memory operands from the instruction that needs to be instrumented.

Definition AMDGPUAsanInstrumentation.cpp:220

static uint64_t getRedzoneSizeForScale(int AsanScale)

Definition AMDGPUAsanInstrumentation.cpp:18

static Instruction * generateCrashCode(Module &M, IRBuilder<> &IRB, Type *IntptrTy, Instruction *InsertBefore, Value *Addr, bool IsWrite, size_t AccessSizeIndex, Value *SizeArgument, bool Recover)

Definition AMDGPUAsanInstrumentation.cpp:97

static Instruction * genAMDGPUReportBlock(Module &M, IRBuilder<> &IRB, Value *Cond, bool Recover)

Definition AMDGPUAsanInstrumentation.cpp:57

static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize)

Definition AMDGPUAsanInstrumentation.cpp:52

static Value * createSlowPathCmp(Module &M, IRBuilder<> &IRB, Type *IntptrTy, Value *AddrLong, Value *ShadowValue, uint32_t TypeStoreSize, int AsanScale)

Definition AMDGPUAsanInstrumentation.cpp:79

void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns, Instruction *InsertBefore, Value *Addr, Align Alignment, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, bool Recover, int AsanScale, int AsanOffset)

Instrument the memory operand Addr.

Definition AMDGPUAsanInstrumentation.cpp:183

static void instrumentAddressImpl(Module &M, IRBuilder<> &IRB, Instruction *OrigIns, Instruction *InsertBefore, Value *Addr, Align Alignment, uint32_t TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, bool Recover, int AsanScale, int AsanOffset)

Definition AMDGPUAsanInstrumentation.cpp:150

uint64_t getRedzoneSizeForGlobal(int AsanScale, uint64_t SizeInBytes)

Given SizeInBytes of the Value to be instrumented, returns the redzone size corresponding to it.

Definition AMDGPUAsanInstrumentation.cpp:28

This is an optimization pass for GlobalISel generic memory operations.

decltype(auto) dyn_cast(const From &Val)

dyn_cast - Return the argument parameter cast to the specified type.

int countr_zero(T Val)

Count number of 0's from the least significant bit to the most stopping at the first 1.

bool isa(const From &Val)

isa - Return true if the parameter to the template is an instance of one of the template type arguments.

decltype(auto) cast(const From &Val)

cast - Return the argument parameter cast to the specified type.

LLVM_ABI Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)

Split the containing block at the specified instruction - everything before SplitBefore stays in the old basic block, and the rest of the instructions in the block are moved to a new block.

This struct is a compact representation of a valid (non-zero power of two) alignment.

constexpr uint64_t value() const

This is a hole in the type system and should not be abused.

This struct is a compact representation of a valid (power of two) or undefined (0) alignment.

Align valueOrOne() const

For convenience, returns a valid alignment or 1 if undefined.