LLVM: lib/ExecutionEngine/Interpreter/Execution.cpp Source File
//===-- Execution.cpp - Implement code to simulate the program ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file contains the actual instruction interpreter.
//
//===----------------------------------------------------------------------===//

#include <algorithm>
#include <cmath>
using namespace llvm;

#define DEBUG_TYPE "interpreter"

STATISTIC(NumDynamicInsts, "Number of dynamic instructions executed");

static cl::opt<bool> PrintVolatile(
    "interpreter-print-volatile", cl::Hidden,
    cl::desc("make the interpreter print every volatile load and store"));

//===----------------------------------------------------------------------===//
//                     Various Helper Functions
//===----------------------------------------------------------------------===//

static void SetValue(Value *V, GenericValue Val, ExecutionContext &SF) {
  SF.Values[V] = Val;
}

//===----------------------------------------------------------------------===//
//                    Unary Instruction Implementations
//===----------------------------------------------------------------------===//

static void executeFNegInst(GenericValue &Dest, GenericValue Src, Type *Ty) {
  switch (Ty->getTypeID()) {
  case Type::FloatTyID:
    Dest.FloatVal = -Src.FloatVal;
    break;
  case Type::DoubleTyID:
    Dest.DoubleVal = -Src.DoubleVal;
    break;
  default:
    llvm_unreachable("Unhandled type for FNeg instruction");
  }
}
61
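// visitUnaryOperator - The only unary IR operator handled here is FNeg.
// Vector operands are negated element-wise; scalar float/double operands go
// through executeFNegInst above.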
void Interpreter::visitUnaryOperator(UnaryOperator &I) {
  ExecutionContext &SF = ECStack.back();
  Type *Ty = I.getOperand(0)->getType();
  GenericValue Src = getOperandValue(I.getOperand(0), SF);
  GenericValue R; // Result

  // First process vector operation
  if (Ty->isVectorTy()) {
    R.AggregateVal.resize(Src.AggregateVal.size());

    switch(I.getOpcode()) {
    default:
      llvm_unreachable("Don't know how to handle this unary operator");
      break;
    case Instruction::FNeg:
      if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
        for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
          R.AggregateVal[i].FloatVal = -Src.AggregateVal[i].FloatVal;
      } else if (cast<VectorType>(Ty)->getElementType()->isDoubleTy()) {
        for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
          R.AggregateVal[i].DoubleVal = -Src.AggregateVal[i].DoubleVal;
      } else {
        llvm_unreachable("Unhandled type for FNeg instruction");
      }
      break;
    }
  } else {
    switch (I.getOpcode()) {
    default:
      llvm_unreachable("Don't know how to handle this unary operator");
      break;
    case Instruction::FNeg: executeFNegInst(R, Src, Ty); break;
    }
  }
  SetValue(&I, R, SF);
}
98
99
100
101
102
103#define IMPLEMENT_BINARY_OPERATOR(OP, TY) \
104 case Type::TY##TyID: \
105 Dest.TY##Val = Src1.TY##Val OP Src2.TY##Val; \
106 break
107
113 default:
114 dbgs() << "Unhandled type for FAdd instruction: " << *Ty << "\n";
116 }
117}
118
124 default:
125 dbgs() << "Unhandled type for FSub instruction: " << *Ty << "\n";
127 }
128}
129
135 default:
136 dbgs() << "Unhandled type for FMul instruction: " << *Ty << "\n";
138 }
139}
140
146 default:
147 dbgs() << "Unhandled type for FDiv instruction: " << *Ty << "\n";
149 }
150}
151
157 break;
160 break;
161 default:
162 dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
164 }
165}
166
167#define IMPLEMENT_INTEGER_ICMP(OP, TY) \
168 case Type::IntegerTyID: \
169 Dest.IntVal = APInt(1,Src1.IntVal.OP(Src2.IntVal)); \
170 break;
171
172#define IMPLEMENT_VECTOR_INTEGER_ICMP(OP, TY) \
173 case Type::FixedVectorTyID: \
174 case Type::ScalableVectorTyID: { \
175 assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \
176 Dest.AggregateVal.resize(Src1.AggregateVal.size()); \
177 for (uint32_t _i = 0; _i < Src1.AggregateVal.size(); _i++) \
178 Dest.AggregateVal[_i].IntVal = APInt( \
179 1, Src1.AggregateVal[_i].IntVal.OP(Src2.AggregateVal[_i].IntVal)); \
180 } break;
181
182
183
184
185
186#define IMPLEMENT_POINTER_ICMP(OP) \
187 case Type::PointerTyID: \
188 Dest.IntVal = APInt(1,(void*)(intptr_t)Src1.PointerVal OP \
189 (void*)(intptr_t)Src2.PointerVal); \
190 break;
191
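// Each executeICMP_* helper below switches on the operand's TypeID and uses
// the IMPLEMENT_INTEGER_ICMP / IMPLEMENT_VECTOR_INTEGER_ICMP /
// IMPLEMENT_POINTER_ICMP macros above to produce an i1 result (or a vector
// of i1 results) in Dest.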
199 default:
200 dbgs() << "Unhandled type for ICMP_EQ predicate: " << *Ty << "\n";
202 }
203 return Dest;
204}
205
213 default:
214 dbgs() << "Unhandled type for ICMP_NE predicate: " << *Ty << "\n";
216 }
217 return Dest;
218}
219
227 default:
228 dbgs() << "Unhandled type for ICMP_ULT predicate: " << *Ty << "\n";
230 }
231 return Dest;
232}
233
241 default:
242 dbgs() << "Unhandled type for ICMP_SLT predicate: " << *Ty << "\n";
244 }
245 return Dest;
246}
247
255 default:
256 dbgs() << "Unhandled type for ICMP_UGT predicate: " << *Ty << "\n";
258 }
259 return Dest;
260}
261
269 default:
270 dbgs() << "Unhandled type for ICMP_SGT predicate: " << *Ty << "\n";
272 }
273 return Dest;
274}
275
283 default:
284 dbgs() << "Unhandled type for ICMP_ULE predicate: " << *Ty << "\n";
286 }
287 return Dest;
288}
289
297 default:
298 dbgs() << "Unhandled type for ICMP_SLE predicate: " << *Ty << "\n";
300 }
301 return Dest;
302}
303
311 default:
312 dbgs() << "Unhandled type for ICMP_UGE predicate: " << *Ty << "\n";
314 }
315 return Dest;
316}
317
325 default:
326 dbgs() << "Unhandled type for ICMP_SGE predicate: " << *Ty << "\n";
328 }
329 return Dest;
330}
331
334 Type *Ty = I.getOperand(0)->getType();
335 GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
336 GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
338
339 switch (I.getPredicate()) {
350 default:
351 dbgs() << "Don't know how to handle this ICmp predicate!\n-->" << I;
353 }
354
356}
357
358#define IMPLEMENT_FCMP(OP, TY) \
359 case Type::TY##TyID: \
360 Dest.IntVal = APInt(1,Src1.TY##Val OP Src2.TY##Val); \
361 break
362
363#define IMPLEMENT_VECTOR_FCMP_T(OP, TY) \
364 assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \
365 Dest.AggregateVal.resize( Src1.AggregateVal.size() ); \
366 for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++) \
367 Dest.AggregateVal[_i].IntVal = APInt(1, \
368 Src1.AggregateVal[_i].TY##Val OP Src2.AggregateVal[_i].TY##Val);\
369 break;
370
#define IMPLEMENT_VECTOR_FCMP(OP) \
  case Type::FixedVectorTyID: \
  case Type::ScalableVectorTyID: \
    if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) { \
      IMPLEMENT_VECTOR_FCMP_T(OP, Float); \
    } else { \
      IMPLEMENT_VECTOR_FCMP_T(OP, Double); \
    }
379
387 default:
388 dbgs() << "Unhandled type for FCmp EQ instruction: " << *Ty << "\n";
390 }
391 return Dest;
392}
393
394#define IMPLEMENT_SCALAR_NANS(TY, X,Y) \
395 if (TY->isFloatTy()) { \
396 if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \
397 Dest.IntVal = APInt(1,false); \
398 return Dest; \
399 } \
400 } else { \
401 if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \
402 Dest.IntVal = APInt(1,false); \
403 return Dest; \
404 } \
405 }
406
407#define MASK_VECTOR_NANS_T(X,Y, TZ, FLAG) \
408 assert(X.AggregateVal.size() == Y.AggregateVal.size()); \
409 Dest.AggregateVal.resize( X.AggregateVal.size() ); \
410 for( uint32_t _i=0;_i<X.AggregateVal.size();_i++) { \
411 if (X.AggregateVal[_i].TZ##Val != X.AggregateVal[_i].TZ##Val || \
412 Y.AggregateVal[_i].TZ##Val != Y.AggregateVal[_i].TZ##Val) \
413 Dest.AggregateVal[_i].IntVal = APInt(1,FLAG); \
414 else { \
415 Dest.AggregateVal[_i].IntVal = APInt(1,!FLAG); \
416 } \
417 }
418
#define MASK_VECTOR_NANS(TY, X,Y, FLAG) \
  if (TY->isVectorTy()) { \
    if (cast<VectorType>(TY)->getElementType()->isFloatTy()) { \
      MASK_VECTOR_NANS_T(X, Y, Float, FLAG) \
    } else { \
      MASK_VECTOR_NANS_T(X, Y, Double, FLAG) \
    } \
  } \

428
429
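// Unordered comparisons are true if either operand is NaN; for example,
// "fcmp une float %x, %y" is true when %y is NaN even if the values compare
// equal otherwise.  MASK_VECTOR_NANS precomputes a per-lane NaN mask
// (DestMask) which is merged into the ordered comparison result below.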
432{
434
436
443 default:
444 dbgs() << "Unhandled type for FCmp NE instruction: " << *Ty << "\n";
446 }
447
449 for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
450 if (DestMask.AggregateVal[_i].IntVal == false)
452
453 return Dest;
454}
455
463 default:
464 dbgs() << "Unhandled type for FCmp LE instruction: " << *Ty << "\n";
466 }
467 return Dest;
468}
469
477 default:
478 dbgs() << "Unhandled type for FCmp GE instruction: " << *Ty << "\n";
480 }
481 return Dest;
482}
483
491 default:
492 dbgs() << "Unhandled type for FCmp LT instruction: " << *Ty << "\n";
494 }
495 return Dest;
496}
497
505 default:
506 dbgs() << "Unhandled type for FCmp GT instruction: " << *Ty << "\n";
508 }
509 return Dest;
510}
511
512#define IMPLEMENT_UNORDERED(TY, X,Y) \
513 if (TY->isFloatTy()) { \
514 if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \
515 Dest.IntVal = APInt(1,true); \
516 return Dest; \
517 } \
518 } else if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \
519 Dest.IntVal = APInt(1,true); \
520 return Dest; \
521 }
522
523#define IMPLEMENT_VECTOR_UNORDERED(TY, X, Y, FUNC) \
524 if (TY->isVectorTy()) { \
525 GenericValue DestMask = Dest; \
526 Dest = FUNC(Src1, Src2, Ty); \
527 for (size_t _i = 0; _i < Src1.AggregateVal.size(); _i++) \
528 if (DestMask.AggregateVal[_i].IntVal == true) \
529 Dest.AggregateVal[_i].IntVal = APInt(1, true); \
530 return Dest; \
531 }
532
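// The executeFCMP_U* helpers first handle NaN operands via
// IMPLEMENT_UNORDERED / IMPLEMENT_VECTOR_UNORDERED and then fall back to the
// corresponding ordered comparison helper.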
540
541}
542
550}
551
559}
560
568}
569
577}
578
586}
587
    if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
595 for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
601 } else {
602 for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
608 }
612 else {
615 }
616 return Dest;
617}
618
    if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
626 for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
632 } else {
633 for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
639 }
643 else {
646 }
647 return Dest;
648}
649
651 Type *Ty, const bool val) {
656 for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
658 } else {
660 }
661
662 return Dest;
663}
664
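// visitFCmpInst - Dispatch on the FCmp predicate: FCMP_FALSE and FCMP_TRUE
// are folded directly through executeFCMP_BOOL, the remaining predicates go
// to the executeFCMP_* helpers above.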
667 Type *Ty = I.getOperand(0)->getType();
668 GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
669 GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
671
672 switch (I.getPredicate()) {
673 default:
674 dbgs() << "Don't know how to handle this FCmp predicate!\n-->" << I;
676 break;
678 break;
680 break;
695 }
696
698}
699
702 Type *Ty = I.getOperand(0)->getType();
703 GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
704 GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
706
707
710 R.AggregateVal.resize(Src1.AggregateVal.size());
711
712
713#define INTEGER_VECTOR_OPERATION(OP) \
714 for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \
715 R.AggregateVal[i].IntVal = \
716 Src1.AggregateVal[i].IntVal OP Src2.AggregateVal[i].IntVal;
717
718
719
720#define INTEGER_VECTOR_FUNCTION(OP) \
721 for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \
722 R.AggregateVal[i].IntVal = \
723 Src1.AggregateVal[i].IntVal.OP(Src2.AggregateVal[i].IntVal);
724
725
726
727#define FLOAT_VECTOR_FUNCTION(OP, TY) \
728 for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \
729 R.AggregateVal[i].TY = \
730 Src1.AggregateVal[i].TY OP Src2.AggregateVal[i].TY;
731
732
733
#define FLOAT_VECTOR_OP(OP) { \
  if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) \
    FLOAT_VECTOR_FUNCTION(OP, FloatVal) \
  else { \
    if (cast<VectorType>(Ty)->getElementType()->isDoubleTy()) \
      FLOAT_VECTOR_FUNCTION(OP, DoubleVal) \
    else { \
      dbgs() << "Unhandled type for OP instruction: " << *Ty << "\n"; \
      llvm_unreachable(0); \
    } \
  } \
}
746
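// Vector binary operators: integer vectors go through the
// INTEGER_VECTOR_OPERATION / INTEGER_VECTOR_FUNCTION macros, floating-point
// vectors through FLOAT_VECTOR_OP, and FRem is expanded element-wise below.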
747 switch(I.getOpcode()){
748 default:
749 dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
751 break;
    case Instruction::FRem:
      if (cast<VectorType>(Ty)->getElementType()->isFloatTy())
        for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
          R.AggregateVal[i].FloatVal =
              fmod(Src1.AggregateVal[i].FloatVal, Src2.AggregateVal[i].FloatVal);
      else {
        if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())
          for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
            R.AggregateVal[i].DoubleVal =
                fmod(Src1.AggregateVal[i].DoubleVal, Src2.AggregateVal[i].DoubleVal);
        else {
          dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
          llvm_unreachable(nullptr);
        }
      }
      break;
782 }
783 } else {
784 switch (I.getOpcode()) {
785 default:
786 dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
788 break;
789 case Instruction::Add: R.IntVal = Src1.IntVal + Src2.IntVal; break;
790 case Instruction::Sub: R.IntVal = Src1.IntVal - Src2.IntVal; break;
791 case Instruction::Mul: R.IntVal = Src1.IntVal * Src2.IntVal; break;
792 case Instruction::FAdd: executeFAddInst(R, Src1, Src2, Ty); break;
793 case Instruction::FSub: executeFSubInst(R, Src1, Src2, Ty); break;
794 case Instruction::FMul: executeFMulInst(R, Src1, Src2, Ty); break;
795 case Instruction::FDiv: executeFDivInst(R, Src1, Src2, Ty); break;
796 case Instruction::FRem: executeFRemInst(R, Src1, Src2, Ty); break;
797 case Instruction::UDiv: R.IntVal = Src1.IntVal.udiv(Src2.IntVal); break;
798 case Instruction::SDiv: R.IntVal = Src1.IntVal.sdiv(Src2.IntVal); break;
799 case Instruction::URem: R.IntVal = Src1.IntVal.urem(Src2.IntVal); break;
800 case Instruction::SRem: R.IntVal = Src1.IntVal.srem(Src2.IntVal); break;
801 case Instruction::And: R.IntVal = Src1.IntVal & Src2.IntVal; break;
802 case Instruction::Or: R.IntVal = Src1.IntVal | Src2.IntVal; break;
803 case Instruction::Xor: R.IntVal = Src1.IntVal ^ Src2.IntVal; break;
804 }
805 }
807}
808
816 for (size_t i = 0; i < Src1.AggregateVal.size(); ++i)
819 } else {
820 Dest = (Src1.IntVal == 0) ? Src3 : Src2;
821 }
822 return Dest;
823}
824
827 Type * Ty = I.getOperand(0)->getType();
828 GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
829 GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
830 GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
833}
834
835
836
837
838
840
841
842
843 ECStack.clear();
846}
847
848
849
850
851
852
853
854
855
856void Interpreter::popStackAndReturnValueToCaller(Type *RetTy,
858
859 ECStack.pop_back();
860
861 if (ECStack.empty()) {
    if (RetTy && !RetTy->isVoidTy()) {  // Nonvoid return type?
863 ExitValue = Result;
864 } else {
865 memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
866 }
867 } else {
868
869
871 if (CallingSF.Caller) {
872
876 SwitchToNewBasicBlock (II->getNormalDest (), CallingSF);
877 CallingSF.Caller = nullptr;
878 }
879 }
880}
881
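// visitReturnInst - Collect the optional return value, then pop the current
// stack frame and hand the value to the caller (or to ExitValue if this was
// the outermost frame).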
886
887
888 if (I.getNumOperands()) {
889 RetTy = I.getReturnValue()->getType();
890 Result = getOperandValue(I.getReturnValue(), SF);
891 }
892
893 popStackAndReturnValueToCaller(RetTy, Result);
894}
895
897 report_fatal_error("Program executed an 'unreachable' instruction!");
898}
899
903
904 Dest = I.getSuccessor(0);
  if (!I.isUnconditional()) {
    Value *Cond = I.getCondition();
    if (getOperandValue(Cond, SF).IntVal == 0)
908 Dest = I.getSuccessor(1);
909 }
910 SwitchToNewBasicBlock(Dest, SF);
911}
912
916 Type *ElTy = Cond->getType();
918
919
921 for (auto Case : I.cases()) {
922 GenericValue CaseVal = getOperandValue(Case.getCaseValue(), SF);
923 if (executeICMP_EQ(CondVal, CaseVal, ElTy).IntVal != 0) {
      Dest = cast<BasicBlock>(Case.getCaseSuccessor());
925 break;
926 }
927 }
928 if (!Dest) Dest = I.getDefaultDest();
929 SwitchToNewBasicBlock(Dest, SF);
930}
931
934 void *Dest = GVTOP(getOperandValue(I.getAddress(), SF));
935 SwitchToNewBasicBlock((BasicBlock*)Dest, SF);
936}
937
// SwitchToNewBasicBlock - This method is used to jump to a new basic block.
// It updates the block and instruction iterators and then executes all of
// the PHI nodes in the destination block.
//
// All PHI nodes are executed atomically: their incoming values are read in
// one pass before any of them are updated, so PHIs that depend on other PHIs
// in the same block observe the old values (a two-phase approach).
//
void Interpreter::SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF){
950 BasicBlock *PrevBB = SF.CurBB;
951 SF.CurBB = Dest;
953
  if (!isa<PHINode>(SF.CurInst)) return;  // Nothing fancy to do
955
956
  std::vector<GenericValue> ResultValues;
958
960
962 assert(i != -1 && "PHINode doesn't contain entry for predecessor??");
964
965
966 ResultValues.push_back(getOperandValue(IncomingValue, SF));
967 }
968
969
  for (unsigned i = 0; isa<PHINode>(SF.CurInst); ++SF.CurInst, ++i) {
    PHINode *PN = cast<PHINode>(&*SF.CurInst);
    SetValue(PN, ResultValues[i], SF);
  }
975}
976
977
978
979
980
983
984 Type *Ty = I.getAllocatedType();
985
986
987 unsigned NumElements =
989
991
992
993 unsigned MemToAlloc = std::max(1U, NumElements * TypeSize);
994
995
997
999 << " bytes) x " << NumElements << " (Total: " << MemToAlloc
1000 << ") at " << uintptr_t(Memory) << '\n');
1001
1003 assert(Result.PointerVal && "Null pointer returned by malloc!");
1005
1006 if (I.getOpcode() == Instruction::Alloca)
1007 ECStack.back().Allocas.add(Memory);
1008}
1009
1010
1011
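// executeGEPOperation - Compute the byte offset of a getelementptr: struct
// indices are resolved through StructLayout::getElementOffset, while
// array/sequential indices scale the (64-bit) index by the allocation size
// of the indexed type.  For example, index 1 into {i32, double} maps to the
// offset of the double field in the target's struct layout.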
1015 assert(Ptr->getType()->isPointerTy() &&
1016 "Cannot getElementOffset of a nonpointer type!");
1017
1019
1021 if (StructType *STy = I.getStructTypeOrNull()) {
1023
      const ConstantInt *CPU = cast<ConstantInt>(I.getOperand());
1026
1028 } else {
1029
1030 GenericValue IdxGV = getOperandValue(I.getOperand(), SF);
1031
      int64_t Idx;
      unsigned BitWidth =
          cast<IntegerType>(I.getOperand()->getType())->getBitWidth();
      if (BitWidth == 32)
        Idx = (int64_t)(int32_t)IdxGV.IntVal.getZExtValue();
      else {
        assert(BitWidth == 64 && "Invalid index type for getelementptr");
        Idx = (int64_t)IdxGV.IntVal.getZExtValue();
      }
1042 }
1043 }
1044
1049}
1050
1053 SetValue(&I, executeGEPOperation(I.getPointerOperand(),
1055}
1056
1059 GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
1065 dbgs() << "Volatile load " << I;
1066}
1067
1070 GenericValue Val = getOperandValue(I.getOperand(0), SF);
1071 GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
1073 I.getOperand(0)->getType());
1075 dbgs() << "Volatile store: " << I;
1076}
1077
1078
1079
1080
1081
1088}
1089
1091
1092}
1093
1096 SetValue(&I, getOperandValue(*I.arg_begin(), SF), SF);
1097}
1098
1101
1102
1103
1104
1107 bool atBegin(Parent->begin() == Me);
1108 if (!atBegin)
1109 --Me;
1111
1112
1113
1114 if (atBegin) {
1116 } else {
1119 }
1120}
1121
1124
  std::vector<GenericValue> ArgVals;
  const unsigned NumArgs = SF.Caller->arg_size();
  ArgVals.reserve(NumArgs);
  for (Value *V : SF.Caller->args())
    ArgVals.push_back(getOperandValue(V, SF));
1131
1132
1133
1136}
1137

static unsigned getShiftAmount(uint64_t orgShiftAmount,
                               llvm::APInt valueToShift) {
  unsigned valueWidth = valueToShift.getBitWidth();
  if (orgShiftAmount < (uint64_t)valueWidth)
    return orgShiftAmount;
  // If the shift amount is >= the bit width the IR result is undefined;
  // mirror common hardware behaviour and mask the amount instead.
  return (NextPowerOf2(valueWidth-1) - 1) & orgShiftAmount;
}
1148
1149
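// visitShl / visitLShr / visitAShr - Handle scalar and vector shifts; the
// shift amount is masked through getShiftAmount above before calling the
// corresponding APInt shift.  For example, shifting an i32 value by 40
// effectively shifts by 40 & 31 == 8.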
1152 GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
1153 GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
1156
1160 for (unsigned i = 0; i < src1Size; i++) {
1164 Result.IntVal = valueToShift.shl(getShiftAmount(shiftAmount, valueToShift));
1166 }
1167 } else {
1168
1172 }
1173
1175}
1176
1179 GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
1180 GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
1183
1187 for (unsigned i = 0; i < src1Size; i++) {
1191 Result.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount, valueToShift));
1193 }
1194 } else {
1195
1199 }
1200
1202}
1203
1206 GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
1207 GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
1210
1214 for (unsigned i = 0; i < src1Size; i++) {
1218 Result.IntVal = valueToShift.ashr(getShiftAmount(shiftAmount, valueToShift));
1220 }
1221 } else {
1222
1226 }
1227
1229}
1230
1233 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
    unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
1238 unsigned NumElts = Src.AggregateVal.size();
1239
1241 for (unsigned i = 0; i < NumElts; i++)
1242 Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.trunc(DBitWidth);
1243 } else {
    IntegerType *DITy = cast<IntegerType>(DstTy);
1245 unsigned DBitWidth = DITy->getBitWidth();
1246 Dest.IntVal = Src.IntVal.trunc(DBitWidth);
1247 }
1248 return Dest;
1249}
1250
1254 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
    unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
1258 unsigned size = Src.AggregateVal.size();
1259
1261 for (unsigned i = 0; i < size; i++)
1262 Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.sext(DBitWidth);
1263 } else {
    auto *DITy = cast<IntegerType>(DstTy);
1265 unsigned DBitWidth = DITy->getBitWidth();
1266 Dest.IntVal = Src.IntVal.sext(DBitWidth);
1267 }
1268 return Dest;
1269}
1270
1274 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
    unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
1278
1279 unsigned size = Src.AggregateVal.size();
1280
1282 for (unsigned i = 0; i < size; i++)
1283 Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.zext(DBitWidth);
1284 } else {
    auto *DITy = cast<IntegerType>(DstTy);
1286 unsigned DBitWidth = DITy->getBitWidth();
1287 Dest.IntVal = Src.IntVal.zext(DBitWidth);
1288 }
1289 return Dest;
1290}
1291
1294 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1295
  if (isa<VectorType>(SrcVal->getType())) {
1299 "Invalid FPTrunc instruction");
1300
1301 unsigned size = Src.AggregateVal.size();
1302
1304 for (unsigned i = 0; i < size; i++)
1305 Dest.AggregateVal[i].FloatVal = (float)Src.AggregateVal[i].DoubleVal;
1306 } else {
1308 "Invalid FPTrunc instruction");
1309 Dest.FloatVal = (float)Src.DoubleVal;
1310 }
1311
1312 return Dest;
1313}
1314
1317 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1318
  if (isa<VectorType>(SrcVal->getType())) {
1322
1323 unsigned size = Src.AggregateVal.size();
1324
1326 for (unsigned i = 0; i < size; i++)
1327 Dest.AggregateVal[i].DoubleVal = (double)Src.AggregateVal[i].FloatVal;
1328 } else {
1330 "Invalid FPExt instruction");
1331 Dest.DoubleVal = (double)Src.FloatVal;
1332 }
1333
1334 return Dest;
1335}
1336
1340 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1341
  if (isa<VectorType>(SrcTy)) {
    Type *DstVecTy = DstTy->getScalarType();
    Type *SrcVecTy = SrcTy->getScalarType();
    uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
1346 unsigned size = Src.AggregateVal.size();
1347
1349
1352 for (unsigned i = 0; i < size; i++)
1354 Src.AggregateVal[i].FloatVal, DBitWidth);
1355 } else {
1356 for (unsigned i = 0; i < size; i++)
1358 Src.AggregateVal[i].DoubleVal, DBitWidth);
1359 }
1360 } else {
1361
    uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
1364
1367 else {
1369 }
1370 }
1371
1372 return Dest;
1373}
1374
1378 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1379
  if (isa<VectorType>(SrcTy)) {
    Type *DstVecTy = DstTy->getScalarType();
    Type *SrcVecTy = SrcTy->getScalarType();
    uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
1384 unsigned size = Src.AggregateVal.size();
1385
1387
1390 for (unsigned i = 0; i < size; i++)
1392 Src.AggregateVal[i].FloatVal, DBitWidth);
1393 } else {
1394 for (unsigned i = 0; i < size; i++)
1396 Src.AggregateVal[i].DoubleVal, DBitWidth);
1397 }
1398 } else {
1399
    unsigned DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
1402
1405 else {
1407 }
1408 }
1409 return Dest;
1410}
1411
1414 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1415
  if (isa<VectorType>(SrcVal->getType())) {
1418 unsigned size = Src.AggregateVal.size();
1419
1421
1424 for (unsigned i = 0; i < size; i++)
1427 } else {
1428 for (unsigned i = 0; i < size; i++)
1431 }
1432 } else {
1433
1437 else {
1439 }
1440 }
1441 return Dest;
1442}
1443
1446 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1447
  if (isa<VectorType>(SrcVal->getType())) {
1450 unsigned size = Src.AggregateVal.size();
1451
1453
1456 for (unsigned i = 0; i < size; i++)
1459 } else {
1460 for (unsigned i = 0; i < size; i++)
1463 }
1464 } else {
1465
1467
1470 else {
1472 }
1473 }
1474
1475 return Dest;
1476}
1477
  uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
1481 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1483
1484 Dest.IntVal = APInt(DBitWidth, (intptr_t) Src.PointerVal);
1485 return Dest;
1486}
1487
1490 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1492
1494 if (PtrSize != Src.IntVal.getBitWidth())
1495 Src.IntVal = Src.IntVal.zextOrTrunc(PtrSize);
1496
1498 return Dest;
1499}
1500
1503
1504
1505
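// executeBitCastInst - Reinterpret the source bits as the destination type.
// For vector casts the elements are first converted to integer bit patterns,
// then split or merged (respecting the target's endianness) to match the
// destination element count, and finally converted back to float/double as
// needed.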
1507 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1508
  if (isa<VectorType>(SrcTy) || isa<VectorType>(DstTy)) {
1510
1511
1514 Type *SrcElemTy;
1515 Type *DstElemTy;
1516 unsigned SrcBitSize;
1517 unsigned DstBitSize;
1518 unsigned SrcNum;
1519 unsigned DstNum;
1520
    if (isa<VectorType>(SrcTy)) {
1524 SrcNum = Src.AggregateVal.size();
1525 SrcVec = Src;
1526 } else {
1527
1528 SrcElemTy = SrcTy;
1530 SrcNum = 1;
1532 }
1533
    if (isa<VectorType>(DstTy)) {
1537 DstNum = (SrcNum * SrcBitSize) / DstBitSize;
1538 } else {
1539 DstElemTy = DstTy;
1541 DstNum = 1;
1542 }
1543
1544 if (SrcNum * SrcBitSize != DstNum * DstBitSize)
1546
1547
1550 for (unsigned i = 0; i < SrcNum; i++)
1553
1554 } else if (SrcElemTy->isDoubleTy()) {
1555 for (unsigned i = 0; i < SrcNum; i++)
1559 for (unsigned i = 0; i < SrcNum; i++)
1561 } else {
1562
1564 }
1565
1566
1567 if (DstNum < SrcNum) {
1568
1569 unsigned Ratio = SrcNum / DstNum;
1570 unsigned SrcElt = 0;
1571 for (unsigned i = 0; i < DstNum; i++) {
1575 unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize * (Ratio - 1);
1576 for (unsigned j = 0; j < Ratio; j++) {
1578 Tmp = Tmp.zext(SrcBitSize);
1579 Tmp = TempSrc.AggregateVal[SrcElt++].IntVal;
1580 Tmp = Tmp.zext(DstBitSize);
1581 Tmp <<= ShiftAmt;
1582 ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
1584 }
1586 }
1587 } else {
1588
1589 unsigned Ratio = DstNum / SrcNum;
1590 for (unsigned i = 0; i < SrcNum; i++) {
1591 unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize * (Ratio - 1);
1592 for (unsigned j = 0; j < Ratio; j++) {
1597
1598 if (DstBitSize < SrcBitSize)
1600 ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
1602 }
1603 }
1604 }
1605
1606
    if (isa<VectorType>(DstTy)) {
1610 for (unsigned i = 0; i < DstNum; i++)
1612 TempDst.AggregateVal[i].IntVal.bitsToDouble();
1613 } else if (DstElemTy->isFloatTy()) {
1615 for (unsigned i = 0; i < DstNum; i++)
1617 TempDst.AggregateVal[i].IntVal.bitsToFloat();
1618 } else {
1619 Dest = TempDst;
1620 }
1621 } else {
1624 else if (DstElemTy->isFloatTy()) {
1626 } else {
1628 }
1629 }
1630 } else {
1631
1632
1642 Dest.IntVal = Src.IntVal;
1643 } else {
1645 }
1646 } else if (DstTy->isFloatTy()) {
1648 Dest.FloatVal = Src.IntVal.bitsToFloat();
1649 else {
1650 Dest.FloatVal = Src.FloatVal;
1651 }
1654 Dest.DoubleVal = Src.IntVal.bitsToDouble();
1655 else {
1657 }
1658 } else {
1660 }
1661 }
1662
1663 return Dest;
1664}
1665
1668 SetValue(&I, executeTruncInst(I.getOperand(0), I.getType(), SF), SF);
1669}
1670
1673 SetValue(&I, executeSExtInst(I.getOperand(0), I.getType(), SF), SF);
1674}
1675
1678 SetValue(&I, executeZExtInst(I.getOperand(0), I.getType(), SF), SF);
1679}
1680
1683 SetValue(&I, executeFPTruncInst(I.getOperand(0), I.getType(), SF), SF);
1684}
1685
1688 SetValue(&I, executeFPExtInst(I.getOperand(0), I.getType(), SF), SF);
1689}
1690
1693 SetValue(&I, executeUIToFPInst(I.getOperand(0), I.getType(), SF), SF);
1694}
1695
1698 SetValue(&I, executeSIToFPInst(I.getOperand(0), I.getType(), SF), SF);
1699}
1700
1703 SetValue(&I, executeFPToUIInst(I.getOperand(0), I.getType(), SF), SF);
1704}
1705
1708 SetValue(&I, executeFPToSIInst(I.getOperand(0), I.getType(), SF), SF);
1709}
1710
1713 SetValue(&I, executePtrToIntInst(I.getOperand(0), I.getType(), SF), SF);
1714}
1715
1718 SetValue(&I, executeIntToPtrInst(I.getOperand(0), I.getType(), SF), SF);
1719}
1720
1723 SetValue(&I, executeBitCastInst(I.getOperand(0), I.getType(), SF), SF);
1724}
1725
1726#define IMPLEMENT_VAARG(TY) \
1727 case Type::TY##TyID: Dest.TY##Val = Src.TY##Val; break
1728
1731
1732
1733
1734 GenericValue VAList = getOperandValue(I.getOperand(0), SF);
1741 Dest.IntVal = Src.IntVal;
1742 break;
1746 default:
1747 dbgs() << "Unhandled dest type for vaarg instruction: " << *Ty << "\n";
1749 }
1750
1751
1753
1754
1756}
1757
1760 GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
1761 GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
1763
1766
1769 default:
1770 dbgs() << "Unhandled destination type for extractelement instruction: "
1771 << *Ty << "\n";
1773 break;
1776 break;
1779 break;
1782 break;
1783 }
1784 } else {
1785 dbgs() << "Invalid index in extractelement instruction\n";
1786 }
1787
1789}
1790
  VectorType *Ty = cast<VectorType>(I.getType());
1794
1795 GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
1796 GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
1797 GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
1799
1801
1804
1806 llvm_unreachable("Invalid index in insertelement instruction");
1807 switch (TyContained->getTypeID()) {
1808 default:
1809 llvm_unreachable("Unhandled dest type for insertelement instruction");
1812 break;
1815 break;
1818 break;
1819 }
1821}
1822
1825
  VectorType *Ty = cast<VectorType>(I.getType());
1827
1828 GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
1829 GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
1831
1832
1833
1834
1835
1839 unsigned src3Size = I.getShuffleMask().size();
1840
1842
1843 switch (TyContained->getTypeID()) {
1844 default:
1845 llvm_unreachable("Unhandled dest type for insertelement instruction");
1846 break;
1848 for( unsigned i=0; i<src3Size; i++) {
1849 unsigned j = std::max(0, I.getMaskValue(i));
1850 if(j < src1Size)
1852 else if(j < src1Size + src2Size)
1854 else
1855
1856
1857
1858
1859
1860 llvm_unreachable("Invalid mask in shufflevector instruction");
1861 }
1862 break;
1864 for( unsigned i=0; i<src3Size; i++) {
1865 unsigned j = std::max(0, I.getMaskValue(i));
1866 if(j < src1Size)
1868 else if(j < src1Size + src2Size)
1870 else
1871 llvm_unreachable("Invalid mask in shufflevector instruction");
1872 }
1873 break;
1875 for( unsigned i=0; i<src3Size; i++) {
1876 unsigned j = std::max(0, I.getMaskValue(i));
1877 if(j < src1Size)
1879 else if(j < src1Size + src2Size)
1882 else
1883 llvm_unreachable("Invalid mask in shufflevector instruction");
1884 }
1885 break;
1886 }
1888}
1889
1892 Value *Agg = I.getAggregateOperand();
1894 GenericValue Src = getOperandValue(Agg, SF);
1895
1897 unsigned Num = I.getNumIndices();
1899
1900 for (unsigned i = 0 ; i < Num; ++i) {
1902 ++IdxBegin;
1903 }
1904
1906 switch (IndexedType->getTypeID()) {
1907 default:
1908 llvm_unreachable("Unhandled dest type for extractelement instruction");
1909 break;
1912 break;
1915 break;
1918 break;
1924 break;
1927 break;
1928 }
1929
1931}
1932
1934
1936 Value *Agg = I.getAggregateOperand();
1937
1938 GenericValue Src1 = getOperandValue(Agg, SF);
1939 GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
1940 GenericValue Dest = Src1;
1941
1943 unsigned Num = I.getNumIndices();
1944
1946 for (unsigned i = 0 ; i < Num; ++i) {
1948 ++IdxBegin;
1949 }
1950
1951
1953
1954 switch (IndexedType->getTypeID()) {
1955 default:
1956 llvm_unreachable("Unhandled dest type for insertelement instruction");
1957 break;
1960 break;
1963 break;
1966 break;
1972 break;
1975 break;
1976 }
1977
1979}
1980
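// getConstantExprValue - Evaluate a ConstantExpr by reusing the same
// execute* helpers as the corresponding instructions; simple integer binary
// expressions are folded directly below.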
1983 switch (CE->getOpcode()) {
1984 case Instruction::Trunc:
1985 return executeTruncInst(CE->getOperand(0), CE->getType(), SF);
1986 case Instruction::PtrToInt:
1987 return executePtrToIntInst(CE->getOperand(0), CE->getType(), SF);
1988 case Instruction::IntToPtr:
1989 return executeIntToPtrInst(CE->getOperand(0), CE->getType(), SF);
1990 case Instruction::BitCast:
1991 return executeBitCastInst(CE->getOperand(0), CE->getType(), SF);
1992 case Instruction::GetElementPtr:
1993 return executeGEPOperation(CE->getOperand(0), gep_type_begin(CE),
1995 break;
1996 }
1997
1998
1999
2000 GenericValue Op0 = getOperandValue(CE->getOperand(0), SF);
2001 GenericValue Op1 = getOperandValue(CE->getOperand(1), SF);
2003 switch (CE->getOpcode()) {
2004 case Instruction::Add: Dest.IntVal = Op0.IntVal + Op1.IntVal; break;
2005 case Instruction::Sub: Dest.IntVal = Op0.IntVal - Op1.IntVal; break;
2006 case Instruction::Mul: Dest.IntVal = Op0.IntVal * Op1.IntVal; break;
2007 case Instruction::Xor: Dest.IntVal = Op0.IntVal ^ Op1.IntVal; break;
  case Instruction::Shl:
    Dest.IntVal = Op0.IntVal.shl(Op1.IntVal.getZExtValue());
    break;
2011 default:
2012 dbgs() << "Unhandled ConstantExpr: " << *CE << "\n";
2014 }
2015 return Dest;
2016}
2017
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    return getConstantExprValue(CE, SF);
  } else if (Constant *CPV = dyn_cast<Constant>(V)) {
    return getConstantValue(CPV);
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    return PTOGV(getPointerToGlobal(GV));
  } else {
    return SF.Values[V];
  }
2028}
2029
//===----------------------------------------------------------------------===//
//                        Dispatch and Execution Code
//===----------------------------------------------------------------------===//
//
// callFunction - Execute the specified function...
//
void Interpreter::callFunction(Function *F, ArrayRef<GenericValue> ArgVals) {
2038 assert((ECStack.empty() || !ECStack.back().Caller ||
2039 ECStack.back().Caller->arg_size() == ArgVals.size()) &&
2040 "Incorrect number of arguments passed into function call!");
2041
2042 ECStack.emplace_back();
2045
2046
2047 if (F->isDeclaration()) {
2049
2050 popStackAndReturnValueToCaller (F->getReturnType (), Result);
2051 return;
2052 }
2053
2054
2055 StackFrame.CurBB = &F->front();
2057
2058
2059 assert((ArgVals.size() == F->arg_size() ||
2060 (ArgVals.size() > F->arg_size() && F->getFunctionType()->isVarArg()))&&
2061 "Invalid number of values passed to function invocation!");
2062
2063
  unsigned i = 0;
  for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
       AI != E; ++AI, ++i)
    SetValue(&*AI, ArgVals[i], StackFrame);
2068
2069
2070 StackFrame.VarArgs.assign(ArgVals.begin()+i, ArgVals.end());
2071}
2072

void Interpreter::run() {
  while (!ECStack.empty()) {
    // Interpret a single instruction & increment the "PC".
    ExecutionContext &SF = ECStack.back();  // Current stack frame
    Instruction &I = *SF.CurInst++;         // Increment before execute

    // Track the number of dynamic instructions executed.
    ++NumDynamicInsts;

    LLVM_DEBUG(dbgs() << "About to interpret: " << I << "\n");
    visit(I);   // Dispatch to one of the visit* methods...
  }
}