hotspot Sdiff src/share/vm/oops (original) (raw)
523 524 virtual void print_data_on(outputStream* st, const char* extra = NULL) const { 525 ShouldNotReachHere(); 526 } 527 528 void print_data_on(outputStream* st, const MethodData* md) const; 529 530 void print_shared(outputStream* st, const char* name, const char* extra) const; 531 void tab(outputStream* st, bool first = false) const; 532 }; 533 534 // BitData 535 // 536 // A BitData holds a flag or two in its header. 537 class BitData : public ProfileData { 538 protected: 539 enum { 540 // null_seen: 541 // saw a null operand (cast/aastore/instanceof) 542 null_seen_flag = DataLayout::first_flag + 0
543 }; 544 enum { bit_cell_count = 0 }; // no additional data fields needed. 545 public: 546 BitData(DataLayout* layout) : ProfileData(layout) { 547 } 548 549 virtual bool is_BitData() const { return true; } 550 551 static int static_cell_count() { 552 return bit_cell_count; 553 } 554 555 virtual int cell_count() const { 556 return static_cell_count(); 557 } 558 559 // Accessor 560 561 // The null_seen flag bit is specially known to the interpreter. 562 // Consulting it allows the compiler to avoid setting up null_check traps. 563 bool null_seen() { return flag_at(null_seen_flag); } 564 void set_null_seen() { set_flag_at(null_seen_flag); } 565
566 567 // Code generation support 568 static int null_seen_byte_constant() { 569 return flag_number_to_byte_constant(null_seen_flag); 570 } 571 572 static ByteSize bit_data_size() { 573 return cell_offset(bit_cell_count); 574 } 575 576 #ifdef CC_INTERP 577 static int bit_data_size_in_bytes() { 578 return cell_offset_in_bytes(bit_cell_count); 579 } 580 581 static void set_null_seen(DataLayout* layout) { 582 set_flag_at(layout, null_seen_flag); 583 } 584 585 static DataLayout* advance(DataLayout* layout) {
1149 if (has_arguments()) { 1150 _args.clean_weak_klass_links(is_alive_closure); 1151 } 1152 if (has_return()) { 1153 _ret.clean_weak_klass_links(is_alive_closure); 1154 } 1155 } 1156 1157 virtual void print_data_on(outputStream* st, const char* extra = NULL) const; 1158 }; 1159 1160 // ReceiverTypeData 1161 // 1162 // A ReceiverTypeData is used to access profiling information about a 1163 // dynamic type check. It consists of a counter which counts the total times 1164 // that the check is reached, and a series of (Klass*, count) pairs 1165 // which are used to store a type profile for the receiver of the check. 1166 class ReceiverTypeData : public CounterData { 1167 protected: 1168 enum {
1169 receiver0_offset = counter_cell_count,
1170 count0_offset, 1171 receiver_type_row_cell_count = (count0_offset + 1) - receiver0_offset 1172 }; 1173 1174 public: 1175 ReceiverTypeData(DataLayout* layout) : CounterData(layout) { 1176 assert(layout->tag() == DataLayout::receiver_type_data_tag || 1177 layout->tag() == DataLayout::virtual_call_data_tag || 1178 layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type"); 1179 } 1180 1181 virtual bool is_ReceiverTypeData() const { return true; } 1182 1183 static int static_cell_count() { 1184 return counter_cell_count + (uint) TypeProfileWidth * receiver_type_row_cell_count; 1185 } 1186 1187 virtual int cell_count() const { 1188 return static_cell_count(); 1189 } 1190 1191 // Direct accessors 1192 static uint row_limit() { 1193 return TypeProfileWidth; 1194 } 1195 static int receiver_cell_index(uint row) { 1196 return receiver0_offset + row * receiver_type_row_cell_count; 1197 } 1198 static int receiver_count_cell_index(uint row) { 1199 return count0_offset + row * receiver_type_row_cell_count; 1200 } 1201 1202 Klass* receiver(uint row) const { 1203 assert(row < row_limit(), "oob"); 1204
1226 assert(row < row_limit(), "oob"); 1227 // Clear total count - indicator of polymorphic call site. 1228 // The site may look like as monomorphic after that but 1229 // it allow to have more accurate profiling information because 1230 // there was execution phase change since klasses were unloaded. 1231 // If the site is still polymorphic then MDO will be updated 1232 // to reflect it. But it could be the case that the site becomes 1233 // only bimorphic. Then keeping total count not 0 will be wrong. 1234 // Even if we use monomorphic (when it is not) for compilation 1235 // we will only have trap, deoptimization and recompile again 1236 // with updated MDO after executing method in Interpreter. 1237 // An additional receiver will be recorded in the cleaned row 1238 // during next call execution. 1239 // 1240 // Note: our profiling logic works with empty rows in any slot. 1241 // We do sorting a profiling info (ciCallProfile) for compilation. 1242 // 1243 set_count(0); 1244 set_receiver(row, NULL); 1245 set_receiver_count(row, 0);
1246 } 1247 1248 // Code generation support 1249 static ByteSize receiver_offset(uint row) { 1250 return cell_offset(receiver_cell_index(row)); 1251 } 1252 static ByteSize receiver_count_offset(uint row) { 1253 return cell_offset(receiver_count_cell_index(row)); 1254 }
1255 static ByteSize receiver_type_data_size() { 1256 return cell_offset(static_cell_count()); 1257 } 1258 1259 // GC support 1260 virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure); 1261 1262 #ifdef CC_INTERP 1263 static int receiver_type_data_size_in_bytes() { 1264 return cell_offset_in_bytes(static_cell_count()); 1265 } 1266 1267 static Klass* receiver_unchecked(DataLayout* layout, uint row) { 1268 Klass* recv = (Klass*)layout->cell_at(receiver_cell_index(row)); 1269 return recv; 1270 } 1271 1272 static void increment_receiver_count_no_overflow(DataLayout* layout, Klass *rcvr) { 1273 const int num_rows = row_limit(); 1274 // Receiver already exists?
1299 void print_receiver_data_on(outputStream* st) const; 1300 void print_data_on(outputStream* st, const char* extra = NULL) const; 1301 }; 1302 1303 // VirtualCallData 1304 // 1305 // A VirtualCallData is used to access profiling information about a 1306 // virtual call. For now, it has nothing more than a ReceiverTypeData. 1307 class VirtualCallData : public ReceiverTypeData { 1308 public: 1309 VirtualCallData(DataLayout* layout) : ReceiverTypeData(layout) { 1310 assert(layout->tag() == DataLayout::virtual_call_data_tag || 1311 layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type"); 1312 } 1313 1314 virtual bool is_VirtualCallData() const { return true; } 1315 1316 static int static_cell_count() { 1317 // At this point we could add more profile state, e.g., for arguments. 1318 // But for now it's the same size as the base record type. 1319 return ReceiverTypeData::static_cell_count(); 1320 } 1321 1322 virtual int cell_count() const { 1323 return static_cell_count(); 1324 } 1325 1326 // Direct accessors 1327 static ByteSize virtual_call_data_size() { 1328 return cell_offset(static_cell_count()); 1329 } 1330 1331 #ifdef CC_INTERP 1332 static int virtual_call_data_size_in_bytes() { 1333 return cell_offset_in_bytes(static_cell_count()); 1334 } 1335 1336 static DataLayout* advance(DataLayout* layout) { 1337 return (DataLayout*) (((address)layout) + (ssize_t)VirtualCallData::virtual_call_data_size_in_bytes()); 1338 } 1339 #endif // CC_INTERP 1340
1341 void print_data_on(outputStream* st, const char* extra = NULL) const; 1342 }; 1343 1344 // VirtualCallTypeData 1345 // 1346 // A VirtualCallTypeData is used to access profiling information about 1347 // a virtual call for which we collect type information about 1348 // arguments and return value. 1349 class VirtualCallTypeData : public VirtualCallData { 1350 private: 1351 // entries for arguments if any 1352 TypeStackSlotEntries _args; 1353 // entry for return type if any 1354 ReturnTypeEntry _ret; 1355 1356 int cell_count_global_offset() const { 1357 return VirtualCallData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset(); 1358 } 1359 1360 // number of cells not counting the header
2036 private: 2037 friend class ProfileData; 2038 2039 // Back pointer to the Method* 2040 Method* _method; 2041 2042 // Size of this oop in bytes 2043 int _size; 2044 2045 // Cached hint for bci_to_dp and bci_to_data 2046 int _hint_di; 2047 2048 Mutex _extra_data_lock; 2049 2050 MethodData(methodHandle method, int size, TRAPS); 2051 public: 2052 static MethodData* allocate(ClassLoaderData* loader_data, methodHandle method, TRAPS); 2053 MethodData() : _extra_data_lock(Monitor::leaf, "MDO extra data lock") {}; // For ciMethodData 2054 2055 bool is_methodData() const volatile { return true; }
2056 2057 // Whole-method sticky bits and flags 2058 enum { 2059 _trap_hist_limit = 22, // decoupled from Deoptimization::Reason_LIMIT 2060 _trap_hist_mask = max_jubyte, 2061 _extra_data_count = 4 // extra DataLayout headers, for trap history 2062 }; // Public flag values 2063 private: 2064 uint _nof_decompiles; // count of all nmethod removals 2065 uint _nof_overflow_recompiles; // recompile count, excluding recomp. bits 2066 uint _nof_overflow_traps; // trap count, excluding _trap_hist 2067 union { 2068 intptr_t _align; 2069 u1 _array[_trap_hist_limit]; 2070 } _trap_hist; 2071 2072 // Support for interprocedural escape analysis, from Thomas Kotzmann. 2073 intx _eflags; // flags on escape information 2074 intx _arg_local; // bit set of non-escaping arguments 2075 intx _arg_stack; // bit set of stack-allocatable arguments 2076 intx _arg_returned; // bit set of returned arguments 2077 2078 int _creation_mileage; // method mileage at MDO creation 2079
2087 // Counter values at the time profiling started. 2088 int _invocation_counter_start; 2089 int _backedge_counter_start; 2090 uint _tenure_traps; 2091 int _invoke_mask; // per-method Tier0InvokeNotifyFreqLog 2092 int _backedge_mask; // per-method Tier0BackedgeNotifyFreqLog 2093 2094 #if INCLUDE_RTM_OPT 2095 // State of RTM code generation during compilation of the method 2096 int _rtm_state; 2097 #endif 2098 2099 // Number of loops and blocks is computed when compiling the first 2100 // time with C1. It is used to determine if method is trivial. 2101 short _num_loops; 2102 short _num_blocks; 2103 // Does this method contain anything worth profiling? 2104 enum WouldProfile {unknown, no_profile, profile}; 2105 WouldProfile _would_profile; 2106
2107 // Size of _data array in bytes. (Excludes header and extra_data fields.) 2108 int _data_size; 2109 2110 // data index for the area dedicated to parameters. -1 if no 2111 // parameter profiling. 2112 enum { no_parameters = -2, parameters_uninitialized = -1 }; 2113 int _parameters_type_data_di; 2114 int parameters_size_in_bytes() const { 2115 ParametersTypeData* param = parameters_type_data(); 2116 return param == NULL ? 0 : param->size_in_bytes(); 2117 } 2118 2119 // Beginning of the data entries 2120 intptr_t _data[1]; 2121 2122 // Helper for size computation 2123 static int compute_data_size(BytecodeStream* stream); 2124 static int bytecode_cell_count(Bytecodes::Code code); 2125 static bool is_speculative_trap_bytecode(Bytecodes::Code code); 2126 enum { no_profile_data = -1, variable_cell_count = -2 };
2365 } 2366 // If SpeculativeTrapData allocation fails try to allocate a 2367 // regular entry 2368 data = bci_to_data(bci); 2369 if (data != NULL) { 2370 return data; 2371 } 2372 return bci_to_extra_data(bci, NULL, true); 2373 } 2374 2375 // Add a handful of extra data records, for trap tracking. 2376 DataLayout* extra_data_base() const { return limit_data_position(); } 2377 DataLayout* extra_data_limit() const { return (DataLayout*)((address)this + size_in_bytes()); } 2378 DataLayout* args_data_limit() const { return (DataLayout*)((address)this + size_in_bytes() - 2379 parameters_size_in_bytes()); } 2380 int extra_data_size() const { return (address)extra_data_limit() - (address)extra_data_base(); } 2381 static DataLayout* next_extra(DataLayout* dp); 2382 2383 // Return (uint)-1 for overflow. 2384 uint trap_count(int reason) const { 2385 assert((uint)reason < _trap_hist_limit, "oob"); 2386 return (int)((_trap_hist._array[reason]+1) & _trap_hist_mask) - 1; 2387 } 2388 // For loops: 2389 static uint trap_reason_limit() { return _trap_hist_limit; } 2390 static uint trap_count_limit() { return _trap_hist_mask; } 2391 uint inc_trap_count(int reason) { 2392 // Count another trap, anywhere in this method. 2393 assert(reason >= 0, "must be single trap"); 2394 if ((uint)reason < _trap_hist_limit) { 2395 uint cnt1 = 1 + _trap_hist._array[reason]; 2396 if ((cnt1 & _trap_hist_mask) != 0) { // if no counter overflow... 2397 _trap_hist._array[reason] = cnt1; 2398 return cnt1; 2399 } else { 2400 return _trap_hist_mask + (++_nof_overflow_traps); 2401 } 2402 } else { 2403 // Could not represent the count in the histogram. 
2404 return (++_nof_overflow_traps); 2405 } 2406 } 2407 2408 uint overflow_trap_count() const { 2409 return _nof_overflow_traps; 2410 } 2411 uint overflow_recompile_count() const { 2412 return _nof_overflow_recompiles; 2413 } 2414 void inc_overflow_recompile_count() { 2415 _nof_overflow_recompiles += 1; 2416 } 2417 uint decompile_count() const { 2418 return _nof_decompiles; 2419 } 2420 void inc_decompile_count() { 2421 _nof_decompiles += 1; 2422 if (decompile_count() > (uint)PerMethodRecompilationCutoff) { 2423 method()->set_not_compilable(CompLevel_full_optimization, true, "decompile_count > PerMethodRecompilationCutoff"); 2424 } 2425 }
2429 void inc_tenure_traps() { 2430 _tenure_traps += 1; 2431 } 2432 2433 // Return pointer to area dedicated to parameters in MDO 2434 ParametersTypeData* parameters_type_data() const { 2435 assert(_parameters_type_data_di != parameters_uninitialized, "called too early"); 2436 return _parameters_type_data_di != no_parameters ? data_layout_at(_parameters_type_data_di)->data_in()->as_ParametersTypeData() : NULL; 2437 } 2438 2439 int parameters_type_data_di() const { 2440 assert(_parameters_type_data_di != parameters_uninitialized && _parameters_type_data_di != no_parameters, "no args type data"); 2441 return _parameters_type_data_di; 2442 } 2443 2444 // Support for code generation 2445 static ByteSize data_offset() { 2446 return byte_offset_of(MethodData, _data[0]); 2447 } 2448
2449 static ByteSize invocation_counter_offset() { 2450 return byte_offset_of(MethodData, _invocation_counter); 2451 } 2452 2453 static ByteSize backedge_counter_offset() { 2454 return byte_offset_of(MethodData, _backedge_counter); 2455 } 2456 2457 static ByteSize invoke_mask_offset() { 2458 return byte_offset_of(MethodData, _invoke_mask); 2459 } 2460 2461 static ByteSize backedge_mask_offset() { 2462 return byte_offset_of(MethodData, _backedge_mask); 2463 } 2464 2465 static ByteSize parameters_type_data_di_offset() { 2466 return byte_offset_of(MethodData, _parameters_type_data_di); 2467 } 2468
523 524 virtual void print_data_on(outputStream* st, const char* extra = NULL) const { 525 ShouldNotReachHere(); 526 } 527 528 void print_data_on(outputStream* st, const MethodData* md) const; 529 530 void print_shared(outputStream* st, const char* name, const char* extra) const; 531 void tab(outputStream* st, bool first = false) const; 532 }; 533 534 // BitData 535 // 536 // A BitData holds a flag or two in its header. 537 class BitData : public ProfileData { 538 protected: 539 enum { 540 // null_seen: 541 // saw a null operand (cast/aastore/instanceof) 542 null_seen_flag = DataLayout::first_flag + 0 543 #if INCLUDE_JVMCI 544 // bytecode threw any exception 545 , exception_seen_flag = null_seen_flag + 1 546 #endif 547 }; 548 enum { bit_cell_count = 0 }; // no additional data fields needed. 549 public: 550 BitData(DataLayout* layout) : ProfileData(layout) { 551 } 552 553 virtual bool is_BitData() const { return true; } 554 555 static int static_cell_count() { 556 return bit_cell_count; 557 } 558 559 virtual int cell_count() const { 560 return static_cell_count(); 561 } 562 563 // Accessor 564 565 // The null_seen flag bit is specially known to the interpreter. 566 // Consulting it allows the compiler to avoid setting up null_check traps. 
567 bool null_seen() { return flag_at(null_seen_flag); } 568 void set_null_seen() { set_flag_at(null_seen_flag); } 569 570 #if INCLUDE_JVMCI 571 // true if an exception was thrown at the specific BCI 572 bool exception_seen() { return flag_at(exception_seen_flag); } 573 void set_exception_seen() { set_flag_at(exception_seen_flag); } 574 #endif 575 576 // Code generation support 577 static int null_seen_byte_constant() { 578 return flag_number_to_byte_constant(null_seen_flag); 579 } 580 581 static ByteSize bit_data_size() { 582 return cell_offset(bit_cell_count); 583 } 584 585 #ifdef CC_INTERP 586 static int bit_data_size_in_bytes() { 587 return cell_offset_in_bytes(bit_cell_count); 588 } 589 590 static void set_null_seen(DataLayout* layout) { 591 set_flag_at(layout, null_seen_flag); 592 } 593 594 static DataLayout* advance(DataLayout* layout) {
1158 if (has_arguments()) { 1159 _args.clean_weak_klass_links(is_alive_closure); 1160 } 1161 if (has_return()) { 1162 _ret.clean_weak_klass_links(is_alive_closure); 1163 } 1164 } 1165 1166 virtual void print_data_on(outputStream* st, const char* extra = NULL) const; 1167 }; 1168 1169 // ReceiverTypeData 1170 // 1171 // A ReceiverTypeData is used to access profiling information about a 1172 // dynamic type check. It consists of a counter which counts the total times 1173 // that the check is reached, and a series of (Klass*, count) pairs 1174 // which are used to store a type profile for the receiver of the check. 1175 class ReceiverTypeData : public CounterData { 1176 protected: 1177 enum { 1178 #if INCLUDE_JVMCI 1179 // Description of the different counters 1180 // ReceiverTypeData for instanceof/checkcast/aastore: 1181 // C1/C2: count is incremented on type overflow and decremented for failed type checks 1182 // JVMCI: count decremented for failed type checks and nonprofiled_count is incremented on type overflow 1183 // TODO (chaeubl): in fact, JVMCI should also increment the count for failed type checks to mimic the C1/C2 behavior 1184 // VirtualCallData for invokevirtual/invokeinterface: 1185 // C1/C2: count is incremented on type overflow 1186 // JVMCI: count is incremented on type overflow, nonprofiled_count is incremented on method overflow 1187 1188 // JVMCI is interested in knowing the percentage of type checks involving a type not explicitly in the profile 1189 nonprofiled_count_off_set = counter_cell_count, 1190 receiver0_offset, 1191 #else 1192 receiver0_offset = counter_cell_count, 1193 #endif 1194 count0_offset, 1195 receiver_type_row_cell_count = (count0_offset + 1) - receiver0_offset 1196 }; 1197 1198 public: 1199 ReceiverTypeData(DataLayout* layout) : CounterData(layout) { 1200 assert(layout->tag() == DataLayout::receiver_type_data_tag || 1201 layout->tag() == DataLayout::virtual_call_data_tag || 1202 layout->tag() == 
DataLayout::virtual_call_type_data_tag, "wrong type"); 1203 } 1204 1205 virtual bool is_ReceiverTypeData() const { return true; } 1206 1207 static int static_cell_count() { 1208 return counter_cell_count + (uint) TypeProfileWidth * receiver_type_row_cell_count JVMCI_ONLY(+ 1); 1209 } 1210 1211 virtual int cell_count() const { 1212 return static_cell_count(); 1213 } 1214 1215 // Direct accessors 1216 static uint row_limit() { 1217 return TypeProfileWidth; 1218 } 1219 static int receiver_cell_index(uint row) { 1220 return receiver0_offset + row * receiver_type_row_cell_count; 1221 } 1222 static int receiver_count_cell_index(uint row) { 1223 return count0_offset + row * receiver_type_row_cell_count; 1224 } 1225 1226 Klass* receiver(uint row) const { 1227 assert(row < row_limit(), "oob"); 1228
1250 assert(row < row_limit(), "oob"); 1251 // Clear total count - indicator of polymorphic call site. 1252 // The site may look like as monomorphic after that but 1253 // it allow to have more accurate profiling information because 1254 // there was execution phase change since klasses were unloaded. 1255 // If the site is still polymorphic then MDO will be updated 1256 // to reflect it. But it could be the case that the site becomes 1257 // only bimorphic. Then keeping total count not 0 will be wrong. 1258 // Even if we use monomorphic (when it is not) for compilation 1259 // we will only have trap, deoptimization and recompile again 1260 // with updated MDO after executing method in Interpreter. 1261 // An additional receiver will be recorded in the cleaned row 1262 // during next call execution. 1263 // 1264 // Note: our profiling logic works with empty rows in any slot. 1265 // We do sorting a profiling info (ciCallProfile) for compilation. 1266 // 1267 set_count(0); 1268 set_receiver(row, NULL); 1269 set_receiver_count(row, 0); 1270 #if INCLUDE_JVMCI 1271 if (!this->is_VirtualCallData()) { 1272 // if this is a ReceiverTypeData for JVMCI, the nonprofiled_count 1273 // must also be reset (see "Description of the different counters" above) 1274 set_nonprofiled_count(0); 1275 } 1276 #endif 1277 } 1278 1279 // Code generation support 1280 static ByteSize receiver_offset(uint row) { 1281 return cell_offset(receiver_cell_index(row)); 1282 } 1283 static ByteSize receiver_count_offset(uint row) { 1284 return cell_offset(receiver_count_cell_index(row)); 1285 } 1286 #if INCLUDE_JVMCI 1287 static ByteSize nonprofiled_receiver_count_offset() { 1288 return cell_offset(nonprofiled_count_off_set); 1289 } 1290 uint nonprofiled_count() const { 1291 return uint_at(nonprofiled_count_off_set); 1292 } 1293 void set_nonprofiled_count(uint count) { 1294 set_uint_at(nonprofiled_count_off_set, count); 1295 } 1296 #endif 1297 static ByteSize receiver_type_data_size() { 1298 return 
cell_offset(static_cell_count()); 1299 } 1300 1301 // GC support 1302 virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure); 1303 1304 #ifdef CC_INTERP 1305 static int receiver_type_data_size_in_bytes() { 1306 return cell_offset_in_bytes(static_cell_count()); 1307 } 1308 1309 static Klass* receiver_unchecked(DataLayout* layout, uint row) { 1310 Klass* recv = (Klass*)layout->cell_at(receiver_cell_index(row)); 1311 return recv; 1312 } 1313 1314 static void increment_receiver_count_no_overflow(DataLayout* layout, Klass *rcvr) { 1315 const int num_rows = row_limit(); 1316 // Receiver already exists?
1341 void print_receiver_data_on(outputStream* st) const; 1342 void print_data_on(outputStream* st, const char* extra = NULL) const; 1343 }; 1344 1345 // VirtualCallData 1346 // 1347 // A VirtualCallData is used to access profiling information about a 1348 // virtual call. For now, it has nothing more than a ReceiverTypeData. 1349 class VirtualCallData : public ReceiverTypeData { 1350 public: 1351 VirtualCallData(DataLayout* layout) : ReceiverTypeData(layout) { 1352 assert(layout->tag() == DataLayout::virtual_call_data_tag || 1353 layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type"); 1354 } 1355 1356 virtual bool is_VirtualCallData() const { return true; } 1357 1358 static int static_cell_count() { 1359 // At this point we could add more profile state, e.g., for arguments. 1360 // But for now it's the same size as the base record type. 1361 return ReceiverTypeData::static_cell_count() JVMCI_ONLY(+ (uint) MethodProfileWidth * receiver_type_row_cell_count); 1362 } 1363 1364 virtual int cell_count() const { 1365 return static_cell_count(); 1366 } 1367 1368 // Direct accessors 1369 static ByteSize virtual_call_data_size() { 1370 return cell_offset(static_cell_count()); 1371 } 1372 1373 #ifdef CC_INTERP 1374 static int virtual_call_data_size_in_bytes() { 1375 return cell_offset_in_bytes(static_cell_count()); 1376 } 1377 1378 static DataLayout* advance(DataLayout* layout) { 1379 return (DataLayout*) (((address)layout) + (ssize_t)VirtualCallData::virtual_call_data_size_in_bytes()); 1380 } 1381 #endif // CC_INTERP 1382 1383 #if INCLUDE_JVMCI 1384 static ByteSize method_offset(uint row) { 1385 return cell_offset(method_cell_index(row)); 1386 } 1387 static ByteSize method_count_offset(uint row) { 1388 return cell_offset(method_count_cell_index(row)); 1389 } 1390 static int method_cell_index(uint row) { 1391 return receiver0_offset + (row + TypeProfileWidth) * receiver_type_row_cell_count; 1392 } 1393 static int method_count_cell_index(uint row) { 1394 
return count0_offset + (row + TypeProfileWidth) * receiver_type_row_cell_count; 1395 } 1396 static uint method_row_limit() { 1397 return MethodProfileWidth; 1398 } 1399 1400 Method* method(uint row) const { 1401 assert(row < method_row_limit(), "oob"); 1402 1403 Method* method = (Method*)intptr_at(method_cell_index(row)); 1404 assert(method == NULL || method->is_method(), "must be"); 1405 return method; 1406 } 1407 1408 uint method_count(uint row) const { 1409 assert(row < method_row_limit(), "oob"); 1410 return uint_at(method_count_cell_index(row)); 1411 } 1412 1413 void set_method(uint row, Method* m) { 1414 assert((uint)row < method_row_limit(), "oob"); 1415 set_intptr_at(method_cell_index(row), (uintptr_t)m); 1416 } 1417 1418 void set_method_count(uint row, uint count) { 1419 assert(row < method_row_limit(), "oob"); 1420 set_uint_at(method_count_cell_index(row), count); 1421 } 1422 1423 void clear_method_row(uint row) { 1424 assert(row < method_row_limit(), "oob"); 1425 // Clear total count - indicator of polymorphic call site (see comment for clear_row() in ReceiverTypeData). 1426 set_nonprofiled_count(0); 1427 set_method(row, NULL); 1428 set_method_count(row, 0); 1429 } 1430 1431 // GC support 1432 virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure); 1433 1434 // Redefinition support 1435 virtual void clean_weak_method_links(); 1436 #endif 1437 1438 #if INCLUDE_JVMCI 1439 void print_method_data_on(outputStream* st) const; 1440 #endif 1441 void print_data_on(outputStream* st, const char* extra = NULL) const; 1442 }; 1443 1444 // VirtualCallTypeData 1445 // 1446 // A VirtualCallTypeData is used to access profiling information about 1447 // a virtual call for which we collect type information about 1448 // arguments and return value. 
1449 class VirtualCallTypeData : public VirtualCallData { 1450 private: 1451 // entries for arguments if any 1452 TypeStackSlotEntries _args; 1453 // entry for return type if any 1454 ReturnTypeEntry _ret; 1455 1456 int cell_count_global_offset() const { 1457 return VirtualCallData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset(); 1458 } 1459 1460 // number of cells not counting the header
2136 private: 2137 friend class ProfileData; 2138 2139 // Back pointer to the Method* 2140 Method* _method; 2141 2142 // Size of this oop in bytes 2143 int _size; 2144 2145 // Cached hint for bci_to_dp and bci_to_data 2146 int _hint_di; 2147 2148 Mutex _extra_data_lock; 2149 2150 MethodData(methodHandle method, int size, TRAPS); 2151 public: 2152 static MethodData* allocate(ClassLoaderData* loader_data, methodHandle method, TRAPS); 2153 MethodData() : _extra_data_lock(Monitor::leaf, "MDO extra data lock") {}; // For ciMethodData 2154 2155 bool is_methodData() const volatile { return true; } 2156 void initialize(); 2157 2158 // Whole-method sticky bits and flags 2159 enum { 2160 _trap_hist_limit = 22 JVMCI_ONLY(+5), // decoupled from Deoptimization::Reason_LIMIT 2161 _trap_hist_mask = max_jubyte, 2162 _extra_data_count = 4 // extra DataLayout headers, for trap history 2163 }; // Public flag values 2164 private: 2165 uint _nof_decompiles; // count of all nmethod removals 2166 uint _nof_overflow_recompiles; // recompile count, excluding recomp. bits 2167 uint _nof_overflow_traps; // trap count, excluding _trap_hist 2168 union { 2169 intptr_t _align; 2170 u1 _array[_trap_hist_limit]; 2171 } _trap_hist; 2172 2173 // Support for interprocedural escape analysis, from Thomas Kotzmann. 2174 intx _eflags; // flags on escape information 2175 intx _arg_local; // bit set of non-escaping arguments 2176 intx _arg_stack; // bit set of stack-allocatable arguments 2177 intx _arg_returned; // bit set of returned arguments 2178 2179 int _creation_mileage; // method mileage at MDO creation 2180
2188 // Counter values at the time profiling started. 2189 int _invocation_counter_start; 2190 int _backedge_counter_start; 2191 uint _tenure_traps; 2192 int _invoke_mask; // per-method Tier0InvokeNotifyFreqLog 2193 int _backedge_mask; // per-method Tier0BackedgeNotifyFreqLog 2194 2195 #if INCLUDE_RTM_OPT 2196 // State of RTM code generation during compilation of the method 2197 int _rtm_state; 2198 #endif 2199 2200 // Number of loops and blocks is computed when compiling the first 2201 // time with C1. It is used to determine if method is trivial. 2202 short _num_loops; 2203 short _num_blocks; 2204 // Does this method contain anything worth profiling? 2205 enum WouldProfile {unknown, no_profile, profile}; 2206 WouldProfile _would_profile; 2207 2208 #if INCLUDE_JVMCI 2209 // Support for HotSpotMethodData.setCompiledIRSize(int) 2210 int _jvmci_ir_size; 2211 #endif 2212 2213 // Size of _data array in bytes. (Excludes header and extra_data fields.) 2214 int _data_size; 2215 2216 // data index for the area dedicated to parameters. -1 if no 2217 // parameter profiling. 2218 enum { no_parameters = -2, parameters_uninitialized = -1 }; 2219 int _parameters_type_data_di; 2220 int parameters_size_in_bytes() const { 2221 ParametersTypeData* param = parameters_type_data(); 2222 return param == NULL ? 0 : param->size_in_bytes(); 2223 } 2224 2225 // Beginning of the data entries 2226 intptr_t _data[1]; 2227 2228 // Helper for size computation 2229 static int compute_data_size(BytecodeStream* stream); 2230 static int bytecode_cell_count(Bytecodes::Code code); 2231 static bool is_speculative_trap_bytecode(Bytecodes::Code code); 2232 enum { no_profile_data = -1, variable_cell_count = -2 };
2471 } 2472 // If SpeculativeTrapData allocation fails try to allocate a 2473 // regular entry 2474 data = bci_to_data(bci); 2475 if (data != NULL) { 2476 return data; 2477 } 2478 return bci_to_extra_data(bci, NULL, true); 2479 } 2480 2481 // Add a handful of extra data records, for trap tracking. 2482 DataLayout* extra_data_base() const { return limit_data_position(); } 2483 DataLayout* extra_data_limit() const { return (DataLayout*)((address)this + size_in_bytes()); } 2484 DataLayout* args_data_limit() const { return (DataLayout*)((address)this + size_in_bytes() - 2485 parameters_size_in_bytes()); } 2486 int extra_data_size() const { return (address)extra_data_limit() - (address)extra_data_base(); } 2487 static DataLayout* next_extra(DataLayout* dp); 2488 2489 // Return (uint)-1 for overflow. 2490 uint trap_count(int reason) const { 2491 assert((uint)reason < JVMCI_ONLY(2*) _trap_hist_limit, "oob"); 2492 return (int)((_trap_hist._array[reason]+1) & _trap_hist_mask) - 1; 2493 } 2494 // For loops: 2495 static uint trap_reason_limit() { return _trap_hist_limit; } 2496 static uint trap_count_limit() { return _trap_hist_mask; } 2497 uint inc_trap_count(int reason) { 2498 // Count another trap, anywhere in this method. 2499 assert(reason >= 0, "must be single trap"); 2500 assert((uint)reason < JVMCI_ONLY(2*) _trap_hist_limit, "oob"); 2501 uint cnt1 = 1 + _trap_hist._array[reason]; 2502 if ((cnt1 & _trap_hist_mask) != 0) { // if no counter overflow... 2503 _trap_hist._array[reason] = cnt1; 2504 return cnt1; 2505 } else { 2506 return _trap_hist_mask + (++_nof_overflow_traps); 2507 }
2508 } 2509 2510 uint overflow_trap_count() const { 2511 return _nof_overflow_traps; 2512 } 2513 uint overflow_recompile_count() const { 2514 return _nof_overflow_recompiles; 2515 } 2516 void inc_overflow_recompile_count() { 2517 _nof_overflow_recompiles += 1; 2518 } 2519 uint decompile_count() const { 2520 return _nof_decompiles; 2521 } 2522 void inc_decompile_count() { 2523 _nof_decompiles += 1; 2524 if (decompile_count() > (uint)PerMethodRecompilationCutoff) { 2525 method()->set_not_compilable(CompLevel_full_optimization, true, "decompile_count > PerMethodRecompilationCutoff"); 2526 } 2527 }
2531 void inc_tenure_traps() { 2532 _tenure_traps += 1; 2533 } 2534 2535 // Return pointer to area dedicated to parameters in MDO 2536 ParametersTypeData* parameters_type_data() const { 2537 assert(_parameters_type_data_di != parameters_uninitialized, "called too early"); 2538 return _parameters_type_data_di != no_parameters ? data_layout_at(_parameters_type_data_di)->data_in()->as_ParametersTypeData() : NULL; 2539 } 2540 2541 int parameters_type_data_di() const { 2542 assert(_parameters_type_data_di != parameters_uninitialized && _parameters_type_data_di != no_parameters, "no args type data"); 2543 return _parameters_type_data_di; 2544 } 2545 2546 // Support for code generation 2547 static ByteSize data_offset() { 2548 return byte_offset_of(MethodData, _data[0]); 2549 } 2550 2551 static ByteSize trap_history_offset() { 2552 return byte_offset_of(MethodData, _trap_hist._array); 2553 } 2554 2555 static ByteSize invocation_counter_offset() { 2556 return byte_offset_of(MethodData, _invocation_counter); 2557 } 2558 2559 static ByteSize backedge_counter_offset() { 2560 return byte_offset_of(MethodData, _backedge_counter); 2561 } 2562 2563 static ByteSize invoke_mask_offset() { 2564 return byte_offset_of(MethodData, _invoke_mask); 2565 } 2566 2567 static ByteSize backedge_mask_offset() { 2568 return byte_offset_of(MethodData, _backedge_mask); 2569 } 2570 2571 static ByteSize parameters_type_data_di_offset() { 2572 return byte_offset_of(MethodData, _parameters_type_data_di); 2573 } 2574