jdk8/jdk8/hotspot: 46f6f063b272
--- a/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp Thu Mar 21 09:27:54 2013 +0100 @@ -51,6 +51,16 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) { __ bind(_entry);
- if (_info->deoptimize_on_exception()) {
- address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
- __ call(a, relocInfo::runtime_call_type);
- __ delayed()->nop();
- ce->add_call_info_here(_info);
- ce->verify_oop_map(_info);
- debug_only(__ should_not_reach_here());
- return;
- } + if (_index->is_register()) { __ mov(_index->as_register(), G4); } else { @@ -64,11 +74,22 @@ __ delayed()->nop(); ce->add_call_info_here(_info); ce->verify_oop_map(_info); -#ifdef ASSERT
- debug_only(__ should_not_reach_here()); +} + +PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
- _info = new CodeEmitInfo(info); } +void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
- __ bind(_entry);
- address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
- __ call(a, relocInfo::runtime_call_type);
- __ delayed()->nop();
- ce->add_call_info_here(_info);
- ce->verify_oop_map(_info);
- debug_only(__ should_not_reach_here()); +} void CounterOverflowStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); @@ -99,10 +120,17 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
- address a;
- if (_info->deoptimize_on_exception()) {
- // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
- a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
- } else {
- a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
- } + ce->compilation()->implicit_exception_table()->append(_offset, __ offset()); __ bind(_entry);
- __ call(Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id),
relocInfo::runtime_call_type);
- __ call(a, relocInfo::runtime_call_type); __ delayed()->nop(); ce->add_call_info_here(_info); ce->verify_oop_map(_info);
--- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Thu Mar 21 09:27:54 2013 +0100 @@ -3361,6 +3361,45 @@ __ mov(G2_thread, result_reg->as_register()); } +#ifdef ASSERT +// emit run-time assertion +void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
- assert(op->code() == lir_assert, "must be"); +
- if (op->in_opr1()->is_valid()) {
- assert(op->in_opr2()->is_valid(), "both operands must be valid");
- comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
- } else {
- assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
- assert(op->condition() == lir_cond_always, "no other conditions allowed");
- } +
- Label ok;
- if (op->condition() != lir_cond_always) {
- Assembler::Condition acond;
- switch (op->condition()) {
case lir_cond_equal: acond = Assembler::equal; break;
case lir_cond_notEqual: acond = Assembler::notEqual; break;
case lir_cond_less: acond = Assembler::less; break;
case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
case lir_cond_greater: acond = Assembler::greater; break;
case lir_cond_aboveEqual: acond = Assembler::greaterEqualUnsigned; break;
case lir_cond_belowEqual: acond = Assembler::lessEqualUnsigned; break;
default: ShouldNotReachHere();
- };
- __ br(acond, false, Assembler::pt, ok);
- __ delayed()->nop();
- }
- if (op->halt()) {
- const char* str = __ code_string(op->msg());
- __ stop(str);
- } else {
- breakpoint();
- }
- __ bind(ok); +} +#endif void LIR_Assembler::peephole(LIR_List* lir) { LIR_OpList* inst = lir->instructions_list();
--- a/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp Thu Mar 21 09:27:54 2013 +0100 @@ -324,7 +324,7 @@ void LIRGenerator::do_StoreIndexed(StoreIndexed* x) { assert(x->is_pinned(),"");
- bool needs_range_check = x->compute_needs_range_check(); bool use_length = x->length() != NULL; bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT; bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL || @@ -339,12 +339,9 @@ array.load_item(); index.load_nonconstant();
- if (use_length) {
- needs_range_check = x->compute_needs_range_check();
- if (needs_range_check) {
length.set_instruction(x->length());
length.load_item();
- }
} if (needs_store_check) { value.load_item();
--- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp Thu Mar 21 09:27:54 2013 +0100 @@ -987,6 +987,25 @@ break; #endif // INCLUDE_ALL_GCS
- case predicate_failed_trap_id:
{
__ set_info("predicate_failed_trap", dont_gc_arguments);
OopMap* oop_map = save_live_registers(sasm);
int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
oop_maps = new OopMapSet();
oop_maps->add_gc_map(call_offset, oop_map);
DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
assert(deopt_blob != NULL, "deoptimization blob must have been created");
restore_live_registers(sasm);
__ restore();
__ br(Assembler::always, false, Assembler::pt, deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type);
__ delayed()->nop();
}
break;
+ default: { __ set_info("unimplemented entry", dont_gc_arguments); __ save_frame(0);
--- a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp Thu Mar 21 09:27:54 2013 +0100 @@ -101,6 +101,15 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) { __ bind(_entry);
- if (_info->deoptimize_on_exception()) {
- address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
- __ call(RuntimeAddress(a));
- ce->add_call_info_here(_info);
- ce->verify_oop_map(_info);
- debug_only(__ should_not_reach_here());
- return;
- } + // pass the array index on stack because all registers must be preserved if (_index->is_cpu_register()) { ce->store_parameter(_index->as_register(), 0); @@ -115,9 +124,22 @@ } __ call(RuntimeAddress(Runtime1::entry_for(stub_id))); ce->add_call_info_here(_info);
- ce->verify_oop_map(_info); debug_only(__ should_not_reach_here()); } +PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
- _info = new CodeEmitInfo(info); +} + +void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
- __ bind(_entry);
- address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
- __ call(RuntimeAddress(a));
- ce->add_call_info_here(_info);
- ce->verify_oop_map(_info);
- debug_only(__ should_not_reach_here()); +} void DivByZeroStub::emit_code(LIR_Assembler* ce) { if (_offset != -1) { @@ -414,10 +436,19 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
- address a;
- if (_info->deoptimize_on_exception()) {
- // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
- a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
- } else {
- a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
- } + ce->compilation()->implicit_exception_table()->append(_offset, __ offset()); __ bind(_entry);
- __ call(RuntimeAddress(a)); ce->add_call_info_here(_info);
- ce->verify_oop_map(_info); debug_only(__ should_not_reach_here()); }
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Thu Mar 21 09:27:54 2013 +0100 @@ -3755,6 +3755,44 @@ } } +#ifdef ASSERT +// emit run-time assertion +void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
- assert(op->code() == lir_assert, "must be"); +
- if (op->in_opr1()->is_valid()) {
- assert(op->in_opr2()->is_valid(), "both operands must be valid");
- comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
- } else {
- assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
- assert(op->condition() == lir_cond_always, "no other conditions allowed");
- } +
- Label ok;
- if (op->condition() != lir_cond_always) {
- Assembler::Condition acond = Assembler::zero;
- switch (op->condition()) {
case lir_cond_equal: acond = Assembler::equal; break;
case lir_cond_notEqual: acond = Assembler::notEqual; break;
case lir_cond_less: acond = Assembler::less; break;
case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
case lir_cond_greater: acond = Assembler::greater; break;
case lir_cond_belowEqual: acond = Assembler::belowEqual; break;
case lir_cond_aboveEqual: acond = Assembler::aboveEqual; break;
default: ShouldNotReachHere();
- }
- __ jcc(acond, ok);
- }
- if (op->halt()) {
- const char* str = __ code_string(op->msg());
- __ stop(str);
- } else {
- breakpoint();
- }
- __ bind(ok); +} +#endif void LIR_Assembler::membar() { // QQQ sparc TSO uses this,
--- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Thu Mar 21 09:27:54 2013 +0100 @@ -263,7 +263,7 @@ void LIRGenerator::do_StoreIndexed(StoreIndexed* x) { assert(x->is_pinned(),"");
- bool needs_range_check = x->compute_needs_range_check(); bool use_length = x->length() != NULL; bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT; bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL || @@ -278,12 +278,10 @@ array.load_item(); index.load_nonconstant();
- if (use_length) {
- needs_range_check = x->compute_needs_range_check();
- if (needs_range_check) {
length.set_instruction(x->length());
length.load_item();
- }
+ } if (needs_store_check) { value.load_item();
--- a/src/cpu/x86/vm/c1_LinearScan_x86.cpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/cpu/x86/vm/c1_LinearScan_x86.cpp Thu Mar 21 09:27:54 2013 +0100 @@ -675,7 +675,8 @@ switch (op2->code()) { case lir_cmp: case lir_cmp_fd2i:
- case lir_ucmp_fd2i:
- case lir_assert: { assert(left->is_fpu_register(), "invalid LIR"); assert(right->is_fpu_register(), "invalid LIR");
--- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp Thu Mar 21 09:27:54 2013 +0100 @@ -1807,6 +1807,24 @@ break; #endif // INCLUDE_ALL_GCS
- case predicate_failed_trap_id:
{
StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);
OopMap* map = save_live_registers(sasm, 1);
int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
oop_maps = new OopMapSet();
oop_maps->add_gc_map(call_offset, map);
restore_live_registers(sasm);
__ leave();
DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
assert(deopt_blob != NULL, "deoptimization blob must have been created");
__ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
}
break;
+ default: { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments); __ movptr(rax, (int)id);
--- a/src/share/vm/c1/c1_Canonicalizer.cpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/share/vm/c1/c1_Canonicalizer.cpp Thu Mar 21 09:27:54 2013 +0100 @@ -937,4 +937,6 @@ void Canonicalizer::do_ProfileCall(ProfileCall* x) {} void Canonicalizer::do_ProfileInvoke(ProfileInvoke* x) {} void Canonicalizer::do_RuntimeCall(RuntimeCall* x) {} +void Canonicalizer::do_RangeCheckPredicate(RangeCheckPredicate* x) {} +void Canonicalizer::do_Assert(Assert* x) {} void Canonicalizer::do_MemBar(MemBar* x) {}
--- a/src/share/vm/c1/c1_Canonicalizer.hpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/share/vm/c1/c1_Canonicalizer.hpp Thu Mar 21 09:27:54 2013 +0100 @@ -107,6 +107,8 @@ virtual void do_ProfileInvoke (ProfileInvoke* x); virtual void do_RuntimeCall (RuntimeCall* x); virtual void do_MemBar (MemBar* x);
#endif // SHARE_VM_C1_C1_CANONICALIZER_HPP
--- a/src/share/vm/c1/c1_CodeStubs.hpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/share/vm/c1/c1_CodeStubs.hpp Thu Mar 21 09:27:54 2013 +0100 @@ -166,6 +166,22 @@ #endif // PRODUCT }; +// stub used when predicate fails and deoptimization is needed +class PredicateFailedStub: public CodeStub {
- private:
- CodeEmitInfo* _info; +
- public:
- PredicateFailedStub(CodeEmitInfo* info);
- virtual void emit_code(LIR_Assembler* e);
- virtual CodeEmitInfo* info() const { return _info; }
- virtual void visit(LIR_OpVisitState* visitor) {
- visitor->do_slow_case(_info);
- } +#ifndef PRODUCT
- virtual void print_name(outputStream* out) const { out->print("PredicateFailedStub"); } +#endif // PRODUCT +}; class DivByZeroStub: public CodeStub { private:
--- a/src/share/vm/c1/c1_Compilation.cpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/share/vm/c1/c1_Compilation.cpp Thu Mar 21 09:27:54 2013 +0100 @@ -33,13 +33,16 @@ #include "c1/c1_ValueStack.hpp" #include "code/debugInfoRec.hpp" #include "compiler/compileLog.hpp" +#include "c1/c1_RangeCheckElimination.hpp" typedef enum { _t_compile, _t_setup,
- _t_optimize_blocks,
- _t_optimize_null_checks,
- _t_rangeCheckElimination, _t_emit_lir, _t_linearScan, _t_lirGeneration, @@ -52,8 +55,10 @@ static const char * timer_name[] = { "compile", "setup",
- "optimize_blocks",
- "optimize_null_checks",
- "rangeCheckElimination", "emit_lir", "linearScan", "lirGeneration", @@ -159,9 +164,9 @@ if (UseC1Optimizations) { NEEDS_CLEANUP // optimization
} _hir->verify(); @@ -180,13 +185,47 @@ _hir->compute_code(); if (UseGlobalValueNumbering) {
- // No resource mark here! LoopInvariantCodeMotion can allocate ValueStack objects. int instructions = Instruction::number_of_instructions(); GlobalValueNumbering gvn(_hir); assert(instructions == Instruction::number_of_instructions(), "shouldn't have created an instructions");
- _hir->verify(); + +#ifndef PRODUCT
- if (PrintCFGToFile) {
- CFGPrinter::print_cfg(_hir, "Before RangeCheckElimination", true, false);
- } +#endif +
- if (RangeCheckElimination) {
- if (_hir->osr_entry() == NULL) {
PhaseTraceTime timeit(_t_rangeCheckElimination);
RangeCheckElimination::eliminate(_hir);
- }
- } + +#ifndef PRODUCT
- if (PrintCFGToFile) {
- CFGPrinter::print_cfg(_hir, "After RangeCheckElimination", true, false);
- } +#endif +
- if (UseC1Optimizations) {
- // loop invariant code motion reorders instructions and range
- // check elimination adds new instructions so do null check
- // elimination after.
- NEEDS_CLEANUP
- // optimization
- PhaseTraceTime timeit(_t_optimize_null_checks);
- _hir->eliminate_null_checks();
- } +
- _hir->verify(); + // compute use counts after global value numbering _hir->compute_use_counts(); @@ -502,6 +541,7 @@ , _next_id(0) , _next_block_id(0) , _code(buffer_blob) +, _has_access_indexed(false) , _current_instruction(NULL)
#ifndef PRODUCT , _last_instruction_printed(NULL) @@ -567,7 +607,9 @@ tty->print_cr(" Detailed C1 Timings"); tty->print_cr(" Setup time: %6.3f s (%4.1f%%)", timers[_t_setup].seconds(), (timers[_t_setup].seconds() / total) * 100.0); tty->print_cr(" Build IR: %6.3f s (%4.1f%%)", timers[_t_buildIR].seconds(), (timers[_t_buildIR].seconds() / total) * 100.0);
- tty->print_cr(" Optimize: %6.3f s (%4.1f%%)", timers[_t_optimizeIR].seconds(), (timers[_t_optimizeIR].seconds() / total) * 100.0);
- float t_optimizeIR = timers[_t_optimize_blocks].seconds() + timers[_t_optimize_null_checks].seconds();
- tty->print_cr(" Optimize: %6.3f s (%4.1f%%)", t_optimizeIR, (t_optimizeIR / total) * 100.0);
- tty->print_cr(" RCE: %6.3f s (%4.1f%%)", timers[_t_rangeCheckElimination].seconds(), (timers[_t_rangeCheckElimination].seconds() / total) * 100.0); tty->print_cr(" Emit LIR: %6.3f s (%4.1f%%)", timers[_t_emit_lir].seconds(), (timers[_t_emit_lir].seconds() / total) * 100.0); tty->print_cr(" LIR Gen: %6.3f s (%4.1f%%)", timers[_t_lirGeneration].seconds(), (timers[_t_lirGeneration].seconds() / total) * 100.0); tty->print_cr(" Linear Scan: %6.3f s (%4.1f%%)", timers[_t_linearScan].seconds(), (timers[_t_linearScan].seconds() / total) * 100.0);
--- a/src/share/vm/c1/c1_Compilation.hpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/share/vm/c1/c1_Compilation.hpp Thu Mar 21 09:27:54 2013 +0100 @@ -26,8 +26,10 @@ #define SHARE_VM_C1_C1_COMPILATION_HPP #include "ci/ciEnv.hpp" +#include "ci/ciMethodData.hpp" #include "code/exceptionHandlerTable.hpp" #include "memory/resourceArea.hpp" +#include "runtime/deoptimization.hpp" class CompilationResourceObj; class XHandlers; @@ -85,6 +87,7 @@ LinearScan* _allocator; CodeOffsets _offsets; CodeBuffer _code;
- bool _has_access_indexed; // compilation helpers void initialize(); @@ -140,6 +143,7 @@ C1_MacroAssembler* masm() const { return _masm; } CodeOffsets* offsets() { return &_offsets; } Arena* arena() { return _arena; }
- bool has_access_indexed() { return _has_access_indexed; } // Instruction ids int get_next_id() { return _next_id++; } @@ -154,6 +158,7 @@ void set_has_fpu_code(bool f) { _has_fpu_code = f; } void set_has_unsafe_access(bool f) { _has_unsafe_access = f; } void set_would_profile(bool f) { _would_profile = f; }
- void set_has_access_indexed(bool f) { _has_access_indexed = f; } // Add a set of exception handlers covering the given PC offset void add_exception_handlers_for_pco(int pco, XHandlers* exception_handlers); // Statistics gathering @@ -233,6 +238,14 @@ return env()->comp_level() == CompLevel_full_profile && C1UpdateMethodData && C1ProfileCheckcasts; } +
- // will compilation make optimistic assumptions that might lead to
- // deoptimization and that the runtime will account for?
- bool is_optimistic() const {
- return !TieredCompilation &&
(RangeCheckElimination || UseLoopInvariantCodeMotion) &&
method()->method_data()->trap_count(Deoptimization::Reason_none) == 0;
- } };
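The optimistic mode tested by is_optimistic() above is what lets C1 guard a whole loop with a single hoisted predicate and fall back to deoptimization when the guard fails. As a rough conceptual C++ sketch of that transformation (illustrative only, not code from this changeset; all names are made up):

    // Per-iteration checks, as emitted without range check elimination.
    static int sum_checked(const int* a, int a_length, int start, int end) {
      int sum = 0;
      for (int i = start; i < end; i++) {
        if (i < 0 || i >= a_length) return -1;  // models the per-access range check
        sum += a[i];
      }
      return sum;
    }

    // With range check elimination: one RangeCheckPredicate in front of the loop;
    // a failing predicate reaches PredicateFailedStub and deoptimizes, which the
    // fallback call models here.
    static int sum_predicated(const int* a, int a_length, int start, int end) {
      if (start < 0 || end > a_length)                  // hoisted predicate
        return sum_checked(a, a_length, start, end);    // "deoptimize": re-run checked code
      int sum = 0;
      for (int i = start; i < end; i++)
        sum += a[i];                                    // per-iteration check eliminated
      return sum;
    }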
--- a/src/share/vm/c1/c1_GraphBuilder.cpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp Thu Mar 21 09:27:54 2013 +0100 @@ -947,7 +947,9 @@ void GraphBuilder::load_indexed(BasicType type) {
- // In case of in block code motion in range check elimination
- ValueStack* state_before = copy_state_indexed_access();
- compilation()->set_has_access_indexed(true); Value index = ipop(); Value array = apop(); Value length = NULL; @@ -961,7 +963,9 @@ void GraphBuilder::store_indexed(BasicType type) {
- // In case of in block code motion in range check elimination
- ValueStack* state_before = copy_state_indexed_access();
- compilation()->set_has_access_indexed(true); Value value = pop(as_ValueType(type)); Value index = ipop(); Value array = apop(); @@ -1179,7 +1183,9 @@ BlockBegin* tsux = block_at(stream()->get_dest()); BlockBegin* fsux = block_at(stream()->next_bci()); bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci();
- // In case of loop invariant code motion or predicate insertion
- // before the body of a loop the state is needed
- Instruction *i = append(new If(x, cond, false, y, tsux, fsux, (is_bb || compilation()->is_optimistic()) ? state_before : NULL, is_bb)); assert(i->as_Goto() == NULL || (i->as_Goto()->sux_at(0) == tsux && i->as_Goto()->is_safepoint() == tsux->bci() < stream()->cur_bci()) ||
@@ -1294,7 +1300,9 @@ BlockBegin* tsux = block_at(bci() + sw.dest_offset_at(0)); BlockBegin* fsux = block_at(bci() + sw.default_offset()); bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
- // In case of loop invariant code motion or predicate insertion
- // before the body of a loop the state is needed
- ValueStack* state_before = copy_state_if_bb(is_bb); append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb)); } else { // collect successors @@ -1308,7 +1316,9 @@ // add default successor if (sw.default_offset() < 0) has_bb = true; sux->at_put(i, block_at(bci() + sw.default_offset()));
- // In case of loop invariant code motion or predicate insertion
- // before the body of a loop the state is needed
- ValueStack* state_before = copy_state_if_bb(has_bb); Instruction* res = append(new TableSwitch(ipop(), sux, sw.low_key(), state_before, has_bb));
#ifdef ASSERT if (res->as_Goto()) { @@ -1336,7 +1346,9 @@ BlockBegin* tsux = block_at(bci() + pair.offset()); BlockBegin* fsux = block_at(bci() + sw.default_offset()); bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
- // In case of loop invariant code motion or predicate insertion
- // before the body of a loop the state is needed
- ValueStack* state_before = copy_state_if_bb(is_bb);; append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb)); } else { // collect successors & keys @@ -1353,7 +1365,9 @@ // add default successor if (sw.default_offset() < 0) has_bb = true; sux->at_put(i, block_at(bci() + sw.default_offset()));
- // In case of loop invariant code motion or predicate insertion
- // before the body of a loop the state is needed
- ValueStack* state_before = copy_state_if_bb(has_bb); Instruction* res = append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb));
#ifdef ASSERT if (res->as_Goto()) {
--- a/src/share/vm/c1/c1_GraphBuilder.hpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/share/vm/c1/c1_GraphBuilder.hpp Thu Mar 21 09:27:54 2013 +0100 @@ -301,6 +301,8 @@ ValueStack* copy_state_exhandling(); ValueStack* copy_state_for_exception_with_bci(int bci); ValueStack* copy_state_for_exception();
- ValueStack* copy_state_if_bb(bool is_bb) { return (is_bb || compilation()->is_optimistic()) ? copy_state_before() : NULL; }
- ValueStack* copy_state_indexed_access() { return compilation()->is_optimistic() ? copy_state_before() : copy_state_for_exception(); } // // Inlining support
--- a/src/share/vm/c1/c1_IR.cpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/share/vm/c1/c1_IR.cpp Thu Mar 21 09:27:54 2013 +0100 @@ -182,13 +182,14 @@ // Implementation of CodeEmitInfo // Stack must be NON-null -CodeEmitInfo::CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers) +CodeEmitInfo::CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers, bool deoptimize_on_exception) : _scope(stack->scope()) , _scope_debug_info(NULL) , _oop_map(NULL) , _stack(stack) , _exception_handlers(exception_handlers)
- , _is_method_handle_invoke(false)
- , _deoptimize_on_exception(deoptimize_on_exception) { assert(_stack != NULL, "must be non null"); } @@ -199,7 +200,8 @@ , _scope_debug_info(NULL) , _oop_map(NULL) , _stack(stack == NULL ? info->_stack : stack)
- , _is_method_handle_invoke(info->_is_method_handle_invoke)
- , _deoptimize_on_exception(info->_deoptimize_on_exception) { // deep copy of exception handlers if (info->_exception_handlers != NULL) { @@ -239,7 +241,7 @@ } -void IR::optimize() { +void IR::optimize_blocks() { Optimizer opt(this); if (!compilation()->profile_branches()) { if (DoCEE) { @@ -257,6 +259,10 @@
#endif } } +} + +void IR::eliminate_null_checks() {
#ifndef PRODUCT @@ -429,6 +435,7 @@ BlockList _loop_end_blocks; // list of all loop end blocks collected during count_edges BitMap2D _loop_map; // two-dimensional bit set: a bit is set if a block is contained in a loop BlockList _work_list; // temporary list (used in mark_loops and compute_order)
- BlockList _loop_headers; Compilation* _compilation; @@ -594,6 +601,7 @@ TRACE_LINEAR_SCAN(3, tty->print_cr("Block B%d is loop header of loop %d", cur->block_id(), _num_loops)); cur->set_loop_index(_num_loops);
- _loop_headers.append(cur); _num_loops++; } @@ -656,6 +664,16 @@ // -> this is not a natural loop, so ignore it TRACE_LINEAR_SCAN(2, tty->print_cr("Loop %d is non-natural, so it is ignored", i));
BlockBegin *loop_header = _loop_headers.at(i);
assert(loop_header->is_set(BlockBegin::linear_scan_loop_header_flag), "Must be loop header");
for (int j = 0; j < loop_header->number_of_preds(); j++) {
BlockBegin *pred = loop_header->pred_at(j);
pred->clear(BlockBegin::linear_scan_loop_end_flag);
}
loop_header->clear(BlockBegin::linear_scan_loop_header_flag);
+ for (int block_id = _max_block_id - 1; block_id >= 0; block_id--) { clear_block_in_loop(i, block_id); } @@ -729,9 +747,20 @@ } else if (!(cur->is_set(BlockBegin::linear_scan_loop_header_flag) && parent->is_set(BlockBegin::linear_scan_loop_end_flag))) { TRACE_LINEAR_SCAN(4, tty->print_cr("DOM: computing dominator of B%d: common dominator of B%d and B%d is B%d", cur->block_id(), parent->block_id(), cur->dominator()->block_id(), common_dominator(cur->dominator(), parent)->block_id()));
- // Does not hold for exception blocks
- assert(cur->number_of_preds() > 1 || cur->is_set(BlockBegin::exception_entry_flag), ""); cur->set_dominator(common_dominator(cur->dominator(), parent)); } +
- // Additional edge to xhandler of all our successors
- // range check elimination needs that the state at the end of a
- // block be valid in every block it dominates so cur must dominate
- // the exception handlers of its successors.
- int num_cur_xhandler = cur->number_of_exception_handlers();
- for (int j = 0; j < num_cur_xhandler; j++) {
- BlockBegin* xhandler = cur->exception_handler_at(j);
- compute_dominator(xhandler, parent);
- } } @@ -898,7 +927,6 @@ num_sux = cur->number_of_exception_handlers(); for (i = 0; i < num_sux; i++) { BlockBegin* sux = cur->exception_handler_at(i);
compute_dominator(sux, cur); if (ready_for_processing(sux)) { sort_into_work_list(sux); }
@@ -918,8 +946,23 @@ BlockBegin* dominator = block->pred_at(0); int num_preds = block->number_of_preds();
- for (int i = 1; i < num_preds; i++) {
dominator = common_dominator(dominator, block->pred_at(i));
BlockBegin *pred = block->pred_at(j);
TRACE_LINEAR_SCAN(4, tty->print_cr(" DOM: Subrocessing B%d", pred->block_id()));
if (block->is_set(BlockBegin::exception_entry_flag)) {
dominator = common_dominator(dominator, pred);
int num_pred_preds = pred->number_of_preds();
for (int k = 0; k < num_pred_preds; k++) {
dominator = common_dominator(dominator, pred->pred_at(k));
}
} else {
dominator = common_dominator(dominator, pred);
} if (dominator != block->dominator()) { @@ -946,6 +989,21 @@ // check that dominators are correct assert(!compute_dominators_iter(), "fix point not reached"); +}
- // Add Blocks to dominates-Array
- int num_blocks = _linear_scan_order->length();
- for (int i = 0; i < num_blocks; i++) {
- BlockBegin* block = _linear_scan_order->at(i);
- BlockBegin *dom = block->dominator();
- if (dom) {
assert(dom->dominator_depth() != -1, "Dominator must have been visited before");
dom->dominates()->append(block);
block->set_dominator_depth(dom->dominator_depth() + 1);
- } else {
block->set_dominator_depth(0);
- }
- } } @@ -1032,7 +1090,7 @@ BlockBegin* sux = cur->sux_at(j);
assert(sux->linear_scan_number() >= 0 && sux->linear_scan_number() == _linear_scan_order->index_of(sux), "incorrect linear_scan_number");
if (!cur->is_set(BlockBegin::linear_scan_loop_end_flag)) {
if (!sux->is_set(BlockBegin::backward_branch_target_flag)) { assert(cur->linear_scan_number() < sux->linear_scan_number(), "invalid order"); } if (cur->loop_depth() == sux->loop_depth()) {
@@ -1044,7 +1102,7 @@ BlockBegin* pred = cur->pred_at(j); assert(pred->linear_scan_number() >= 0 && pred->linear_scan_number() == _linear_scan_order->index_of(pred), "incorrect linear_scan_number");
if (!cur->is_set(BlockBegin::linear_scan_loop_header_flag)) {
if (!cur->is_set(BlockBegin::backward_branch_target_flag)) { assert(cur->linear_scan_number() > pred->linear_scan_number(), "invalid order"); } if (cur->loop_depth() == pred->loop_depth()) {
@@ -1060,7 +1118,8 @@ } else { assert(cur->dominator() != NULL, "all but first block must have dominator"); }
- assert(cur->number_of_preds() != 1 || cur->dominator() == cur->pred_at(0), "Single predecessor must also be dominator");
- // Assertion does not hold for exception handlers
- assert(cur->number_of_preds() != 1 || cur->dominator() == cur->pred_at(0) || cur->is_set(BlockBegin::exception_entry_flag), "Single predecessor must also be dominator");
} // check that all loops are continuous @@ -1249,9 +1308,22 @@ } }; +class VerifyBlockBeginField : public BlockClosure { + +public: +
- virtual void block_do(BlockBegin *block) {
- for ( Instruction *cur = block; cur != NULL; cur = cur->next()) {
assert(cur->block() == block, "Block begin is not correct");
- }
- } +}; + void IR::verify() {
#ifdef ASSERT PredecessorValidator pv(this);
--- a/src/share/vm/c1/c1_IR.hpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/share/vm/c1/c1_IR.hpp Thu Mar 21 09:27:54 2013 +0100 @@ -254,6 +254,7 @@ OopMap* _oop_map; ValueStack* _stack; // used by deoptimization (contains also monitors bool _is_method_handle_invoke; // true if the associated call site is a MethodHandle call site.
- bool _deoptimize_on_exception; FrameMap* frame_map() const { return scope()->compilation()->frame_map(); } Compilation* compilation() const { return scope()->compilation(); } @@ -261,7 +262,7 @@ public: // use scope from ValueStack
- CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers, bool deoptimize_on_exception = false); // make a copy CodeEmitInfo(CodeEmitInfo* info, ValueStack* stack = NULL); @@ -272,6 +273,7 @@ IRScope* scope() const { return _scope; } XHandlers* exception_handlers() const { return _exception_handlers; } ValueStack* stack() const { return _stack; }
- bool deoptimize_on_exception() const { return _deoptimize_on_exception; } void add_register_oop(LIR_Opr opr); void record_debug_info(DebugInformationRecorder* recorder, int pc_offset); @@ -309,7 +311,8 @@ int max_stack() const { return top_scope()->max_stack(); } // expensive // ir manipulation
- void optimize_blocks();
- void eliminate_null_checks(); void compute_predecessors(); void split_critical_edges(); void compute_code();
--- a/src/share/vm/c1/c1_Instruction.cpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/share/vm/c1/c1_Instruction.cpp Thu Mar 21 09:27:54 2013 +0100 @@ -34,6 +34,15 @@ // Implementation of Instruction +int Instruction::dominator_depth() {
- int result = -1;
- if (block()) {
- result = block()->dominator_depth();
- }
- assert(result != -1 || this->as_Local(), "Only locals have dominator depth -1");
- return result; +} + Instruction::Condition Instruction::mirror(Condition cond) { switch (cond) { case eql: return eql; @@ -42,6 +51,8 @@ case leq: return geq; case gtr: return lss; case geq: return leq;
- case aeq: return beq;
- case beq: return aeq;
} ShouldNotReachHere(); return eql; @@ -56,6 +67,8 @@ case leq: return gtr; case gtr: return leq; case geq: return lss;
- case aeq: assert(false, "Above equal cannot be negated");
- case beq: assert(false, "Below equal cannot be negated");
} ShouldNotReachHere(); return eql; @@ -70,10 +83,10 @@ } } - -Instruction* Instruction::prev(BlockBegin* block) { +// Prev without need to have BlockBegin +Instruction* Instruction::prev() { Instruction* p = NULL;
- Instruction* q = block(); while (q != this) { assert(q != NULL, "this is not in the block's instruction list"); p = q; q = q->next(); @@ -122,15 +135,24 @@ // perform constant and interval tests on index value bool AccessIndexed::compute_needs_range_check() {
- Constant* clength = length()->as_Constant();
- Constant* cindex = index()->as_Constant();
- if (clength && cindex) {
- IntConstant* l = clength->type()->as_IntConstant();
- IntConstant* i = cindex->type()->as_IntConstant();
- if (l && i && i->value() < l->value() && i->value() >= 0) {
return false;
- if (length()) { +
- Constant* clength = length()->as_Constant();
- Constant* cindex = index()->as_Constant();
- if (clength && cindex) {
IntConstant* l = clength->type()->as_IntConstant();
IntConstant* i = cindex->type()->as_IntConstant();
if (l && i && i->value() < l->value() && i->value() >= 0) {
return false;
} } +}
- if (!this->check_flag(NeedsRangeCheckFlag)) {
- return false;
- } + return true; } @@ -631,19 +653,25 @@ // of the inserted block, without recomputing the values of the other blocks // in the CFG. Therefore the value of "depth_first_number" in BlockBegin becomes meaningless. BlockBegin* BlockBegin::insert_block_between(BlockBegin* sux) {
- int bci = sux->bci();
- // critical edge splitting may introduce a goto after a if and array
- // bound check elimination may insert a predicate between the if and
- // goto. The bci of the goto can't be the one of the if otherwise
- // the state and bci are inconsistent and a deoptimization triggered
- // by the predicate would lead to incorrect execution/a crash.
- BlockBegin* new_sux = new BlockBegin(bci); // mark this block (special treatment when block order is computed) new_sux->set(critical_edge_split_flag); // This goto is not a safepoint. Goto* e = new Goto(sux, false);
- new_sux->set_state(s->copy(s->kind(), bci));
- e->set_state(s->copy(s->kind(), bci)); assert(new_sux->state()->locals_size() == s->locals_size(), "local size mismatch!"); assert(new_sux->state()->stack_size() == s->stack_size(), "stack size mismatch!"); assert(new_sux->state()->locks_size() == s->locks_size(), "locks size mismatch!"); @@ -960,15 +988,14 @@ BlockList* sux = NULL; if (begin != NULL) { sux = begin->successors();
- BlockList* sux = new BlockList(_begin->number_of_sux());
- for (int i = 0; i < _begin->number_of_sux(); i++) {
sux->append(_begin->sux_at(i));
- BlockList* sux = new BlockList(this->begin()->number_of_sux());
- for (int i = 0; i < this->begin()->number_of_sux(); i++) {
} } _sux = sux; sux->append(this->begin()->sux_at(i));
- _begin = begin; } @@ -1008,7 +1035,38 @@ } } +#ifdef ASSERT +// Constructor of Assert +Assert::Assert(Value x, Condition cond, bool unordered_is_true, Value y) : Instruction(illegalType)
- , _x(x)
- , _cond(cond)
- , _y(y) +{
- set_flag(UnorderedIsTrueFlag, unordered_is_true);
- assert(x->type()->tag() == y->type()->tag(), "types must match");
- pin();
- stringStream strStream;
- Compilation::current()->method()->print_name(&strStream); +
- stringStream strStream1;
- InstructionPrinter ip1(1, &strStream1);
- ip1.print_instr(x); +
- stringStream strStream2;
- InstructionPrinter ip2(1, &strStream2);
- ip2.print_instr(y); +
- stringStream ss;
- ss.print("Assertion %s %s %s in method %s", strStream1.as_string(), ip2.cond_name(cond), strStream2.as_string(), strStream.as_string()); +
- _message = ss.as_string(); +} +#endif + +void RangeCheckPredicate::check_state() {
- assert(state()->kind() != ValueStack::EmptyExceptionState && state()->kind() != ValueStack::ExceptionState, "will deopt with empty state"); +} void ProfileInvoke::state_values_do(ValueVisitor* f) { if (state() != NULL) state()->values_do(f);
--- a/src/share/vm/c1/c1_Instruction.hpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/share/vm/c1/c1_Instruction.hpp Thu Mar 21 09:27:54 2013 +0100 @@ -110,6 +110,8 @@ class ProfileInvoke; class RuntimeCall; class MemBar; +class RangeCheckPredicate; +class Assert; // A Value is a reference to the instruction creating the value typedef Instruction* Value; @@ -210,6 +212,10 @@ virtual void do_ProfileInvoke (ProfileInvoke* x) = 0; virtual void do_RuntimeCall (RuntimeCall* x) = 0; virtual void do_MemBar (MemBar* x) = 0;
- virtual void do_RangeCheckPredicate(RangeCheckPredicate* x) = 0; +#ifdef ASSERT
- virtual void do_Assert (Assert* x) = 0; +#endif }; @@ -306,8 +312,9 @@ void update_exception_state(ValueStack* state);
- protected:
- BlockBegin* _block; // Block that contains this instruction + void set_type(ValueType* type) { assert(type != NULL, "type must exist"); _type = type; @@ -342,6 +349,9 @@ ThrowIncompatibleClassChangeErrorFlag, ProfileMDOFlag, IsLinkedInBlockFlag,
- NeedsRangeCheckFlag,
- InWorkListFlag,
- DeoptimizeOnException, InstructionLastFlag }; @@ -351,7 +361,7 @@ // 'globally' used condition values enum Condition {
}; // Instructions may be pinned for many reasons and under certain conditions @@ -381,6 +391,7 @@ , _pin_state(0) , _type(type) , _next(NULL)
- , _block(NULL) , _subst(NULL) , _flags(0) , _operand(LIR_OprFact::illegalOpr) @@ -399,11 +410,13 @@ int printable_bci() const { assert(has_printable_bci(), "_printable_bci should have been set"); return _printable_bci; } void set_printable_bci(int bci) { _printable_bci = bci; }
- int dominator_depth(); int use_count() const { return _use_count; } int pin_state() const { return _pin_state; } bool is_pinned() const { return _pin_state != 0 || PinAllInstructions; } ValueType* type() const { return _type; }
- BlockBegin *block() const { return _block; }
- Instruction* prev(); // use carefully, expensive operation Instruction* next() const { return _next; } bool has_subst() const { return _subst != NULL; } Instruction* subst() { return _subst == NULL ? this : _subst->subst(); } @@ -432,6 +445,9 @@ assert(as_BlockEnd() == NULL, "BlockEnd instructions must have no next"); assert(next->can_be_linked(), "shouldn't link these instructions into list");
- BlockBegin *block = this->block();
- next->_block = block;
+ next->set_flag(Instruction::IsLinkedInBlockFlag, true); _next = next; return next; @@ -444,6 +460,29 @@ return set_next(next); }
- // when blocks are merged
- void fixup_block_pointers() {
- Instruction *cur = next()->next(); // next()'s block is set in set_next
- while (cur && cur->_block != block()) {
cur->_block = block();[](#l20.95)
cur = cur->next();[](#l20.96)
- }
- } +
- Instruction *insert_after(Instruction *i) {
- Instruction* n = _next;
- set_next(i);
- i->set_next(n);
- return _next;
- } +
- Instruction *insert_after_same_bci(Instruction *i) { +#ifndef PRODUCT
- i->set_printable_bci(printable_bci());
- return insert_after(i);
- } + void set_subst(Instruction* subst) { assert(subst == NULL || type()->base() == subst->type()->base() ||
@@ -452,6 +491,7 @@ } void set_exception_handlers(XHandlers* xhandlers) { _exception_handlers = xhandlers; } void set_exception_state(ValueStack* s) { check_state(s); _exception_state = s; }
- void set_state_before(ValueStack* s) { check_state(s); _state_before = s; } // machine-specifics void set_operand(LIR_Opr operand) { assert(operand != LIR_OprFact::illegalOpr, "operand must exist"); _operand = operand; } @@ -509,6 +549,11 @@ virtual ExceptionObject* as_ExceptionObject() { return NULL; } virtual UnsafeOp* as_UnsafeOp() { return NULL; } virtual ProfileInvoke* as_ProfileInvoke() { return NULL; }
- virtual RangeCheckPredicate* as_RangeCheckPredicate() { return NULL; } + +#ifdef ASSERT
- virtual Assert* as_Assert() { return NULL; } +#endif virtual void visit(InstructionVisitor* v) = 0; @@ -570,7 +615,6 @@ LEAF(Phi, Instruction) private:
- BlockBegin* _block; // the block to which the phi function belongs int _pf_flags; // the flags of the phi function int _index; // to value on operand stack (index < 0) or to local public: @@ -578,9 +622,9 @@ Phi(ValueType* type, BlockBegin* b, int index) : Instruction(type->base()) , _pf_flags(0)
- , _block(b) , _index(index) {
- _block = b; NOT_PRODUCT(set_printable_bci(Value(b)->printable_bci())); if (type->is_illegal()) { make_illegal(); @@ -603,8 +647,6 @@ Value operand_at(int i) const; int operand_count() const;
- BlockBegin* block() const { return _block; } - void set(Flag f) { _pf_flags |= f; } void clear(Flag f) { _pf_flags &= ~f; } bool is_set(Flag f) const { return (_pf_flags & f) != 0; } @@ -670,6 +712,7 @@ pin(); }
- // generic virtual bool can_trap() const { return state_before() != NULL; } virtual void input_values_do(ValueVisitor* f) { /* no values */ } @@ -852,6 +895,7 @@ , _length(length) , _elt_type(elt_type) {
- set_flag(Instruction::NeedsRangeCheckFlag, true); ASSERT_VALUES } @@ -860,6 +904,7 @@ Value length() const { return _length; } BasicType elt_type() const { return _elt_type; }
- void clear_length() { _length = NULL; } // perform elimination of range checks involving constants bool compute_needs_range_check(); @@ -1524,6 +1569,7 @@ int _bci; // start-bci of block int _depth_first_number; // number of this block in a depth-first ordering int _linear_scan_number; // number of this block in linear-scan ordering
- int _dominator_depth; int _loop_depth; // the loop nesting level of this block int _loop_index; // number of the innermost loop of this block int _flags; // the flags associated with this block @@ -1535,6 +1581,7 @@ // SSA specific fields: (factor out later) BlockList _successors; // the successors of this block BlockList _predecessors; // the predecessors of this block
- BlockList _dominates; // list of blocks that are dominated by this block BlockBegin* _dominator; // the dominator of this block // SSA specific ends BlockEnd* _end; // the last instruction of this block @@ -1583,10 +1630,12 @@ , _linear_scan_number(-1) , _loop_depth(0) , _flags(0)
- , _dominator_depth(-1) , _dominator(NULL) , _end(NULL) , _predecessors(2) , _successors(2)
- , _dominates(2) , _exception_handlers(1) , _exception_states(NULL) , _exception_handler_pco(-1) @@ -1603,6 +1652,7 @@ , _total_preds(0) , _stores_to_locals() {
- _block = this;
#ifndef PRODUCT set_printable_bci(bci); #endif @@ -1612,8 +1662,10 @@ int block_id() const { return _block_id; } int bci() const { return _bci; } BlockList* successors() { return &_successors; }
- BlockList* dominates() { return &_dominates; } BlockBegin* dominator() const { return _dominator; } int loop_depth() const { return _loop_depth; }
- int dominator_depth() const { return _dominator_depth; } int depth_first_number() const { return _depth_first_number; } int linear_scan_number() const { return _linear_scan_number; } BlockEnd* end() const { return _end; } @@ -1634,6 +1686,7 @@ // manipulation void set_dominator(BlockBegin* dom) { _dominator = dom; } void set_loop_depth(int d) { _loop_depth = d; }
- void set_dominator_depth(int d) { _dominator_depth = d; } void set_depth_first_number(int dfn) { _depth_first_number = dfn; } void set_linear_scan_number(int lsn) { _linear_scan_number = lsn; } void set_end(BlockEnd* end); @@ -1695,7 +1748,8 @@ parser_loop_header_flag = 1 << 7, // set by parser to identify blocks where phi functions can not be created on demand critical_edge_split_flag = 1 << 8, // set for all blocks that are introduced when critical edges are split linear_scan_loop_header_flag = 1 << 9, // set during loop-detection for LinearScan
- linear_scan_loop_end_flag = 1 << 10, // set during loop-detection for LinearScan
- donot_eliminate_range_checks = 1 << 11 // Should be try to eliminate range checks in this block
}; void set(Flag f) { _flags |= f; } @@ -1728,7 +1782,6 @@ BASE(BlockEnd, StateSplit) private:
- BlockBegin* _begin; BlockList* _sux; protected: @@ -1746,7 +1799,6 @@ // creation BlockEnd(ValueType* type, ValueStack* state_before, bool is_safepoint) : StateSplit(type, state_before)
- , _begin(NULL) , _sux(NULL) { set_flag(IsSafepointFlag, is_safepoint); @@ -1754,7 +1806,8 @@ // accessors bool is_safepoint() const { return check_flag(IsSafepointFlag); }
- BlockBegin* begin() const { return _begin; }
- // For compatibility with old code, for new code use block()
- BlockBegin* begin() const { return _block; } // manipulation void set_begin(BlockBegin* begin); @@ -1811,6 +1864,74 @@ void set_direction(Direction d) { _direction = d; } }; +#ifdef ASSERT +LEAF(Assert, Instruction)
- private:
- Value _x;
- Condition _cond;
- Value _y;
- char *_message; +
- public:
- // creation
- // unordered_is_true is valid for float/double compares only
- Assert(Value x, Condition cond, bool unordered_is_true, Value y); +
- // accessors
- Value x() const { return _x; }
- Condition cond() const { return _cond; }
- bool unordered_is_true() const { return check_flag(UnorderedIsTrueFlag); }
- Value y() const { return _y; }
- const char *message() const { return _message; } +
- // generic
- virtual void input_values_do(ValueVisitor* f) { f->visit(&_x); f->visit(&_y); } +}; +#endif + +LEAF(RangeCheckPredicate, StateSplit)
- private:
- Value _x;
- Condition _cond;
- Value _y; +
- void check_state(); +
- public:
- // creation
- // unordered_is_true is valid for float/double compares only
- RangeCheckPredicate(Value x, Condition cond, bool unordered_is_true, Value y, ValueStack* state) : StateSplit(illegalType)
- , _x(x)
- , _cond(cond)
- , _y(y)
- {
- ASSERT_VALUES
- set_flag(UnorderedIsTrueFlag, unordered_is_true);
- assert(x->type()->tag() == y->type()->tag(), "types must match");
- this->set_state(state);
- check_state();
- } +
- // Always deoptimize
- RangeCheckPredicate(ValueStack* state) : StateSplit(illegalType)
- {
- this->set_state(state);
- _x = _y = NULL;
- check_state();
- } +
- // accessors
- Value x() const { return _x; }
- Condition cond() const { return _cond; }
- bool unordered_is_true() const { return check_flag(UnorderedIsTrueFlag); }
- Value y() const { return _y; } +
- void always_fail() { _x = _y = NULL; } +
- // generic
- virtual void input_values_do(ValueVisitor* f) { StateSplit::input_values_do(f); f->visit(&_x); f->visit(&_y); }
- HASHING3(RangeCheckPredicate, true, x()->subst(), y()->subst(), cond()) +}; LEAF(If, BlockEnd) private:
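The RangeCheckPredicate node and the insert_after_same_bci() hook defined above are the pieces the elimination pass (c1_RangeCheckElimination.cpp, not shown in this excerpt) uses to splice predicates into the HIR. A minimal hypothetical sketch using only the APIs added here (illustrative, not code from this changeset):

    // Insert an unconditional deoptimization predicate right after 'insert_position',
    // reusing that instruction's printable bci so debug info stays consistent.
    static Instruction* insert_always_fail_predicate(Instruction* insert_position,
                                                     ValueStack* state_before) {
      RangeCheckPredicate* pred = new RangeCheckPredicate(state_before); // always deoptimizes
      return insert_position->insert_after_same_bci(pred);
    }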
--- a/src/share/vm/c1/c1_InstructionPrinter.cpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/share/vm/c1/c1_InstructionPrinter.cpp Thu Mar 21 09:27:54 2013 +0100 @@ -57,6 +57,8 @@ case If::leq: return "<="; case If::gtr: return ">"; case If::geq: return ">=";
} ShouldNotReachHere(); return NULL; @@ -181,6 +183,11 @@ output()->put('['); print_value(indexed->index()); output()->put(']');
- if (indexed->length() != NULL) {
- output()->put('(');
- print_value(indexed->length());
- output()->put(')');
- } } @@ -373,6 +380,7 @@ void InstructionPrinter::do_LoadField(LoadField* x) { print_field(x); output()->print(" (%c)", type2char(x->field()->type()->basic_type()));
- output()->print(" %s", x->field()->name()->as_utf8()); } @@ -381,6 +389,7 @@ output()->print(" := "); print_value(x->value()); output()->print(" (%c)", type2char(x->field()->type()->basic_type()));
- output()->print(" %s", x->field()->name()->as_utf8()); } @@ -393,6 +402,9 @@ void InstructionPrinter::do_LoadIndexed(LoadIndexed* x) { print_indexed(x); output()->print(" (%c)", type2char(x->elt_type()));
- if (x->check_flag(Instruction::NeedsRangeCheckFlag)) {
- output()->print(" [rc]");
- } } @@ -401,6 +413,9 @@ output()->print(" := "); print_value(x->value()); output()->print(" (%c)", type2char(x->elt_type()));
- if (x->check_flag(Instruction::NeedsRangeCheckFlag)) {
- output()->print(" [rc]");
- } } void InstructionPrinter::do_NegateOp(NegateOp* x) { @@ -843,6 +858,25 @@ output()->put(')'); } +void InstructionPrinter::do_RangeCheckPredicate(RangeCheckPredicate* x) { +
- if (x->x() != NULL && x->y() != NULL) {
- output()->print("if ");
- print_value(x->x());
- output()->print(" %s ", cond_name(x->cond()));
- print_value(x->y());
- output()->print(" then deoptimize!");
- } else {
- output()->print("always deoptimize!");
- } +} + +void InstructionPrinter::do_Assert(Assert* x) {
- output()->print("assert ");
- print_value(x->x());
- output()->print(" %s ", cond_name(x->cond()));
- print_value(x->y()); +} void InstructionPrinter::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { print_unsafe_object_op(x, "UnsafePrefetchWrite");
--- a/src/share/vm/c1/c1_InstructionPrinter.hpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/share/vm/c1/c1_InstructionPrinter.hpp Thu Mar 21 09:27:54 2013 +0100 @@ -135,6 +135,8 @@ virtual void do_ProfileInvoke (ProfileInvoke* x); virtual void do_RuntimeCall (RuntimeCall* x); virtual void do_MemBar (MemBar* x);
--- a/src/share/vm/c1/c1_LIR.cpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/share/vm/c1/c1_LIR.cpp Thu Mar 21 09:27:54 2013 +0100 @@ -633,6 +633,7 @@ case lir_ushr: case lir_xadd: case lir_xchg:
- case lir_assert: { assert(op->as_Op2() != NULL, "must be"); LIR_Op2* op2 = (LIR_Op2*)op; @@ -1112,6 +1113,11 @@ } }
+#ifdef ASSERT +void LIR_OpAssert::emit_code(LIR_Assembler* masm) {
- masm->emit_assert(this); +} +#endif void LIR_OpDelay::emit_code(LIR_Assembler* masm) { masm->emit_delay(this); @@ -1771,6 +1777,8 @@ case lir_cas_int: s = "cas_int"; break; // LIR_OpProfileCall case lir_profile_call: s = "profile_call"; break;
// LIR_OpAssert
case lir_assert: s = "assert"; break;
case lir_none: ShouldNotReachHere(); break;
default: s = "illegal_op"; break; } @@ -2017,6 +2025,13 @@ out->print("[lbl:0x%x]", stub()->entry()); } +void LIR_OpAssert::print_instr(outputStream* out) const {
- print_condition(out, condition()); out->print(" ");
- in_opr1()->print(out); out->print(" ");
- in_opr2()->print(out); out->print(", \"");
- out->print(msg()); out->print("\""); +} + void LIR_OpDelay::print_instr(outputStream* out) const { _op->print_on(out);
--- a/src/share/vm/c1/c1_LIR.hpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/share/vm/c1/c1_LIR.hpp Thu Mar 21 09:27:54 2013 +0100 @@ -881,6 +881,7 @@ class LIR_OpTypeCheck; class LIR_OpCompareAndSwap; class LIR_OpProfileCall; +class LIR_OpAssert; // LIR operation codes @@ -1000,6 +1001,9 @@ , begin_opMDOProfile , lir_profile_call , end_opMDOProfile
- , begin_opAssert
- , lir_assert
- , end_opAssert }; @@ -1135,6 +1139,7 @@ virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; } virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; } virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }
- virtual LIR_OpAssert* as_OpAssert() { return NULL; } virtual void verify() const {} }; @@ -1623,7 +1628,7 @@ , _tmp3(LIR_OprFact::illegalOpr) , _tmp4(LIR_OprFact::illegalOpr) , _tmp5(LIR_OprFact::illegalOpr) {
} LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) @@ -1683,7 +1688,7 @@ LIR_Opr tmp4_opr() const { return _tmp4; } LIR_Opr tmp5_opr() const { return _tmp5; } LIR_Condition condition() const {
- assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); return _condition;
- assert(code() == lir_cmp || code() == lir_cmove || code() == lir_assert, "only valid for cmp and cmove and assert"); return _condition;
} void set_condition(LIR_Condition condition) { assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); _condition = condition; @@ -1823,6 +1828,30 @@ CodeEmitInfo* call_info() const { return info(); } }; +#ifdef ASSERT +// LIR_OpAssert +class LIR_OpAssert : public LIR_Op2 {
- friend class LIR_OpVisitState; +
- private:
- const char* _msg;
- bool _halt; +
- public:
- LIR_OpAssert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, const char* msg, bool halt)
- : LIR_Op2(lir_assert, condition, opr1, opr2)
- , _halt(halt)
- , _msg(msg) {
- } +
- const char* msg() const { return _msg; }
- bool halt() const { return _halt; } +
- virtual void emit_code(LIR_Assembler* masm);
- virtual LIR_OpAssert* as_OpAssert() { return this; }
- virtual void print_instr(outputStream* out) const PRODUCT_RETURN; +}; +#endif // LIR_OpCompareAndSwap class LIR_OpCompareAndSwap : public LIR_Op { @@ -2196,6 +2225,9 @@ void xadd(LIR_Opr src, LIR_Opr add, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xadd, src, add, res, tmp)); } void xchg(LIR_Opr src, LIR_Opr set, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xchg, src, set, res, tmp)); } +#ifdef ASSERT
- void lir_assert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, const char* msg, bool halt) { append(new LIR_OpAssert(condition, opr1, opr2, msg, halt)); } +#endif }; void print_LIR(BlockList* blocks);
--- a/src/share/vm/c1/c1_LIRAssembler.hpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/share/vm/c1/c1_LIRAssembler.hpp Thu Mar 21 09:27:54 2013 +0100 @@ -210,6 +210,9 @@ void arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack); void arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info); void intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op); +#ifdef ASSERT
- void emit_assert(LIR_OpAssert* op); +#endif void logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest);
--- a/src/share/vm/c1/c1_LIRGenerator.cpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/share/vm/c1/c1_LIRGenerator.cpp Thu Mar 21 09:27:54 2013 +0100 @@ -403,6 +403,10 @@ CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) { assert(state != NULL, "state must be defined"); +#ifndef PRODUCT
- state->verify(); +#endif + ValueStack* s = state; for_each_state(s) { if (s->kind() == ValueStack::EmptyExceptionState) { @@ -453,7 +457,7 @@ } }
- return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers(), x->check_flag(Instruction::DeoptimizeOnException)); } @@ -1792,11 +1796,18 @@ }
- bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception(); if (x->needs_null_check() && (needs_patching ||
MacroAssembler::needs_explicit_null_check(x->offset()))) {
MacroAssembler::needs_explicit_null_check(x->offset()) ||
stress_deopt)) {
- LIR_Opr obj = object.result();
- if (stress_deopt) {
obj = new_register(T_OBJECT);
__ move(LIR_OprFact::oopConst(NULL), obj);
- } // emit an explicit null check because the offset is too large
} LIR_Opr reg = rlock_result(x, field_type); @@ -1861,6 +1872,8 @@ void LIRGenerator::do_ArrayLength(ArrayLength* x) {
- if (x->use_count() == 0 && !x->can_trap()) return; + LIRItem array(x->array(), this); array.load_item(); LIR_Opr reg = rlock_result(x); @@ -1873,6 +1886,11 @@ } else { info = state_for(nc); }
- if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) {
LIR_Opr obj = new_register(T_OBJECT);
__ move(LIR_OprFact::oopConst(NULL), obj);
__ null_check(obj, new CodeEmitInfo(info));
- }
} __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none); } @@ -1883,14 +1901,11 @@ LIRItem array(x->array(), this); LIRItem index(x->index(), this); LIRItem length(this);
- bool needs_range_check = true; -
- if (use_length) {
- needs_range_check = x->compute_needs_range_check();
- if (needs_range_check) {
length.set_instruction(x->length());
length.load_item();
- }
- bool needs_range_check = x->compute_needs_range_check(); +
- if (use_length && needs_range_check) {
- length.set_instruction(x->length());
- length.load_item();
} array.load_item(); @@ -1910,13 +1925,20 @@ } else { null_check_info = range_check_info; }
- if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
LIR_Opr obj = new_register(T_OBJECT);
__ move(LIR_OprFact::oopConst(NULL), obj);
__ null_check(obj, new CodeEmitInfo(null_check_info));
- }
} // emit array address setup early so it schedules better LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false); if (GenerateRangeChecks && needs_range_check) {
- if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
__ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
- } else if (use_length) { // TODO: use a (modified) version of array_range_check that does not require a // constant length to be loaded to a register __ cmp(lir_cond_belowEqual, length.result(), index.result()); @@ -2634,7 +2656,7 @@ LIR_Opr lock = new_register(T_INT); __ load_stack_address_monitor(0, lock);
CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException)); CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
// receiver is guaranteed non-NULL so don't need CodeEmitInfo @@ -2644,7 +2666,7 @@ // increment invocation counters if needed if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
- CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
- CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false); increment_invocation_counter(info); } @@ -3102,6 +3124,95 @@ } }
+void LIRGenerator::do_Assert(Assert *x) { +#ifdef ASSERT
- ValueTag tag = x->x()->type()->tag();
- If::Condition cond = x->cond(); +
- LIRItem xitem(x->x(), this);
- LIRItem yitem(x->y(), this);
- LIRItem* xin = &xitem;
- LIRItem* yin = &yitem; +
- assert(tag == intTag, "Only integer assertions are valid!"); +
- xin->load_item();
- yin->dont_load_item(); +
- set_no_result(x); +
- LIR_Opr left = xin->result();
- LIR_Opr right = yin->result(); +
- __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true);
+#endif
+}
+
+
+void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
+
+
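// Overview of the cases below (descriptive note): under StressRangeCheckElimination,
// or when the predicate carries no operands, an unconditional jump to the
// PredicateFailedStub is emitted; two integer constants are folded at compile time
// and the jump is (conceptually) emitted only when the folded condition holds;
// otherwise a compare and a conditional branch to the stub are generated.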
- Instruction *a = x->x();
- Instruction *b = x->y();
- if (!a || StressRangeCheckElimination) {
- assert(!b || StressRangeCheckElimination, "B must also be null");
- __ jump(stub);
- } else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
- int a_int = a->type()->as_IntConstant()->value();
- int b_int = b->type()->as_IntConstant()->value();
- switch(x->cond()) {
case Instruction::eql: ok = (a_int == b_int); break;
case Instruction::neq: ok = (a_int != b_int); break;
case Instruction::lss: ok = (a_int < b_int); break;
case Instruction::leq: ok = (a_int <= b_int); break;
case Instruction::gtr: ok = (a_int > b_int); break;
case Instruction::geq: ok = (a_int >= b_int); break;
case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break;
case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break;
default: ShouldNotReachHere();
- }
CodeEmitInfo *info = state_for(x, x->state());
CodeStub* stub = new PredicateFailedStub(info);
__ jump(stub);
- }
- } else {
+
- ValueTag tag = x->x()->type()->tag();
- If::Condition cond = x->cond();
- LIRItem xitem(x->x(), this);
- LIRItem yitem(x->y(), this);
- LIRItem* xin = &xitem;
- LIRItem* yin = &yitem;
- __ cmp(lir_cond(cond), left, right);
- __ branch(lir_cond(cond), right->type(), stub);
- }
+}
+
+
LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
LIRItemList args(1);
LIRItem value(arg1, this);
--- a/src/share/vm/c1/c1_LIRGenerator.hpp Wed Mar 20 17:04:45 2013 -0700
+++ b/src/share/vm/c1/c1_LIRGenerator.hpp Thu Mar 21 09:27:54 2013 +0100
@@ -412,6 +412,8 @@
case If::leq: l = lir_cond_lessEqual; break;
case If::geq: l = lir_cond_greaterEqual; break;
case If::gtr: l = lir_cond_greater; break;
- case If::aeq: l = lir_cond_aboveEqual; break;
- case If::beq: l = lir_cond_belowEqual; break;
};
return l;
}
@@ -534,6 +536,8 @@
virtual void do_ProfileInvoke (ProfileInvoke* x);
virtual void do_RuntimeCall (RuntimeCall* x);
virtual void do_MemBar (MemBar* x);
- virtual void do_RangeCheckPredicate(RangeCheckPredicate* x);
- virtual void do_Assert (Assert* x);
};
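The aeq/beq mappings added above express unsigned comparisons; a minimal standalone sketch of the trick they enable (the helper name is illustrative and not part of this changeset): for a non-negative length, one unsigned compare covers both the "index negative" and the "index too large" cases.

// Illustrative only: a negative index reinterprets as a large unsigned value,
// so a single unsigned >= test replaces the pair i < 0 || i >= length.
static bool index_out_of_bounds(int i, int length) {
  return (unsigned int)i >= (unsigned int)length;
}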
--- a/src/share/vm/c1/c1_LinearScan.cpp Wed Mar 20 17:04:45 2013 -0700
+++ b/src/share/vm/c1/c1_LinearScan.cpp Thu Mar 21 09:27:54 2013 +0100
@@ -6231,26 +6231,29 @@
assert(prev_op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch");
LIR_OpBranch* prev_branch = (LIR_OpBranch*)prev_op;
LIR_Op2* prev_cmp = NULL;
for(int j = instructions->length() - 3; j >= 0 && prev_cmp == NULL; j--) {
prev_op = instructions->at(j);
if(prev_op->code() == lir_cmp) {
assert(prev_op->as_Op2() != NULL, "branch must be of type LIR_Op2");
prev_cmp = (LIR_Op2*)prev_op;
assert(prev_branch->cond() == prev_cmp->condition(), "should be the same");
if (prev_branch->stub() == NULL) {
LIR_Op2* prev_cmp = NULL;
for(int j = instructions->length() - 3; j >= 0 && prev_cmp == NULL; j--) {
prev_op = instructions->at(j);
if (prev_op->code() == lir_cmp) {
assert(prev_op->as_Op2() != NULL, "branch must be of type LIR_Op2");
prev_cmp = (LIR_Op2*)prev_op;
assert(prev_branch->cond() == prev_cmp->condition(), "should be the same");
}
}
}
assert(prev_cmp != NULL, "should have found comp instruction for branch");
if (prev_branch->block() == code->at(i + 1) && prev_branch->info() == NULL) {
TRACE_LINEAR_SCAN(3, tty->print_cr("Negating conditional branch and deleting unconditional branch at end of block B%d", block->block_id()));
// eliminate a conditional branch to the immediate successor
prev_branch->change_block(last_branch->block());
prev_branch->negate_cond();
prev_cmp->set_condition(prev_branch->cond());
instructions->truncate(instructions->length() - 1);
assert(prev_cmp != NULL, "should have found comp instruction for branch");
if (prev_branch->block() == code->at(i + 1) && prev_branch->info() == NULL) {
TRACE_LINEAR_SCAN(3, tty->print_cr("Negating conditional branch and deleting unconditional branch at end of block B%d", block->block_id()));
// eliminate a conditional branch to the immediate successor
prev_branch->change_block(last_branch->block());
prev_branch->negate_cond();
prev_cmp->set_condition(prev_branch->cond());
instructions->truncate(instructions->length() - 1);
}
}
}
}
--- a/src/share/vm/c1/c1_Optimizer.cpp Wed Mar 20 17:04:45 2013 -0700
+++ b/src/share/vm/c1/c1_Optimizer.cpp Thu Mar 21 09:27:54 2013 +0100
@@ -178,7 +178,7 @@
// 2) substitute conditional expression
// with an IfOp followed by a Goto
// cut if_ away and get node before
- Instruction* cur_end = if_->prev();
// append constants of true- and false-block if necessary
// clone constants because original block must not be destroyed
@@ -202,7 +202,7 @@
}
// append Goto to successor
- ValueStack* state_before = if_->state_before();
Goto* goto_ = new Goto(sux, state_before, if_->is_safepoint() || t_goto->is_safepoint() || f_goto->is_safepoint());
// prepare state for Goto
@@ -367,10 +367,11 @@
#endif
// find instruction before end & append first instruction of sux block
Instruction* prev = end->prev(block);
Instruction* prev = end->prev();
Instruction* next = sux->next();
assert(prev->as_BlockEnd() == NULL, "must not be a BlockEnd");
prev->set_next(next);
prev->fixup_block_pointers();
sux->disconnect_from_graph();
block->set_end(sux->end());
// add exception handlers of deleted block, if any
@@ -533,6 +534,8 @@
void do_ProfileInvoke (ProfileInvoke* x);
void do_RuntimeCall (RuntimeCall* x);
void do_MemBar (MemBar* x);
- void do_RangeCheckPredicate(RangeCheckPredicate* x);
- void do_Assert (Assert* x); }; @@ -714,6 +717,8 @@ void NullCheckVisitor::do_ProfileInvoke (ProfileInvoke* x) {} void NullCheckVisitor::do_RuntimeCall (RuntimeCall* x) {} void NullCheckVisitor::do_MemBar (MemBar* x) {} +void NullCheckVisitor::do_RangeCheckPredicate(RangeCheckPredicate* x) {} +void NullCheckVisitor::do_Assert (Assert* x) {} void NullCheckEliminator::visit(Value* p) {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/c1/c1_RangeCheckElimination.cpp Thu Mar 21 09:27:54 2013 +0100
@@ -0,0 +1,1517 @@
+/*
- *
- *
- *
- *
- or visit www.oracle.com if you need additional information or have any
- *
- */
+
+#include "precompiled.hpp"
+#include "c1/c1_ValueStack.hpp"
+#include "c1/c1_RangeCheckElimination.hpp"
+#include "c1/c1_IR.hpp"
+#include "c1/c1_Canonicalizer.hpp"
+#include "c1/c1_ValueMap.hpp"
+#include "ci/ciMethodData.hpp"
+#include "runtime/deoptimization.hpp"
+
+// Macros for the Trace and the Assertion flag
+#ifdef ASSERT
+#define TRACE_RANGE_CHECK_ELIMINATION(code) if (TraceRangeCheckElimination) { code; }
+#define ASSERT_RANGE_CHECK_ELIMINATION(code) if (AssertRangeCheckElimination) { code; }
+#define TRACE_OR_ASSERT_RANGE_CHECK_ELIMINATION(code) if (TraceRangeCheckElimination || AssertRangeCheckElimination) { code; }
+#else
+#define TRACE_RANGE_CHECK_ELIMINATION(code)
+#define ASSERT_RANGE_CHECK_ELIMINATION(code)
+#define TRACE_OR_ASSERT_RANGE_CHECK_ELIMINATION(code)
+#endif
+
+// Entry point for the optimization
+void RangeCheckElimination::eliminate(IR *ir) {
- bool do_elimination = ir->compilation()->has_access_indexed();
- ASSERT_RANGE_CHECK_ELIMINATION(do_elimination = true);
- if (do_elimination) {
- RangeCheckEliminator rce(ir);
- } +} + +// Constructor +RangeCheckEliminator::RangeCheckEliminator(IR *ir) :
- _bounds(Instruction::number_of_instructions(), NULL),
- _access_indexed_info(Instruction::number_of_instructions(), NULL) +{
- _visitor.set_range_check_eliminator(this);
- _ir = ir;
- _number_of_instructions = Instruction::number_of_instructions();
- _optimistic = ir->compilation()->is_optimistic(); +
- TRACE_RANGE_CHECK_ELIMINATION(
- tty->print_cr("");
- tty->print_cr("Range check elimination");
- ir->method()->print_name(tty);
- tty->print_cr("");
- ); +
- TRACE_RANGE_CHECK_ELIMINATION(
- tty->print_cr("optimistic=%d", (int)_optimistic);
- ); + +#ifdef ASSERT
- // Verifies several conditions that must be true on the IR-input. Only used for debugging purposes.
- TRACE_RANGE_CHECK_ELIMINATION(
- tty->print_cr("Verification of IR . . .");
- );
- Verification verification(ir); +#endif +
- // Set process block flags
- // Optimization so a block is only processed if it contains an access indexed instruction or if
- // one of its children in the dominator tree contains an access indexed instruction.
- set_process_block_flags(ir->start()); +
- // Pass over instructions in the dominator tree
- TRACE_RANGE_CHECK_ELIMINATION(
- tty->print_cr("Starting pass over dominator tree . . .")
- );
- calc_bounds(ir->start(), NULL); +
- TRACE_RANGE_CHECK_ELIMINATION(
- tty->print_cr("Finished!")
- ); +} + +// Instruction specific work for some instructions +// Constant +void RangeCheckEliminator::Visitor::do_Constant(Constant *c) {
- IntConstant *ic = c->type()->as_IntConstant();
- if (ic != NULL) {
- int value = ic->value();
- _bound = new Bound(value, NULL, value, NULL);
- } +} + +// LogicOp +void RangeCheckEliminator::Visitor::do_LogicOp(LogicOp *lo) {
- if (lo->type()->as_IntType() && lo->op() == Bytecodes::_iand && (lo->x()->as_Constant() || lo->y()->as_Constant())) {
- int constant = 0;
- Constant *c = lo->x()->as_Constant();
- if (c != NULL) {
constant = c->type()->as_IntConstant()->value();
- } else {
constant = lo->y()->as_Constant()->type()->as_IntConstant()->value();
- }
- if (constant >= 0) {
_bound = new Bound(0, NULL, constant, NULL);
- }
- } +} + +// Phi +void RangeCheckEliminator::Visitor::do_Phi(Phi *phi) {
- if (!phi->type()->as_IntType() && !phi->type()->as_ObjectType()) return; +
- BlockBegin *block = phi->block();
- int op_count = phi->operand_count();
- bool has_upper = true;
- bool has_lower = true;
- assert(phi, "Phi must not be null");
- Bound *bound = NULL; +
- // TODO: support more difficult phis
- for (int i=0; i<op_count; i++) {
- Value v = phi->operand_at(i);
- // Check if instruction is connected with phi itself
- Op2 *op2 = v->as_Op2();
- if (op2 != NULL) {
Value x = op2->x();[](#l30.148)
Value y = op2->y();[](#l30.149)
if ((x == phi || y == phi)) {[](#l30.150)
Value other = x;[](#l30.151)
if (other == phi) {[](#l30.152)
other = y;[](#l30.153)
}[](#l30.154)
ArithmeticOp *ao = v->as_ArithmeticOp();[](#l30.155)
if (ao != NULL && ao->op() == Bytecodes::_iadd) {[](#l30.156)
assert(ao->op() == Bytecodes::_iadd, "Has to be add!");[](#l30.157)
if (ao->type()->as_IntType()) {[](#l30.158)
Constant *c = other->as_Constant();[](#l30.159)
if (c != NULL) {[](#l30.160)
assert(c->type()->as_IntConstant(), "Constant has to be of type integer");[](#l30.161)
int value = c->type()->as_IntConstant()->value();[](#l30.162)
if (value == 1) {[](#l30.163)
has_upper = false;[](#l30.164)
} else if (value > 1) {[](#l30.165)
// Overflow not guaranteed[](#l30.166)
has_upper = false;[](#l30.167)
has_lower = false;[](#l30.168)
} else if (value < 0) {[](#l30.169)
has_lower = false;[](#l30.170)
}[](#l30.171)
continue;[](#l30.172)
}[](#l30.173)
}[](#l30.174)
}[](#l30.175)
}[](#l30.176)
- }
- // No connection -> new bound
- Bound *v_bound = _rce->get_bound(v);
- Bound *cur_bound;
- int cur_constant = 0;
- Value cur_value = v;
- if (v->type()->as_IntConstant()) {
cur_constant = v->type()->as_IntConstant()->value();[](#l30.186)
cur_value = NULL;[](#l30.187)
- }
- if (!v_bound->has_upper() || !v_bound->has_lower()) {
cur_bound = new Bound(cur_constant, cur_value, cur_constant, cur_value);[](#l30.190)
- } else {
cur_bound = v_bound;[](#l30.192)
- }
- if (cur_bound) {
if (!bound) {[](#l30.195)
bound = cur_bound->copy();[](#l30.196)
} else {[](#l30.197)
bound->or_op(cur_bound);[](#l30.198)
}[](#l30.199)
- } else {
// No bound
bound = NULL;[](#l30.202)
break;[](#l30.203)
- }
- } +
- if (bound) {
- if (!has_upper) {
bound->remove_upper();[](#l30.209)
- }
- if (!has_lower) {
bound->remove_lower();[](#l30.212)
- }
- _bound = bound;
- } else {
- _bound = new Bound();
- }
+}
+
+
+// ArithmeticOp
+void RangeCheckEliminator::Visitor::do_ArithmeticOp(ArithmeticOp *ao) {
- Value x = ao->x();
- Value y = ao->y(); +
- if (ao->op() == Bytecodes::_irem) {
- Bound* x_bound = _rce->get_bound(x);
- Bound* y_bound = _rce->get_bound(y);
- if (x_bound->lower() >= 0 && x_bound->lower_instr() == NULL && y->as_ArrayLength() != NULL) {
_bound = new Bound(0, NULL, -1, y);[](#l30.230)
- } else {
_bound = new Bound();[](#l30.232)
- }
- } else if (!x->as_Constant() || !y->as_Constant()) {
- assert(!x->as_Constant() || !y->as_Constant(), "One of the operands must be non-constant!");
- if (((x->as_Constant() || y->as_Constant()) && (ao->op() == Bytecodes::_iadd)) || (y->as_Constant() && ao->op() == Bytecodes::_isub)) {
assert(ao->op() == Bytecodes::_iadd || ao->op() == Bytecodes::_isub, "Operand must be iadd or isub");[](#l30.237)
if (y->as_Constant()) {[](#l30.239)
Value tmp = x;[](#l30.240)
x = y;[](#l30.241)
y = tmp;[](#l30.242)
}[](#l30.243)
assert(x->as_Constant()->type()->as_IntConstant(), "Constant must be int constant!");[](#l30.244)
// Constant now in x[](#l30.246)
int const_value = x->as_Constant()->type()->as_IntConstant()->value();[](#l30.247)
if (ao->op() == Bytecodes::_iadd || const_value != min_jint) {[](#l30.248)
if (ao->op() == Bytecodes::_isub) {[](#l30.249)
const_value = -const_value;[](#l30.250)
}[](#l30.251)
Bound * bound = _rce->get_bound(y);
if (bound->has_upper() && bound->has_lower()) {
int new_lower = bound->lower() + const_value;
jlong new_lowerl = ((jlong)bound->lower()) + const_value;
int new_upper = bound->upper() + const_value;
jlong new_upperl = ((jlong)bound->upper()) + const_value;
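// Worked example (illustrative): adding const_value 5 to a known bound [10, 20]
// gives [15, 25]; if either 64-bit sum no longer fits in 32 bits (e.g. a bound of
// [max_jint-1, max_jint] plus 2), the check below discards the result and an
// unknown Bound() is used instead.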
if (((jlong)new_lower) == new_lowerl && ((jlong)new_upper == new_upperl)) {[](#l30.260)
Bound *newBound = new Bound(new_lower, bound->lower_instr(), new_upper, bound->upper_instr());[](#l30.261)
_bound = newBound;[](#l30.262)
} else {[](#l30.263)
// overflow[](#l30.264)
_bound = new Bound();[](#l30.265)
}[](#l30.266)
} else {[](#l30.267)
_bound = new Bound();[](#l30.268)
}[](#l30.269)
} else {[](#l30.270)
_bound = new Bound();[](#l30.271)
}[](#l30.272)
- } else {
Bound *bound = _rce->get_bound(x);[](#l30.274)
if (ao->op() == Bytecodes::_isub) {[](#l30.275)
if (bound->lower_instr() == y) {[](#l30.276)
_bound = new Bound(Instruction::geq, NULL, bound->lower());[](#l30.277)
} else {[](#l30.278)
_bound = new Bound();[](#l30.279)
}[](#l30.280)
} else {[](#l30.281)
_bound = new Bound();[](#l30.282)
}[](#l30.283)
- }
- } +} + +// IfOp +void RangeCheckEliminator::Visitor::do_IfOp(IfOp *ifOp) +{
- if (ifOp->tval()->type()->as_IntConstant() && ifOp->fval()->type()->as_IntConstant()) {
- int min = ifOp->tval()->type()->as_IntConstant()->value();
- int max = ifOp->fval()->type()->as_IntConstant()->value();
- if (min > max) {
// min ^= max ^= min ^= max;[](#l30.295)
int tmp = min;[](#l30.296)
min = max;[](#l30.297)
max = tmp;[](#l30.298)
- }
- _bound = new Bound(min, NULL, max, NULL);
- } +} + +// Get bound. Returns the current bound on Value v. Normally this is the topmost element on the bound stack. +RangeCheckEliminator::Bound *RangeCheckEliminator::get_bound(Value v) {
- // Wrong type or NULL -> No bound
- if (!v || (!v->type()->as_IntType() && !v->type()->as_ObjectType())) return NULL; +
- if (!_bounds[v->id()]) {
- // First (default) bound is calculated
- // Create BoundStack
- _bounds[v->id()] = new BoundStack();
- _visitor.clear_bound();
- Value visit_value = v;
- visit_value->visit(&_visitor);
- Bound *bound = _visitor.bound();
- if (bound) {
_bounds[v->id()]->push(bound);[](#l30.318)
- }
- if (_bounds[v->id()]->length() == 0) {
assert(!(v->as_Constant() && v->type()->as_IntConstant()), "constants not handled here");[](#l30.321)
_bounds[v->id()]->push(new Bound());[](#l30.322)
- }
- } else if (_bounds[v->id()]->length() == 0) {
- // To avoid endless loops, bound is currently in calculation -> nothing known about it
- return new Bound();
- } +
- // Return bound
- return _bounds[v->id()]->top(); +} + +// Update bound +void RangeCheckEliminator::update_bound(IntegerStack &pushed, Value v, Instruction::Condition cond, Value value, int constant) {
- if (cond == Instruction::gtr) {
- cond = Instruction::geq;
- constant++;
- } else if (cond == Instruction::lss) {
- cond = Instruction::leq;
- constant--;
- }
- Bound *bound = new Bound(cond, value, constant);
- update_bound(pushed, v, bound);
+}
+
+// Checks for loop invariance. Returns true if the instruction is outside of the loop which is identified by loop_header.
+bool RangeCheckEliminator::loop_invariant(BlockBegin *loop_header, Instruction *instruction) {
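// Note (illustrative): an instruction defined in a block that strictly dominates
// the loop header has a smaller dominator depth, so the test below classifies it
// as loop invariant; a NULL instruction (purely constant bound) is trivially invariant.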
- assert(loop_header, "Loop header must not be null!");
- if (!instruction) return true;
- return instruction->dominator_depth() < loop_header->dominator_depth(); +} + +// Update bound. Pushes a new bound onto the stack. Tries to do a conjunction with the current bound. +void RangeCheckEliminator::update_bound(IntegerStack &pushed, Value v, Bound *bound) {
- if (v->as_Constant()) {
- // No bound update for constants
- return;
- }
- if (!_bounds[v->id()]) {
- get_bound(v);
- assert(_bounds[v->id()], "Now Stack must exist");
- }
- Bound *top = NULL;
- if (_bounds[v->id()]->length() > 0) {
- top = _bounds[v->id()]->top();
- }
- if (top) {
- bound->and_op(top);
- }
- _bounds[v->id()]->push(bound);
- pushed.append(v->id()); +} + +// Add instruction + idx for in block motion +void RangeCheckEliminator::add_access_indexed_info(InstructionList &indices, int idx, Value instruction, AccessIndexed *ai) {
- int id = instruction->id();
- AccessIndexedInfo *aii = _access_indexed_info[id];
- if (aii == NULL) {
- aii = new AccessIndexedInfo();
- _access_indexed_info[id] = aii;
- indices.append(instruction);
- aii->_min = idx;
- aii->_max = idx;
- aii->_list = new AccessIndexedList();
- } else if (idx >= aii->_min && idx <= aii->_max) {
- remove_range_check(ai);
- return;
- }
- aii->_min = MIN2(aii->_min, idx);
- aii->_max = MAX2(aii->_max, idx);
- aii->_list->append(ai);
+}
+
+// In block motion. Tries to reorder checks in order to reduce some of them.
+// Example:
+// a[i] = 0;
+// a[i+2] = 0;
+// a[i+1] = 0;
+// In this example the check for a[i+1] would be considered as unnecessary during the first iteration.
+// After this i is only checked once for i >= 0 and i+2 < a.length before the first array access. If this
+// check fails, deoptimization is called.
+void RangeCheckEliminator::in_block_motion(BlockBegin *block, AccessIndexedList &accessIndexed, InstructionList &arrays) {
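// Illustrative sketch of the transformation described above (not part of the
// changeset itself): for the a[i] / a[i+2] / a[i+1] block, a single guard of the
// form "if (i < 0 || i + 2 >= a.length) deoptimize();" is placed before the first
// access and the per-access range checks are removed.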
- InstructionList indices; +
- // Now iterate over all arrays
- for (int i=0; i<arrays.length(); i++) {
- int max_constant = -1;
- AccessIndexedList list_constant;
- Value array = arrays.at(i);
- // For all AccessIndexed-instructions in this block concerning the current array.
- for(int j=0; j<accessIndexed.length(); j++) {
AccessIndexed *ai = accessIndexed.at(j);[](#l30.413)
if (ai->array() != array || !ai->check_flag(Instruction::NeedsRangeCheckFlag)) continue;[](#l30.414)
Value index = ai->index();[](#l30.416)
Constant *c = index->as_Constant();[](#l30.417)
if (c != NULL) {[](#l30.418)
int constant_value = c->type()->as_IntConstant()->value();[](#l30.419)
if (constant_value >= 0) {[](#l30.420)
if (constant_value <= max_constant) {[](#l30.421)
// No range check needed for this[](#l30.422)
remove_range_check(ai);[](#l30.423)
} else {[](#l30.424)
max_constant = constant_value;[](#l30.425)
list_constant.append(ai);[](#l30.426)
}[](#l30.427)
}[](#l30.428)
} else {[](#l30.429)
int last_integer = 0;[](#l30.430)
Instruction *last_instruction = index;[](#l30.431)
int base = 0;[](#l30.432)
ArithmeticOp *ao = index->as_ArithmeticOp();[](#l30.433)
while (ao != NULL && (ao->x()->as_Constant() || ao->y()->as_Constant()) && (ao->op() == Bytecodes::_iadd || ao->op() == Bytecodes::_isub)) {[](#l30.435)
c = ao->y()->as_Constant();[](#l30.436)
Instruction *other = ao->x();[](#l30.437)
if (!c && ao->op() == Bytecodes::_iadd) {[](#l30.438)
c = ao->x()->as_Constant();[](#l30.439)
other = ao->y();[](#l30.440)
}[](#l30.441)
if (c) {[](#l30.443)
int value = c->type()->as_IntConstant()->value();[](#l30.444)
if (value != min_jint) {[](#l30.445)
if (ao->op() == Bytecodes::_isub) {[](#l30.446)
value = -value;[](#l30.447)
}[](#l30.448)
base += value;[](#l30.449)
last_integer = base;[](#l30.450)
last_instruction = other;[](#l30.451)
}[](#l30.452)
index = other;[](#l30.453)
} else {[](#l30.454)
break;[](#l30.455)
}[](#l30.456)
ao = index->as_ArithmeticOp();[](#l30.457)
}[](#l30.458)
add_access_indexed_info(indices, last_integer, last_instruction, ai);[](#l30.459)
}[](#l30.460)
- }
- // Iterate over all different indices
- if (_optimistic) {
for (int i=0; i<indices.length(); i++) {[](#l30.465)
Instruction *index_instruction = indices.at(i);[](#l30.466)
AccessIndexedInfo *info = _access_indexed_info[index_instruction->id()];[](#l30.467)
assert(info != NULL, "Info must not be null");[](#l30.468)
// if idx < 0, max > 0, max + idx may fall between 0 and[](#l30.470)
// length-1 and if min < 0, min + idx may overflow and be >=[](#l30.471)
// 0. The predicate wouldn't trigger but some accesses could[](#l30.472)
// be with a negative index. This test guarantees that for the[](#l30.473)
// min and max value that are kept the predicate can't let[](#l30.474)
// some incorrect accesses happen.[](#l30.475)
bool range_cond = (info->_max < 0 || info->_max + min_jint <= info->_min);[](#l30.476)
// Generate code only if more than 2 range checks can be eliminated because of that.[](#l30.478)
// 2 because at least 2 comparisons are done[](#l30.479)
if (info->_list->length() > 2 && range_cond) {[](#l30.480)
AccessIndexed *first = info->_list->at(0);[](#l30.481)
Instruction *insert_position = first->prev();[](#l30.482)
assert(insert_position->next() == first, "prev was calculated");[](#l30.483)
ValueStack *state = first->state_before();[](#l30.484)
// Load min Constant[](#l30.486)
Constant *min_constant = NULL;[](#l30.487)
if (info->_min != 0) {[](#l30.488)
min_constant = new Constant(new IntConstant(info->_min));[](#l30.489)
NOT_PRODUCT(min_constant->set_printable_bci(first->printable_bci()));[](#l30.490)
insert_position = insert_position->insert_after(min_constant);[](#l30.491)
}[](#l30.492)
// Load max Constant[](#l30.494)
Constant *max_constant = NULL;[](#l30.495)
if (info->_max != 0) {[](#l30.496)
max_constant = new Constant(new IntConstant(info->_max));[](#l30.497)
NOT_PRODUCT(max_constant->set_printable_bci(first->printable_bci()));[](#l30.498)
insert_position = insert_position->insert_after(max_constant);[](#l30.499)
}[](#l30.500)
// Load array length[](#l30.502)
Value length_instr = first->length();[](#l30.503)
if (!length_instr) {[](#l30.504)
ArrayLength *length = new ArrayLength(array, first->state_before()->copy());[](#l30.505)
length->set_exception_state(length->state_before());[](#l30.506)
length->set_flag(Instruction::DeoptimizeOnException, true);[](#l30.507)
insert_position = insert_position->insert_after_same_bci(length);[](#l30.508)
length_instr = length;[](#l30.509)
}[](#l30.510)
// Calculate lower bound[](#l30.512)
Instruction *lower_compare = index_instruction;[](#l30.513)
if (min_constant) {[](#l30.514)
ArithmeticOp *ao = new ArithmeticOp(Bytecodes::_iadd, min_constant, lower_compare, false, NULL);[](#l30.515)
insert_position = insert_position->insert_after_same_bci(ao);[](#l30.516)
lower_compare = ao;[](#l30.517)
}[](#l30.518)
// Calculate upper bound[](#l30.520)
Instruction *upper_compare = index_instruction;[](#l30.521)
if (max_constant) {[](#l30.522)
ArithmeticOp *ao = new ArithmeticOp(Bytecodes::_iadd, max_constant, upper_compare, false, NULL);[](#l30.523)
insert_position = insert_position->insert_after_same_bci(ao);[](#l30.524)
upper_compare = ao;[](#l30.525)
}[](#l30.526)
// Trick with unsigned compare is done
int bci = NOT_PRODUCT(first->printable_bci()) PRODUCT_ONLY(-1);
insert_position = predicate(upper_compare, Instruction::aeq, length_instr, state, insert_position, bci);
insert_position = predicate_cmp_with_const(lower_compare, Instruction::leq, -1, state, insert_position);
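// Illustrative reading (using the a[i] / a[i+2] / a[i+1] example from the header
// comment of in_block_motion): with _min == 0 and _max == 2 the two predicates
// amount to "if ((unsigned)(i + 2) >= (unsigned)a.length) deoptimize()" followed
// by "if (i <= -1) deoptimize()".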
for (int j = 0; j<info->_list->length(); j++) {[](#l30.532)
AccessIndexed *ai = info->_list->at(j);[](#l30.533)
remove_range_check(ai);[](#l30.534)
}[](#l30.535)
}[](#l30.536)
_access_indexed_info[index_instruction->id()] = NULL;[](#l30.537)
}[](#l30.538)
indices.clear();[](#l30.539)
if (list_constant.length() > 1) {[](#l30.541)
AccessIndexed *first = list_constant.at(0);[](#l30.542)
Instruction *insert_position = first->prev();[](#l30.543)
ValueStack *state = first->state_before();[](#l30.544)
// Load max Constant[](#l30.545)
Constant *constant = new Constant(new IntConstant(max_constant));[](#l30.546)
NOT_PRODUCT(constant->set_printable_bci(first->printable_bci()));[](#l30.547)
insert_position = insert_position->insert_after(constant);[](#l30.548)
Instruction *compare_instr = constant;[](#l30.549)
Value length_instr = first->length();[](#l30.550)
if (!length_instr) {[](#l30.551)
ArrayLength *length = new ArrayLength(array, state->copy());[](#l30.552)
length->set_exception_state(length->state_before());[](#l30.553)
length->set_flag(Instruction::DeoptimizeOnException, true);[](#l30.554)
insert_position = insert_position->insert_after_same_bci(length);[](#l30.555)
length_instr = length;[](#l30.556)
}[](#l30.557)
// Compare for greater or equal to array length[](#l30.558)
insert_position = predicate(compare_instr, Instruction::geq, length_instr, state, insert_position);[](#l30.559)
for (int j = 0; j<list_constant.length(); j++) {[](#l30.560)
AccessIndexed *ai = list_constant.at(j);[](#l30.561)
remove_range_check(ai);[](#l30.562)
}[](#l30.563)
}[](#l30.564)
- }
- } +} + +bool RangeCheckEliminator::set_process_block_flags(BlockBegin *block) {
- Instruction *cur = block;
- bool process = false; +
- while (cur) {
- process |= (cur->as_AccessIndexed() != NULL);
- cur = cur->next();
- } +
- BlockList *dominates = block->dominates();
- for (int i=0; ilength(); i++) {
- BlockBegin *next = dominates->at(i);
- process |= set_process_block_flags(next);
- } +
- if (!process) {
- block->set(BlockBegin::donot_eliminate_range_checks);
- }
- return process; +} + +bool RangeCheckEliminator::is_ok_for_deoptimization(Instruction *insert_position, Instruction *array_instr, Instruction *length_instr, Instruction *lower_instr, int lower, Instruction *upper_instr, int upper) {
- bool upper_check = true;
- assert(lower_instr || lower >= 0, "If no lower_instr is present, lower must be greater than or equal to 0");
- assert(!lower_instr || lower_instr->dominator_depth() <= insert_position->dominator_depth(), "Dominator depth must be smaller");
- assert(!upper_instr || upper_instr->dominator_depth() <= insert_position->dominator_depth(), "Dominator depth must be smaller");
- assert(array_instr, "Array instruction must exist");
- assert(array_instr->dominator_depth() <= insert_position->dominator_depth(), "Dominator depth must be smaller");
- assert(!length_instr || length_instr->dominator_depth() <= insert_position->dominator_depth(), "Dominator depth must be smaller"); +
- if (upper_instr && upper_instr->as_ArrayLength() && upper_instr->as_ArrayLength()->array() == array_instr) {
- // static check
- if (upper >= 0) return false; // would always trigger a deopt:
// array_length + x >= array_length, x >= 0 is always true[](#l30.602)
- upper_check = false;
- }
- if (lower_instr && lower_instr->as_ArrayLength() && lower_instr->as_ArrayLength()->array() == array_instr) {
- if (lower > 0) return false;
- }
- // No upper check required -> skip
- if (upper_check && upper_instr && upper_instr->type()->as_ObjectType() && upper_instr == array_instr) {
- // upper_instr is object means that the upper bound is the length
- // of the upper_instr.
- return false;
- }
- return true; +} + +Instruction* RangeCheckEliminator::insert_after(Instruction* insert_position, Instruction* instr, int bci) {
- if (bci != -1) {
- NOT_PRODUCT(instr->set_printable_bci(bci));
- return insert_position->insert_after(instr);
- } else {
- return insert_position->insert_after_same_bci(instr);
- } +} + +Instruction* RangeCheckEliminator::predicate(Instruction* left, Instruction::Condition cond, Instruction* right, ValueStack* state, Instruction *insert_position, int bci) {
- RangeCheckPredicate *deoptimize = new RangeCheckPredicate(left, cond, true, right, state->copy());
- return insert_after(insert_position, deoptimize, bci); +} + +Instruction* RangeCheckEliminator::predicate_cmp_with_const(Instruction* instr, Instruction::Condition cond, int constant, ValueStack* state, Instruction *insert_position, int bci) {
- Constant *const_instr = new Constant(new IntConstant(constant));
- insert_position = insert_after(insert_position, const_instr, bci);
- return predicate(instr, cond, const_instr, state, insert_position); +} + +Instruction* RangeCheckEliminator::predicate_add(Instruction* left, int left_const, Instruction::Condition cond, Instruction* right, ValueStack* state, Instruction *insert_position, int bci) {
- Constant *constant = new Constant(new IntConstant(left_const));
- insert_position = insert_after(insert_position, constant, bci);
- ArithmeticOp *ao = new ArithmeticOp(Bytecodes::_iadd, constant, left, false, NULL);
- insert_position = insert_position->insert_after_same_bci(ao);
- return predicate(ao, cond, right, state, insert_position); +} + +Instruction* RangeCheckEliminator::predicate_add_cmp_with_const(Instruction* left, int left_const, Instruction::Condition cond, int constant, ValueStack* state, Instruction *insert_position, int bci) {
- Constant *const_instr = new Constant(new IntConstant(constant));
- insert_position = insert_after(insert_position, const_instr, bci);
- return predicate_add(left, left_const, cond, const_instr, state, insert_position);
+}
+
+// Insert deoptimization, returns true if successful or false if range check should not be removed
+void RangeCheckEliminator::insert_deoptimization(ValueStack *state, Instruction *insert_position, Instruction *array_instr, Instruction *length_instr, Instruction *lower_instr, int lower, Instruction *upper_instr, int upper, AccessIndexed *ai) {
- assert(is_ok_for_deoptimization(insert_position, array_instr, length_instr, lower_instr, lower, upper_instr, upper), "should have been tested before");
- bool upper_check = !(upper_instr && upper_instr->as_ArrayLength() && upper_instr->as_ArrayLength()->array() == array_instr); +
- int bci = NOT_PRODUCT(ai->printable_bci()) PRODUCT_ONLY(-1);
- if (lower_instr) {
- assert(!lower_instr->type()->as_ObjectType(), "Must not be object type");
- if (lower == 0) {
// Compare for less than 0[](#l30.660)
insert_position = predicate_cmp_with_const(lower_instr, Instruction::lss, 0, state, insert_position, bci);[](#l30.661)
- } else if (lower > 0) {
// Compare for smaller 0[](#l30.663)
insert_position = predicate_add_cmp_with_const(lower_instr, lower, Instruction::lss, 0, state, insert_position, bci);[](#l30.664)
- } else {
assert(lower < 0, "");[](#l30.666)
// Add 1[](#l30.667)
lower++;[](#l30.668)
lower = -lower;[](#l30.669)
// Compare for smaller or equal 0[](#l30.670)
insert_position = predicate_cmp_with_const(lower_instr, Instruction::leq, lower, state, insert_position, bci);[](#l30.671)
- }
- } +
- // We need to know length of array
- if (!length_instr) {
- // Load length if necessary
- ArrayLength *length = new ArrayLength(array_instr, state->copy());
- NOT_PRODUCT(length->set_printable_bci(ai->printable_bci()));
- length->set_exception_state(length->state_before());
- length->set_flag(Instruction::DeoptimizeOnException, true);
- insert_position = insert_position->insert_after(length);
- length_instr = length;
- } +
- // No upper check required -> skip
- if (!upper_check) return; +
- if (!upper_instr) {
- // Compare for geq array.length
- insert_position = predicate_cmp_with_const(length_instr, Instruction::leq, upper, state, insert_position, bci);
- } else {
- if (upper_instr->type()->as_ObjectType()) {
assert(state, "must not be null");[](#l30.694)
assert(upper_instr != array_instr, "should be");[](#l30.695)
ArrayLength *length = new ArrayLength(upper_instr, state->copy());[](#l30.696)
NOT_PRODUCT(length->set_printable_bci(ai->printable_bci()));[](#l30.697)
length->set_flag(Instruction::DeoptimizeOnException, true);[](#l30.698)
length->set_exception_state(length->state_before());[](#l30.699)
insert_position = insert_position->insert_after(length);[](#l30.700)
upper_instr = length;[](#l30.701)
- }
- assert(upper_instr->type()->as_IntType(), "Must not be object type!");
- if (upper == 0) {
// Compare for geq array.length[](#l30.706)
insert_position = predicate(upper_instr, Instruction::geq, length_instr, state, insert_position, bci);[](#l30.707)
- } else if (upper < 0) {
// Compare for geq array.length[](#l30.709)
insert_position = predicate_add(upper_instr, upper, Instruction::geq, length_instr, state, insert_position, bci);[](#l30.710)
- } else {
assert(upper > 0, "");[](#l30.712)
upper = -upper;[](#l30.713)
// Compare for geq array.length[](#l30.714)
insert_position = predicate_add(length_instr, upper, Instruction::leq, upper_instr, state, insert_position, bci);[](#l30.715)
- }
- } +} + +// Add if condition +void RangeCheckEliminator::add_if_condition(IntegerStack &pushed, Value x, Value y, Instruction::Condition condition) {
- if (y->as_Constant()) return; +
- int const_value = 0;
- Value instr_value = x;
- Constant *c = x->as_Constant();
- ArithmeticOp *ao = x->as_ArithmeticOp(); +
- if (c != NULL) {
- const_value = c->type()->as_IntConstant()->value();
- instr_value = NULL;
- } else if (ao != NULL && (!ao->x()->as_Constant() || !ao->y()->as_Constant()) && ((ao->op() == Bytecodes::_isub && ao->y()->as_Constant()) || ao->op() == Bytecodes::_iadd)) {
- assert(!ao->x()->as_Constant() || !ao->y()->as_Constant(), "At least one operator must be non-constant!");
- assert(ao->op() == Bytecodes::_isub || ao->op() == Bytecodes::_iadd, "Operation has to be add or sub!");
- c = ao->x()->as_Constant();
- if (c != NULL) {
const_value = c->type()->as_IntConstant()->value();[](#l30.737)
instr_value = ao->y();[](#l30.738)
- } else {
c = ao->y()->as_Constant();[](#l30.740)
if (c != NULL) {[](#l30.741)
const_value = c->type()->as_IntConstant()->value();[](#l30.742)
instr_value = ao->x();[](#l30.743)
}[](#l30.744)
- }
- if (ao->op() == Bytecodes::_isub) {
assert(ao->y()->as_Constant(), "1 - x not supported, only x - 1 is valid!");[](#l30.747)
if (const_value > min_jint) {[](#l30.748)
const_value = -const_value;[](#l30.749)
} else {[](#l30.750)
const_value = 0;[](#l30.751)
instr_value = x;[](#l30.752)
}[](#l30.753)
- }
- } +
- update_bound(pushed, y, condition, instr_value, const_value); +} + +// Process If +void RangeCheckEliminator::process_if(IntegerStack &pushed, BlockBegin *block, If *cond) {
- // Only if we are direct true / false successor and NOT both ! (even this may occur)
- if ((cond->tsux() == block || cond->fsux() == block) && cond->tsux() != cond->fsux()) {
- Instruction::Condition condition = cond->cond();
- if (cond->fsux() == block) {
condition = Instruction::negate(condition);[](#l30.766)
- }
- Value x = cond->x();
- Value y = cond->y();
- if (x->type()->as_IntType() && y->type()->as_IntType()) {
add_if_condition(pushed, y, x, condition);[](#l30.771)
add_if_condition(pushed, x, y, Instruction::mirror(condition));[](#l30.772)
- }
- } +} + +// Process access indexed +void RangeCheckEliminator::process_access_indexed(BlockBegin *loop_header, BlockBegin *block, AccessIndexed *ai) {
- TRACE_RANGE_CHECK_ELIMINATION(
- tty->fill_to(block->dominator_depth()*2)
- );
- TRACE_RANGE_CHECK_ELIMINATION(
- tty->print_cr("Access indexed: index=%d length=%d", ai->index()->id(), ai->length()->id())
- ); +
- if (ai->check_flag(Instruction::NeedsRangeCheckFlag)) {
- Bound *index_bound = get_bound(ai->index());
- if (!index_bound->has_lower() || !index_bound->has_upper()) {
TRACE_RANGE_CHECK_ELIMINATION([](#l30.789)
tty->fill_to(block->dominator_depth()*2);[](#l30.790)
tty->print_cr("Index instruction %d has no lower and/or no upper bound!", ai->index()->id())[](#l30.791)
);[](#l30.792)
return;[](#l30.793)
- }
- Bound *array_bound;
- if (ai->length()) {
array_bound = get_bound(ai->length());[](#l30.798)
- } else {
array_bound = get_bound(ai->array());[](#l30.800)
- }
- if (in_array_bound(index_bound, ai->array()) ||
(index_bound && array_bound && index_bound->is_smaller(array_bound) && !index_bound->lower_instr() && index_bound->lower() >= 0)) {[](#l30.804)
TRACE_RANGE_CHECK_ELIMINATION([](#l30.805)
tty->fill_to(block->dominator_depth()*2);[](#l30.806)
tty->print_cr("Bounds check for instruction %d in block B%d can be fully eliminated!", ai->id(), ai->block()->block_id())[](#l30.807)
);[](#l30.808)
remove_range_check(ai);[](#l30.810)
- } else if (_optimistic && loop_header) {
assert(ai->array(), "Array must not be null!");[](#l30.812)
assert(ai->index(), "Index must not be null!");[](#l30.813)
// Array instruction[](#l30.815)
Instruction *array_instr = ai->array();[](#l30.816)
if (!loop_invariant(loop_header, array_instr)) {[](#l30.817)
TRACE_RANGE_CHECK_ELIMINATION([](#l30.818)
tty->fill_to(block->dominator_depth()*2);[](#l30.819)
tty->print_cr("Array %d is not loop invariant to header B%d", ai->array()->id(), loop_header->block_id())[](#l30.820)
);[](#l30.821)
return;[](#l30.822)
}[](#l30.823)
// Lower instruction[](#l30.825)
Value index_instr = ai->index();[](#l30.826)
Value lower_instr = index_bound->lower_instr();[](#l30.827)
if (!loop_invariant(loop_header, lower_instr)) {[](#l30.828)
TRACE_RANGE_CHECK_ELIMINATION([](#l30.829)
tty->fill_to(block->dominator_depth()*2);[](#l30.830)
tty->print_cr("Lower instruction %d not loop invariant!", lower_instr->id())[](#l30.831)
);[](#l30.832)
return;[](#l30.833)
}[](#l30.834)
if (!lower_instr && index_bound->lower() < 0) {[](#l30.835)
TRACE_RANGE_CHECK_ELIMINATION([](#l30.836)
tty->fill_to(block->dominator_depth()*2);[](#l30.837)
tty->print_cr("Lower bound smaller than 0 (%d)!", index_bound->lower())[](#l30.838)
);[](#l30.839)
return;[](#l30.840)
}[](#l30.841)
// Upper instruction[](#l30.843)
Value upper_instr = index_bound->upper_instr();[](#l30.844)
if (!loop_invariant(loop_header, upper_instr)) {[](#l30.845)
TRACE_RANGE_CHECK_ELIMINATION([](#l30.846)
tty->fill_to(block->dominator_depth()*2);[](#l30.847)
tty->print_cr("Upper instruction %d not loop invariant!", upper_instr->id())[](#l30.848)
);[](#l30.849)
return;[](#l30.850)
}[](#l30.851)
// Length instruction[](#l30.853)
Value length_instr = ai->length();[](#l30.854)
if (!loop_invariant(loop_header, length_instr)) {[](#l30.855)
// Generate length instruction yourself
length_instr = NULL;[](#l30.857)
}[](#l30.858)
TRACE_RANGE_CHECK_ELIMINATION([](#l30.860)
tty->fill_to(block->dominator_depth()*2);[](#l30.861)
tty->print_cr("LOOP INVARIANT access indexed %d found in block B%d!", ai->id(), ai->block()->block_id())[](#l30.862)
);[](#l30.863)
BlockBegin *pred_block = loop_header->dominator();[](#l30.865)
assert(pred_block != NULL, "Every loop header has a dominator!");[](#l30.866)
BlockEnd *pred_block_end = pred_block->end();[](#l30.867)
Instruction *insert_position = pred_block_end->prev();[](#l30.868)
ValueStack *state = pred_block_end->state_before();[](#l30.869)
if (pred_block_end->as_Goto() && state == NULL) state = pred_block_end->state();[](#l30.870)
assert(state, "State must not be null");[](#l30.871)
// Add deoptimization to dominator of loop header[](#l30.873)
TRACE_RANGE_CHECK_ELIMINATION([](#l30.874)
tty->fill_to(block->dominator_depth()*2);[](#l30.875)
tty->print_cr("Inserting deopt at bci %d in block B%d!", state->bci(), insert_position->block()->block_id())[](#l30.876)
);[](#l30.877)
if (!is_ok_for_deoptimization(insert_position, array_instr, length_instr, lower_instr, index_bound->lower(), upper_instr, index_bound->upper())) {[](#l30.879)
TRACE_RANGE_CHECK_ELIMINATION([](#l30.880)
tty->fill_to(block->dominator_depth()*2);[](#l30.881)
tty->print_cr("Could not eliminate because of static analysis!")[](#l30.882)
);[](#l30.883)
return;[](#l30.884)
}[](#l30.885)
insert_deoptimization(state, insert_position, array_instr, length_instr, lower_instr, index_bound->lower(), upper_instr, index_bound->upper(), ai);[](#l30.887)
// Finally remove the range check
remove_range_check(ai);[](#l30.890)
- }
- } +} + +void RangeCheckEliminator::remove_range_check(AccessIndexed *ai) {
- ai->set_flag(Instruction::NeedsRangeCheckFlag, false);
- // no range check, no need for the length instruction anymore
- ai->clear_length(); +
- TRACE_RANGE_CHECK_ELIMINATION(
- tty->fill_to(ai->dominator_depth()*2);
- tty->print_cr("Range check for instruction %d eliminated!", ai->id());
- ); +
- ASSERT_RANGE_CHECK_ELIMINATION(
- Value array_length = ai->length();
- if (!array_length) {
array_length = ai->array();[](#l30.908)
assert(array_length->type()->as_ObjectType(), "Has to be object type!");[](#l30.909)
- }
- int cur_constant = -1;
- Value cur_value = array_length;
- if (cur_value->type()->as_IntConstant()) {
cur_constant += cur_value->type()->as_IntConstant()->value();[](#l30.914)
cur_value = NULL;[](#l30.915)
- }
- Bound *new_index_bound = new Bound(0, NULL, cur_constant, cur_value);
- add_assertions(new_index_bound, ai->index(), ai);
- ); +} + +// Calculate bounds for instruction in this block and children blocks in the dominator tree +void RangeCheckEliminator::calc_bounds(BlockBegin *block, BlockBegin *loop_header) {
- // Ensures a valid loop_header
- assert(!loop_header || loop_header->is_set(BlockBegin::linear_scan_loop_header_flag), "Loop header has to be real !"); +
- // Tracing output
- TRACE_RANGE_CHECK_ELIMINATION(
- tty->fill_to(block->dominator_depth()*2);
- tty->print_cr("Block B%d", block->block_id());
- ); +
- // Pushed stack for conditions
- IntegerStack pushed;
- // Process If
- BlockBegin *parent = block->dominator();
- if (parent != NULL) {
- If *cond = parent->end()->as_If();
- if (cond != NULL) {
process_if(pushed, block, cond);[](#l30.940)
- }
- } +
- // Iterate over current block
- InstructionList arrays;
- AccessIndexedList accessIndexed;
- Instruction *cur = block; +
- while (cur) {
- // Ensure cur wasn't inserted during the elimination
- if (cur->id() < this->_bounds.length()) {
// Process only if it is an access indexed instruction[](#l30.952)
AccessIndexed *ai = cur->as_AccessIndexed();[](#l30.953)
if (ai != NULL) {[](#l30.954)
process_access_indexed(loop_header, block, ai);[](#l30.955)
accessIndexed.append(ai);[](#l30.956)
if (!arrays.contains(ai->array())) {[](#l30.957)
arrays.append(ai->array());[](#l30.958)
}[](#l30.959)
Bound *b = get_bound(ai->index());[](#l30.960)
if (!b->lower_instr()) {[](#l30.961)
// Lower bound is constant[](#l30.962)
update_bound(pushed, ai->index(), Instruction::geq, NULL, 0);[](#l30.963)
}[](#l30.964)
if (!b->has_upper()) {[](#l30.965)
if (ai->length() && ai->length()->type()->as_IntConstant()) {[](#l30.966)
int value = ai->length()->type()->as_IntConstant()->value();[](#l30.967)
update_bound(pushed, ai->index(), Instruction::lss, NULL, value);[](#l30.968)
} else {[](#l30.969)
// Has no upper bound[](#l30.970)
Instruction *instr = ai->length();[](#l30.971)
if (instr != NULL) instr = ai->array();[](#l30.972)
update_bound(pushed, ai->index(), Instruction::lss, instr, 0);[](#l30.973)
}[](#l30.974)
}[](#l30.975)
}[](#l30.976)
- }
- cur = cur->next();
- } +
- // Output current condition stack
- TRACE_RANGE_CHECK_ELIMINATION(dump_condition_stack(block)); +
- // Do in block motion of range checks
- in_block_motion(block, accessIndexed, arrays); +
- // Call all dominated blocks
- for (int i=0; idominates()->length(); i++) {
- BlockBegin *next = block->dominates()->at(i);
- if (!next->is_set(BlockBegin::donot_eliminate_range_checks)) {
// if current block is a loop header and:[](#l30.991)
// - next block belongs to the same loop[](#l30.992)
// or[](#l30.993)
// - next block belongs to an inner loop[](#l30.994)
// then current block is the loop header for next block[](#l30.995)
if (block->is_set(BlockBegin::linear_scan_loop_header_flag) && (block->loop_index() == next->loop_index() || next->loop_depth() > block->loop_depth())) {[](#l30.996)
calc_bounds(next, block);[](#l30.997)
} else {[](#l30.998)
calc_bounds(next, loop_header);[](#l30.999)
}[](#l30.1000)
- }
- } +
- // Reset stack
- for (int i=0; i<pushed.length(); i++) {
- _bounds[pushed[i]]->pop();
- } +} + +#ifndef PRODUCT +// Dump condition stack +void RangeCheckEliminator::dump_condition_stack(BlockBegin *block) {
- for (int i=0; i<_ir->linear_scan_order()->length(); i++) {
- BlockBegin *cur_block = _ir->linear_scan_order()->at(i);
- Instruction *instr = cur_block;
- for_each_phi_fun(cur_block, phi,
BoundStack *bound_stack = _bounds.at(phi->id());[](#l30.1017)
if (bound_stack && bound_stack->length() > 0) {[](#l30.1018)
Bound *bound = bound_stack->top();[](#l30.1019)
if ((bound->has_lower() || bound->has_upper()) && (bound->lower_instr() != phi || bound->upper_instr() != phi || bound->lower() != 0 || bound->upper() != 0)) {[](#l30.1020)
TRACE_RANGE_CHECK_ELIMINATION(tty->fill_to(2*block->dominator_depth());[](#l30.1021)
tty->print("i%d", phi->id());[](#l30.1022)
tty->print(": ");[](#l30.1023)
bound->print();[](#l30.1024)
tty->print_cr("");[](#l30.1025)
);[](#l30.1026)
}[](#l30.1027)
});[](#l30.1028)
- while (!instr->as_BlockEnd()) {
if (instr->id() < _bounds.length()) {[](#l30.1031)
BoundStack *bound_stack = _bounds.at(instr->id());[](#l30.1032)
if (bound_stack && bound_stack->length() > 0) {[](#l30.1033)
Bound *bound = bound_stack->top();[](#l30.1034)
if ((bound->has_lower() || bound->has_upper()) && (bound->lower_instr() != instr || bound->upper_instr() != instr || bound->lower() != 0 || bound->upper() != 0)) {[](#l30.1035)
TRACE_RANGE_CHECK_ELIMINATION(tty->fill_to(2*block->dominator_depth());[](#l30.1036)
tty->print("i%d", instr->id());[](#l30.1037)
tty->print(": ");[](#l30.1038)
bound->print();[](#l30.1039)
tty->print_cr("");[](#l30.1040)
);[](#l30.1041)
}[](#l30.1042)
}[](#l30.1043)
}[](#l30.1044)
instr = instr->next();[](#l30.1045)
- }
- }
+}
+#endif
+
+// Verification of the IR
+RangeCheckEliminator::Verification::Verification(IR *ir) : _used(BlockBegin::number_of_blocks(), false) {
- this->_ir = ir;
- ir->iterate_linear_scan_order(this); +} + +// Verify this block +void RangeCheckEliminator::Verification::block_do(BlockBegin *block) {
- If *cond = block->end()->as_If();
- // Watch out: tsux and fsux can be the same
- if (block->number_of_sux() > 1) {
- for (int i=0; inumber_of_sux(); i++) {
BlockBegin *sux = block->sux_at(i);[](#l30.1063)
BlockBegin *pred = NULL;[](#l30.1064)
for (int j=0; j<sux->number_of_preds(); j++) {[](#l30.1065)
BlockBegin *cur = sux->pred_at(j);[](#l30.1066)
assert(cur != NULL, "Predecessor must not be null");[](#l30.1067)
if (!pred) {[](#l30.1068)
pred = cur;[](#l30.1069)
}[](#l30.1070)
assert(cur == pred, "Block must not have more than one predecessor if its predecessor has more than one successor");[](#l30.1071)
}[](#l30.1072)
assert(sux->number_of_preds() >= 1, "Block must have at least one predecessor");[](#l30.1073)
assert(sux->pred_at(0) == block, "Wrong successor");[](#l30.1074)
- }
- } +
- BlockBegin *dominator = block->dominator();
- if (dominator) {
- assert(block != _ir->start(), "Start block must not have a dominator!");
- assert(can_reach(dominator, block), "Dominator can't reach its block!");
- assert(can_reach(_ir->start(), dominator), "Dominator is unreachable!");
- assert(!can_reach(_ir->start(), block, dominator), "Wrong dominator! Block can be reached anyway!");
- BlockList *all_blocks = _ir->linear_scan_order();
- for (int i=0; ilength(); i++) {
BlockBegin *cur = all_blocks->at(i);[](#l30.1086)
if (cur != dominator && cur != block) {[](#l30.1087)
assert(can_reach(dominator, block, cur), "There has to be another dominator!");[](#l30.1088)
}[](#l30.1089)
- }
- } else {
- assert(block == _ir->start(), "Only start block must not have a dominator");
- } +
- if (block->is_set(BlockBegin::linear_scan_loop_header_flag)) {
- int loop_index = block->loop_index();
- BlockList *all_blocks = _ir->linear_scan_order();
- assert(block->number_of_preds() >= 1, "Block must have at least one predecessor");
- assert(!block->is_set(BlockBegin::exception_entry_flag), "Loop header must not be exception handler!");
- // Sometimes, the backbranch comes from an exception handler. In
- // this case, loop indexes/loop depths may not appear correct.
- bool loop_through_xhandler = false;
- for (int i = 0; i < block->number_of_exception_handlers(); i++) {
BlockBegin *xhandler = block->exception_handler_at(i);[](#l30.1104)
for (int j = 0; j < block->number_of_preds(); j++) {[](#l30.1105)
if (dominates(xhandler, block->pred_at(j)) || xhandler == block->pred_at(j)) {[](#l30.1106)
loop_through_xhandler = true;[](#l30.1107)
}[](#l30.1108)
}[](#l30.1109)
- }
- for (int i=0; inumber_of_sux(); i++) {
BlockBegin *sux = block->sux_at(i);[](#l30.1113)
assert(sux->loop_depth() != block->loop_depth() || sux->loop_index() == block->loop_index() || loop_through_xhandler, "Loop index has to be same");[](#l30.1114)
assert(sux->loop_depth() == block->loop_depth() || sux->loop_index() != block->loop_index(), "Loop index has to be different");[](#l30.1115)
- }
- for (int i=0; ilength(); i++) {
BlockBegin *cur = all_blocks->at(i);[](#l30.1119)
if (cur->loop_index() == loop_index && cur != block) {[](#l30.1120)
assert(dominates(block->dominator(), cur), "Dominator of loop header must dominate all loop blocks");[](#l30.1121)
}[](#l30.1122)
- }
- } +
- Instruction *cur = block;
- while (cur) {
- assert(cur->block() == block, "Block begin has to be set correctly!");
- cur = cur->next();
- } +} + +// Loop header must dominate all loop blocks +bool RangeCheckEliminator::Verification::dominates(BlockBegin *dominator, BlockBegin *block) {
- BlockBegin *cur = block->dominator();
- while (cur && cur != dominator) {
- cur = cur->dominator();
- }
- return cur == dominator; +} + +// Try to reach Block end beginning in Block start and not using Block dont_use +bool RangeCheckEliminator::Verification::can_reach(BlockBegin *start, BlockBegin *end, BlockBegin dont_use / = NULL */) {
- if (start == end) return start != dont_use;
- // Simple BFS from start to end
- // BlockBeginList _current;
- for (int i=0; i<_used.length(); i++) {
- _used[i] = false;
- }
- _current.truncate(0);
- _successors.truncate(0);
- if (start != dont_use) {
- _current.push(start);
- _used[start->block_id()] = true;
- } +
- // BlockBeginList _successors;
- while (_current.length() > 0) {
- BlockBegin *cur = _current.pop();
- // Add exception handlers to list
- for (int i=0; inumber_of_exception_handlers(); i++) {
BlockBegin *xhandler = cur->exception_handler_at(i);[](#l30.1162)
_successors.push(xhandler);[](#l30.1163)
// Add exception handlers of _successors to list[](#l30.1164)
for (int j=0; j<xhandler->number_of_exception_handlers(); j++) {[](#l30.1165)
BlockBegin *sux_xhandler = xhandler->exception_handler_at(j);[](#l30.1166)
_successors.push(sux_xhandler);[](#l30.1167)
}[](#l30.1168)
- }
- // Add normal _successors to list
- for (int i=0; inumber_of_sux(); i++) {
BlockBegin *sux = cur->sux_at(i);[](#l30.1172)
_successors.push(sux);[](#l30.1173)
// Add exception handlers of _successors to list[](#l30.1174)
for (int j=0; j<sux->number_of_exception_handlers(); j++) {[](#l30.1175)
BlockBegin *xhandler = sux->exception_handler_at(j);[](#l30.1176)
_successors.push(xhandler);[](#l30.1177)
}[](#l30.1178)
- }
- for (int i=0; i<_successors.length(); i++) {
BlockBegin *sux = _successors[i];[](#l30.1181)
assert(sux != NULL, "Successor must not be NULL!");[](#l30.1182)
if (sux == end) {[](#l30.1183)
return true;[](#l30.1184)
}[](#l30.1185)
if (sux != dont_use && !_used[sux->block_id()]) {[](#l30.1186)
_used[sux->block_id()] = true;[](#l30.1187)
_current.push(sux);[](#l30.1188)
}[](#l30.1189)
- }
- _successors.truncate(0);
- } +
- return false; +} + +// Bound +RangeCheckEliminator::Bound::~Bound() { +} + +// Bound constructor +RangeCheckEliminator::Bound::Bound() {
- init();
- this->_lower = min_jint;
- this->_upper = max_jint;
- this->_lower_instr = NULL;
- this->_upper_instr = NULL; +} + +// Bound constructor +RangeCheckEliminator::Bound::Bound(int lower, Value lower_instr, int upper, Value upper_instr) {
- init();
- assert(!lower_instr || !lower_instr->as_Constant() || !lower_instr->type()->as_IntConstant(), "Must not be constant!");
- assert(!upper_instr || !upper_instr->as_Constant() || !upper_instr->type()->as_IntConstant(), "Must not be constant!");
- this->_lower = lower;
- this->_upper = upper;
- this->_lower_instr = lower_instr;
- this->_upper_instr = upper_instr; +} + +// Bound constructor +RangeCheckEliminator::Bound::Bound(Instruction::Condition cond, Value v, int constant) {
- assert(!v || (v->type() && (v->type()->as_IntType() || v->type()->as_ObjectType())), "Type must be array or integer!");
- assert(!v || !v->as_Constant() || !v->type()->as_IntConstant(), "Must not be constant!"); +
- init();
- if (cond == Instruction::eql) {
- _lower = constant;
- _lower_instr = v;
- _upper = constant;
- _upper_instr = v;
- } else if (cond == Instruction::neq) {
- _lower = min_jint;
- _upper = max_jint;
- _lower_instr = NULL;
- _upper_instr = NULL;
- if (v == NULL) {
if (constant == min_jint) {[](#l30.1238)
_lower++;[](#l30.1239)
}[](#l30.1240)
if (constant == max_jint) {[](#l30.1241)
_upper--;[](#l30.1242)
}[](#l30.1243)
- }
- } else if (cond == Instruction::geq) {
- _lower = constant;
- _lower_instr = v;
- _upper = max_jint;
- _upper_instr = NULL;
- } else if (cond == Instruction::leq) {
- _lower = min_jint;
- _lower_instr = NULL;
- _upper = constant;
- _upper_instr = v;
- } else {
- ShouldNotReachHere();
- } +} + +// Set lower +void RangeCheckEliminator::Bound::set_lower(int value, Value v) {
- assert(!v || !v->as_Constant() || !v->type()->as_IntConstant(), "Must not be constant!");
- this->_lower = value;
- this->_lower_instr = v; +} + +// Set upper +void RangeCheckEliminator::Bound::set_upper(int value, Value v) {
- assert(!v || !v->as_Constant() || !v->type()->as_IntConstant(), "Must not be constant!");
- this->_upper = value;
- this->_upper_instr = v; +} + +// Add constant -> no overflow may occur +void RangeCheckEliminator::Bound::add_constant(int value) {
- this->_lower += value;
- this->_upper += value; +} + +// Init +void RangeCheckEliminator::Bound::init() { +} + +// or +void RangeCheckEliminator::Bound::or_op(Bound *b) {
- // Watch out, bound is not guaranteed not to overflow
- // Update lower bound
- if (_lower_instr != b->_lower_instr || (_lower_instr && _lower != b->_lower)) {
- _lower_instr = NULL;
- _lower = min_jint;
- } else {
- _lower = MIN2(_lower, b->_lower);
- }
- // Update upper bound
- if (_upper_instr != b->_upper_instr || (_upper_instr && _upper != b->_upper)) {
- _upper_instr = NULL;
- _upper = max_jint;
- } else {
- _upper = MAX2(_upper, b->_upper);
- } +} + +// and +void RangeCheckEliminator::Bound::and_op(Bound *b) {
- // Update lower bound
- if (_lower_instr == b->_lower_instr) {
- _lower = MAX2(_lower, b->_lower);
- }
- if (b->has_lower()) {
- bool set = true;
- if (_lower_instr != NULL && b->_lower_instr != NULL) {
set = (_lower_instr->dominator_depth() > b->_lower_instr->dominator_depth());[](#l30.1312)
- }
- if (set) {
_lower = b->_lower;[](#l30.1315)
_lower_instr = b->_lower_instr;[](#l30.1316)
- }
- }
- // Update upper bound
- if (_upper_instr == b->_upper_instr) {
- _upper = MIN2(_upper, b->_upper);
- }
- if (b->has_upper()) {
- bool set = true;
- if (_upper_instr != NULL && b->_upper_instr != NULL) {
set = (_upper_instr->dominator_depth() > b->_upper_instr->dominator_depth());[](#l30.1326)
- }
- if (set) {
_upper = b->_upper;[](#l30.1329)
_upper_instr = b->_upper_instr;[](#l30.1330)
- }
- } +} + +// has_upper +bool RangeCheckEliminator::Bound::has_upper() {
- return _upper_instr != NULL || _upper < max_jint; +} + +// is_smaller +bool RangeCheckEliminator::Bound::is_smaller(Bound *b) {
- if (b->_lower_instr != _upper_instr) {
- return false;
- }
- return _upper < b->_lower; +} + +// has_lower +bool RangeCheckEliminator::Bound::has_lower() {
- return _lower_instr != NULL || _lower > min_jint; +} + +// in_array_bound +bool RangeCheckEliminator::in_array_bound(Bound *bound, Value array){
- if (!bound) return false;
- assert(array != NULL, "Must not be null!");
- assert(bound != NULL, "Must not be null!");
- if (bound->lower() >=0 && bound->lower_instr() == NULL && bound->upper() < 0 && bound->upper_instr() != NULL) {
- ArrayLength *len = bound->upper_instr()->as_ArrayLength();
- if (bound->upper_instr() == array || (len != NULL && len->array() == array)) {
return true;
- }
- }
- return false; +} + +// remove_lower +void RangeCheckEliminator::Bound::remove_lower() {
- _lower = min_jint;
- _lower_instr = NULL; +} + +// remove_upper +void RangeCheckEliminator::Bound::remove_upper() {
- _upper = max_jint;
- _upper_instr = NULL; +} + +// upper +int RangeCheckEliminator::Bound::upper() {
- return _upper; +} + +// lower +int RangeCheckEliminator::Bound::lower() {
- return _lower; +} + +// upper_instr +Value RangeCheckEliminator::Bound::upper_instr() {
- return _upper_instr; +} + +// lower_instr +Value RangeCheckEliminator::Bound::lower_instr() {
- return _lower_instr; +} + +// print +void RangeCheckEliminator::Bound::print() {
- tty->print("");
- if (this->_lower_instr || this->_lower != min_jint) {
- if (this->_lower_instr) {
tty->print("i%d", this->_lower_instr->id());[](#l30.1404)
if (this->_lower > 0) {[](#l30.1405)
tty->print("+%d", _lower);[](#l30.1406)
}[](#l30.1407)
if (this->_lower < 0) {[](#l30.1408)
tty->print("%d", _lower);[](#l30.1409)
}[](#l30.1410)
- } else {
tty->print("%d", _lower);[](#l30.1412)
- }
- tty->print(" <= ");
- }
- tty->print("x");
- if (this->_upper_instr || this->_upper != max_jint) {
- tty->print(" <= ");
- if (this->_upper_instr) {
tty->print("i%d", this->_upper_instr->id());[](#l30.1420)
if (this->_upper > 0) {[](#l30.1421)
tty->print("+%d", _upper);[](#l30.1422)
}[](#l30.1423)
if (this->_upper < 0) {[](#l30.1424)
tty->print("%d", _upper);[](#l30.1425)
}[](#l30.1426)
- } else {
tty->print("%d", _upper);[](#l30.1428)
- }
- } +} + +// Copy +RangeCheckEliminator::Bound *RangeCheckEliminator::Bound::copy() {
- Bound *b = new Bound();
- b->_lower = _lower;
- b->_lower_instr = _lower_instr;
- b->_upper = _upper;
- b->_upper_instr = _upper_instr;
- return b; +} + +#ifdef ASSERT +// Add assertion +void RangeCheckEliminator::Bound::add_assertion(Instruction *instruction, Instruction *position, int i, Value instr, Instruction::Condition cond) {
- Instruction *result = position;
- Instruction *compare_with = NULL;
- ValueStack *state = position->state_before();
- if (position->as_BlockEnd() && !position->as_Goto()) {
- state = position->as_BlockEnd()->state_before();
- }
- Instruction *instruction_before = position->prev();
- if (position->as_Return() && Compilation::current()->method()->is_synchronized() && instruction_before->as_MonitorExit()) {
- instruction_before = instruction_before->prev();
- }
- result = instruction_before;
- // Load constant only if needed
- Constant *constant = NULL;
- if (i != 0 || !instr) {
- constant = new Constant(new IntConstant(i));
- NOT_PRODUCT(constant->set_printable_bci(position->printable_bci()));
- result = result->insert_after(constant);
- compare_with = constant;
- } +
- if (instr) {
- assert(instr->type()->as_ObjectType() || instr->type()->as_IntType(), "Type must be array or integer!");
- compare_with = instr;
- // Load array length if necessary
- Instruction *op = instr;
- if (instr->type()->as_ObjectType()) {
assert(state, "must not be null");[](#l30.1472)
ArrayLength *length = new ArrayLength(instr, state->copy());[](#l30.1473)
NOT_PRODUCT(length->set_printable_bci(position->printable_bci()));[](#l30.1474)
length->set_exception_state(length->state_before());[](#l30.1475)
result = result->insert_after(length);[](#l30.1476)
op = length;[](#l30.1477)
compare_with = length;[](#l30.1478)
- }
- // Add operation only if necessary
- if (constant) {
ArithmeticOp *ao = new ArithmeticOp(Bytecodes::_iadd, constant, op, false, NULL);
NOT_PRODUCT(ao->set_printable_bci(position->printable_bci()));
result = result->insert_after(ao);
compare_with = ao;
// TODO: Check that add operation does not overflow
- }
- }
- assert(compare_with != NULL, "You have to compare with something!");
- assert(instruction != NULL, "Instruction must not be null!"); +
- if (instruction->type()->as_ObjectType()) {
- // Load array length if necessary
- Instruction *op = instruction;
- assert(state, "must not be null");
- ArrayLength *length = new ArrayLength(instruction, state->copy());
- length->set_exception_state(length->state_before());
- NOT_PRODUCT(length->set_printable_bci(position->printable_bci()));
- result = result->insert_after(length);
- instruction = length;
- } +
- Assert *assert = new Assert(instruction, cond, false, compare_with);
- NOT_PRODUCT(assert->set_printable_bci(position->printable_bci()));
- result->insert_after(assert); +} + +// Add assertions +void RangeCheckEliminator::add_assertions(Bound *bound, Instruction *instruction, Instruction *position) {
- // Add lower bound assertion
- if (bound->has_lower()) {
- bound->add_assertion(instruction, position, bound->lower(), bound->lower_instr(), Instruction::geq);
- }
- // Add upper bound assertion
- if (bound->has_upper()) {
- bound->add_assertion(instruction, position, bound->upper(), bound->upper_instr(), Instruction::leq);
- } +} +#endif +
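A minimal standalone sketch of the interval semantics behind or_op and and_op above, restricted to constant bounds (SimpleBound and every name in it are illustrative, not HotSpot types). It models the intent only: merging two control-flow paths (or_op) can only widen a bound, while combining two conditions that hold on the same path (and_op) can only narrow it. Note that the HotSpot and_op above also tracks instruction-relative bounds and, when both bounds are constants, ends up preferring the incoming bound b; the sketch does not reproduce that detail.

```cpp
#include <algorithm>
#include <climits>
#include <cstdio>

// Illustrative stand-in for RangeCheckEliminator::Bound, constants only.
struct SimpleBound {
  int lower;   // x >= lower
  int upper;   // x <= upper

  // Join of two control-flow paths: keep only what holds on both,
  // so the interval can only get wider.
  void or_op(const SimpleBound& b) {
    lower = std::min(lower, b.lower);
    upper = std::max(upper, b.upper);
  }

  // Two conditions on the same path: both must hold,
  // so the interval can only get narrower.
  void and_op(const SimpleBound& b) {
    lower = std::max(lower, b.lower);
    upper = std::min(upper, b.upper);
  }
};

int main() {
  SimpleBound from_if   = {0, INT_MAX};   // e.g. "i >= 0" proven by an If
  SimpleBound from_loop = {INT_MIN, 99};  // e.g. "i <= 99" proven by the loop exit

  SimpleBound both = from_if;
  both.and_op(from_loop);                 // both conditions hold here
  std::printf("and: [%d, %d]\n", both.lower, both.upper);      // [0, 99]

  SimpleBound either = from_if;
  either.or_op(from_loop);                // only one of the two paths was taken
  std::printf("or:  [%d, %d]\n", either.lower, either.upper);  // widest interval
  return 0;
}
```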
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/c1/c1_RangeCheckElimination.hpp Thu Mar 21 09:27:54 2013 +0100 @@ -0,0 +1,241 @@ +/*
- */ + +#ifndef SHARE_VM_C1_C1_RANGECHECKELIMINATION_HPP +#define SHARE_VM_C1_C1_RANGECHECKELIMINATION_HPP + +#include "c1/c1_Instruction.hpp" + +// Base class for range check elimination +class RangeCheckElimination : AllStatic { +public:
- static void eliminate(IR *ir); +}; + +// Implementation +class RangeCheckEliminator VALUE_OBJ_CLASS_SPEC { +private:
- int _number_of_instructions;
- bool _optimistic; // Insert predicates and deoptimize when they fail
- IR *_ir; +
- define_array(BlockBeginArray, BlockBegin*)
- define_stack(BlockBeginList, BlockBeginArray)
- define_stack(IntegerStack, intArray)
- define_array(IntegerMap, IntegerStack*) +
- class Verification : public _ValueObj /*VALUE_OBJ_CLASS_SPEC*/, public BlockClosure {
- private:
- IR *_ir;
- boolArray _used;
- BlockBeginList _current;
- BlockBeginList _successors;
- public:
- Verification(IR *ir);
- virtual void block_do(BlockBegin *block);
- bool can_reach(BlockBegin *start, BlockBegin *end, BlockBegin *dont_use = NULL);
- bool dominates(BlockBegin *dominator, BlockBegin *block);
- }; + +public:
- // Bounds for an instruction in the form x + c, where c is an integer
- // constant and x is another instruction
- class Bound : public CompilationResourceObj {
- private:
- int _upper;
- Value _upper_instr;
- int _lower;
- Value _lower_instr;
- public:
- Bound();
- Bound(Value v);
- Bound(Instruction::Condition cond, Value v, int constant = 0);
- Bound(int lower, Value lower_instr, int upper, Value upper_instr);
- ~Bound();
- void add_assertion(Instruction *instruction, Instruction *position, int i, Value instr, Instruction::Condition cond);
- int upper();
- Value upper_instr();
- int lower();
- Value lower_instr();
- void print();
- bool check_no_overflow(int const_value);
- void or_op(Bound *b);
- void and_op(Bound *b);
- bool has_upper();
- bool has_lower();
- void set_upper(int upper, Value upper_instr);
- void set_lower(int lower, Value lower_instr);
- bool is_smaller(Bound *b);
- void remove_upper();
- void remove_lower();
- void add_constant(int value);
- Bound *copy();
- private:
- void init();
- }; + +
- class Visitor : public InstructionVisitor {
- private:
- Bound *_bound;
- RangeCheckEliminator *_rce;
- public:
- void set_range_check_eliminator(RangeCheckEliminator *rce) { _rce = rce; }
- Bound *bound() const { return _bound; }
- void clear_bound() { _bound = NULL; }
- protected:
- // visitor functions
- void do_Constant (Constant* x);
- void do_IfOp (IfOp* x);
- void do_LogicOp (LogicOp* x);
- void do_ArithmeticOp (ArithmeticOp* x);
- void do_Phi (Phi* x);
- void do_StoreField (StoreField* x) { /* nothing to do */ };
- void do_StoreIndexed (StoreIndexed* x) { /* nothing to do */ };
- void do_MonitorEnter (MonitorEnter* x) { /* nothing to do */ };
- void do_MonitorExit (MonitorExit* x) { /* nothing to do */ };
- void do_Invoke (Invoke* x) { /* nothing to do */ };
- void do_UnsafePutRaw (UnsafePutRaw* x) { /* nothing to do */ };
- void do_UnsafePutObject(UnsafePutObject* x) { /* nothing to do */ };
- void do_Intrinsic (Intrinsic* x) { /* nothing to do */ };
- void do_Local (Local* x) { /* nothing to do */ };
- void do_LoadField (LoadField* x) { /* nothing to do */ };
- void do_ArrayLength (ArrayLength* x) { /* nothing to do */ };
- void do_LoadIndexed (LoadIndexed* x) { /* nothing to do */ };
- void do_NegateOp (NegateOp* x) { /* nothing to do */ };
- void do_ShiftOp (ShiftOp* x) { /* nothing to do */ };
- void do_CompareOp (CompareOp* x) { /* nothing to do */ };
- void do_Convert (Convert* x) { /* nothing to do */ };
- void do_NullCheck (NullCheck* x) { /* nothing to do */ };
- void do_TypeCast (TypeCast* x) { /* nothing to do */ };
- void do_NewInstance (NewInstance* x) { /* nothing to do */ };
- void do_NewTypeArray (NewTypeArray* x) { /* nothing to do */ };
- void do_NewObjectArray (NewObjectArray* x) { /* nothing to do */ };
- void do_NewMultiArray (NewMultiArray* x) { /* nothing to do */ };
- void do_CheckCast (CheckCast* x) { /* nothing to do */ };
- void do_InstanceOf (InstanceOf* x) { /* nothing to do */ };
- void do_BlockBegin (BlockBegin* x) { /* nothing to do */ };
- void do_Goto (Goto* x) { /* nothing to do */ };
- void do_If (If* x) { /* nothing to do */ };
- void do_IfInstanceOf (IfInstanceOf* x) { /* nothing to do */ };
- void do_TableSwitch (TableSwitch* x) { /* nothing to do */ };
- void do_LookupSwitch (LookupSwitch* x) { /* nothing to do */ };
- void do_Return (Return* x) { /* nothing to do */ };
- void do_Throw (Throw* x) { /* nothing to do */ };
- void do_Base (Base* x) { /* nothing to do */ };
- void do_OsrEntry (OsrEntry* x) { /* nothing to do */ };
- void do_ExceptionObject(ExceptionObject* x) { /* nothing to do */ };
- void do_RoundFP (RoundFP* x) { /* nothing to do */ };
- void do_UnsafeGetRaw (UnsafeGetRaw* x) { /* nothing to do */ };
- void do_UnsafeGetObject(UnsafeGetObject* x) { /* nothing to do */ };
- void do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) { /* nothing to do */ };
- void do_UnsafePrefetchRead (UnsafePrefetchRead* x) { /* nothing to do */ };
- void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ };
- void do_ProfileCall (ProfileCall* x) { /* nothing to do */ };
- void do_ProfileInvoke (ProfileInvoke* x) { /* nothing to do */ };
- void do_RuntimeCall (RuntimeCall* x) { /* nothing to do */ };
- void do_MemBar (MemBar* x) { /* nothing to do */ };
- void do_RangeCheckPredicate(RangeCheckPredicate* x) { /* nothing to do */ };
- void do_Assert (Assert* x) { /* nothing to do */ };
- }; + +#ifdef ASSERT
- void add_assertions(Bound *bound, Instruction *instruction, Instruction *position); +#endif +
- define_array(BoundArray, Bound *)
- define_stack(BoundStack, BoundArray)
- define_array(BoundMap, BoundStack *)
- define_array(AccessIndexedArray, AccessIndexed *)
- define_stack(AccessIndexedList, AccessIndexedArray)
- define_array(InstructionArray, Instruction *)
- define_stack(InstructionList, InstructionArray) +
- class AccessIndexedInfo : public CompilationResourceObj {
- public:
- AccessIndexedList *_list;
- int _min;
- int _max;
- }; +
- define_array(AccessIndexedInfoArray, AccessIndexedInfo *)
- BoundMap _bounds; // Mapping from Instruction's id to current bound
- AccessIndexedInfoArray _access_indexed_info; // Mapping from Instruction's id to AccessIndexedInfo for in block motion
- Visitor _visitor; + +public:
- RangeCheckEliminator(IR *ir); +
- IR *ir() const { return _ir; } +
- // Pass over the dominator tree to identify blocks where there's an opportunity for optimization
- bool set_process_block_flags(BlockBegin *block);
- // The core of the optimization work: pass over the dominator tree
- // to propagate bound information, insert predicate out of loops,
- // eliminate bound checks when possible and perform in block motion
- void calc_bounds(BlockBegin *block, BlockBegin *loop_header);
- // reorder bound checks within a block in order to eliminate some of them
- void in_block_motion(BlockBegin *block, AccessIndexedList &accessIndexed, InstructionList &arrays); +
- // update/access current bound
- void update_bound(IntegerStack &pushed, Value v, Instruction::Condition cond, Value value, int constant);
- void update_bound(IntegerStack &pushed, Value v, Bound *bound);
- Bound *get_bound(Value v); +
- bool loop_invariant(BlockBegin *loop_header, Instruction *instruction); // check for loop invariance
- void add_access_indexed_info(InstructionList &indices, int i, Value instruction, AccessIndexed *ai); // record indexed access for in block motion
- void remove_range_check(AccessIndexed *ai); // Mark this instruction as not needing a range check
- void add_if_condition(IntegerStack &pushed, Value x, Value y, Instruction::Condition condition); // Update bound for an If
- bool in_array_bound(Bound *bound, Value array); // Check whether bound is known to fall within array +
- // helper functions to work with predicates
- Instruction* insert_after(Instruction* insert_position, Instruction* instr, int bci);
- Instruction* predicate(Instruction* left, Instruction::Condition cond, Instruction* right, ValueStack* state, Instruction *insert_position, int bci=-1);
- Instruction* predicate_cmp_with_const(Instruction* instr, Instruction::Condition cond, int constant, ValueStack* state, Instruction *insert_position, int bci=1);
- Instruction* predicate_add(Instruction* left, int left_const, Instruction::Condition cond, Instruction* right, ValueStack* state, Instruction *insert_position, int bci=-1);
- Instruction* predicate_add_cmp_with_const(Instruction* left, int left_const, Instruction::Condition cond, int constant, ValueStack* state, Instruction *insert_position, int bci=-1); +
- void insert_deoptimization(ValueStack *state, Instruction *insert_position, Instruction *array_instr, // Add predicate
Instruction *length_instruction, Instruction *lower_instr, int lower,
Instruction *upper_instr, int upper, AccessIndexed *ai);
- bool is_ok_for_deoptimization(Instruction *insert_position, Instruction *array_instr, // Can we safely add a predicate?
Instruction *length_instr, Instruction *lower_instr,
int lower, Instruction *upper_instr, int upper);
- void process_if(IntegerStack &pushed, BlockBegin *block, If *cond); // process If Instruction
- void process_access_indexed(BlockBegin *loop_header, BlockBegin *block, AccessIndexed *ai); // process indexed access +
- void dump_condition_stack(BlockBegin *cur_block);
- static void print_statistics(); +}; + +#endif // SHARE_VM_C1_C1_RANGECHECKELIMINATION_HPP
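The header above reads more easily with the end goal in mind: in optimistic mode the eliminator replaces the per-iteration range checks of a loop with a single predicate inserted in front of the loop, and deoptimizes if that predicate ever fails. A standalone C++ analogue of that idea (not HotSpot code; the function names and the use of std::vector are purely illustrative, and the "deoptimized" fallback here is just a call to the checked variant):

```cpp
#include <cstdio>
#include <stdexcept>
#include <vector>

// Baseline: every access is range checked, like the unoptimized compiled code.
static int sum_checked(const std::vector<int>& a, int lo, int hi) {
  int s = 0;
  for (int i = lo; i < hi; i++) {
    s += a.at(i);                      // bounds check on every iteration
  }
  return s;
}

// "Optimistic" version: one hoisted predicate, unchecked accesses inside the loop.
static int sum_predicated(const std::vector<int>& a, int lo, int hi) {
  if (lo >= 0 && hi <= static_cast<int>(a.size())) {
    int s = 0;
    for (int i = lo; i < hi; i++) {
      s += a[i];                       // fast path, no per-access check
    }
    return s;
  }
  // Predicate failed: fall back to the fully checked path
  // (in C1 this is a deoptimization to the interpreter).
  return sum_checked(a, lo, hi);
}

int main() {
  std::vector<int> v = {1, 2, 3, 4, 5};
  std::printf("%d\n", sum_predicated(v, 1, 4));   // fast path, prints 9
  try {
    sum_predicated(v, 1, 10);                     // predicate fails, slow path throws
  } catch (const std::out_of_range&) {
    std::printf("range check failed on the slow path\n");
  }
  return 0;
}
```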
--- a/src/share/vm/c1/c1_Runtime1.cpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/share/vm/c1/c1_Runtime1.cpp Thu Mar 21 09:27:54 2013 +0100 @@ -1330,6 +1330,50 @@ return (k != NULL && obj != NULL && obj->is_a(k)) ? 1 : 0; JRT_END +JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* thread))
- ResourceMark rm; +
- assert(!TieredCompilation, "incompatible with tiered compilation"); +
- RegisterMap reg_map(thread, false);
- frame runtime_frame = thread->last_frame();
- frame caller_frame = runtime_frame.sender(®_map); +
- nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
- assert (nm != NULL, "no more nmethod?");
- nm->make_not_entrant(); +
- methodHandle m(nm->method());
- MethodData* mdo = m->method_data(); +
- if (mdo == NULL && !HAS_PENDING_EXCEPTION) {
- // Build an MDO. Ignore errors like OutOfMemory;
- // that simply means we won't have an MDO to update.
- Method::build_interpreter_method_data(m, THREAD);
- if (HAS_PENDING_EXCEPTION) {
assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
CLEAR_PENDING_EXCEPTION;
- }
- mdo = m->method_data();
- } +
- if (mdo != NULL) {
- mdo->inc_trap_count(Deoptimization::Reason_none);
- } +
- if (TracePredicateFailedTraps) {
- stringStream ss1, ss2;
- vframeStream vfst(thread);
- methodHandle inlinee = methodHandle(vfst.method());
- inlinee->print_short_name(&ss1);
- m->print_short_name(&ss2);
- tty->print_cr("Predicate failed trap in method %s at bci %d inlined in %s at pc %x", ss1.as_string(), vfst.bci(), ss2.as_string(), caller_frame.pc());
- } + +
- Deoptimization::deoptimize_frame(thread, caller_frame.id()); + +JRT_END
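predicate_failed_trap above is the runtime half of that scheme: the compiled method is thrown away (make_not_entrant), a trap counter in the method's profile is bumped so a later compilation can be less aggressive, and the caller frame is deoptimized. A loose standalone analogue of that bookkeeping, with every type and field invented for illustration:

```cpp
#include <cstdio>

struct MethodProfile {
  int predicate_failed_traps;          // stands in for mdo->inc_trap_count(...)
};

struct CompiledMethod {
  bool entrant;                        // stands in for nm->make_not_entrant()
  MethodProfile* profile;
};

// Fallback that keeps every range check (stands in for the interpreter).
static int interpreted_sum(const int* a, int len, int lo, int hi) {
  int s = 0;
  for (int i = lo; i < hi; i++) {
    if (i < 0 || i >= len) return -1;
    s += a[i];
  }
  return s;
}

// Optimistically compiled code: one predicate, unchecked loop body.
static int compiled_sum(CompiledMethod& cm, const int* a, int len, int lo, int hi) {
  if (!(lo >= 0 && hi <= len)) {
    cm.entrant = false;                          // invalidate this compiled version
    cm.profile->predicate_failed_traps++;        // remember why it failed
    return interpreted_sum(a, len, lo, hi);      // "deoptimize" to the checked path
  }
  int s = 0;
  for (int i = lo; i < hi; i++) s += a[i];
  return s;
}

int main() {
  int data[] = {1, 2, 3, 4};
  MethodProfile profile = {0};
  CompiledMethod cm = {true, &profile};
  std::printf("%d\n", compiled_sum(cm, data, 4, 0, 4));   // fast path: 10
  std::printf("%d\n", compiled_sum(cm, data, 4, 0, 8));   // predicate fails: -1
  std::printf("traps=%d entrant=%d\n", profile.predicate_failed_traps, (int)cm.entrant);
  return 0;
}
```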
#ifndef PRODUCT void Runtime1::print_statistics() {
--- a/src/share/vm/c1/c1_Runtime1.hpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/share/vm/c1/c1_Runtime1.hpp Thu Mar 21 09:27:54 2013 +0100 @@ -71,6 +71,7 @@ stub(g1_post_barrier_slow) \ stub(fpu2long_stub) \ stub(counter_overflow) \ + stub(predicate_failed_trap) \
#define DECLARE_STUB_ID(x) x ## _id , @@ -190,6 +191,8 @@ static void oop_arraycopy(HeapWord* src, HeapWord* dst, int length); static int is_instance_of(oopDesc* mirror, oopDesc* obj);
- static void predicate_failed_trap(JavaThread* thread); + static void print_statistics() PRODUCT_RETURN; };
--- a/src/share/vm/c1/c1_ValueMap.cpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/share/vm/c1/c1_ValueMap.cpp Thu Mar 21 09:27:54 2013 +0100 @@ -26,9 +26,9 @@ #include "c1/c1_Canonicalizer.hpp" #include "c1/c1_IR.hpp" #include "c1/c1_ValueMap.hpp" +#include "c1/c1_ValueStack.hpp" #include "utilities/bitMap.inline.hpp" - #ifndef PRODUCT int ValueMap::_number_of_finds = 0; @@ -192,10 +192,6 @@ && lf->field()->holder() == field->holder() && (all_offsets || lf->field()->offset() == field->offset()); -#define MUST_KILL_EXCEPTION(must_kill, entry, value) \
- assert(entry->nesting() < nesting(), "must not find bigger nesting than current"); \
- bool must_kill = (entry->nesting() == nesting() - 1); - void ValueMap::kill_memory() { GENERIC_KILL_VALUE(MUST_KILL_MEMORY); @@ -209,11 +205,6 @@ GENERIC_KILL_VALUE(MUST_KILL_FIELD); } -void ValueMap::kill_exception() {
- GENERIC_KILL_VALUE(MUST_KILL_EXCEPTION); -} - - void ValueMap::kill_map(ValueMap* map) { assert(is_global_value_numbering(), "only for global value numbering"); _killed_values.set_union(&map->_killed_values); @@ -274,6 +265,8 @@ GlobalValueNumbering* _gvn; BlockList _loop_blocks; bool _too_complicated_loop;
- bool _has_field_store[T_ARRAY + 1];
- bool _has_indexed_store[T_ARRAY + 1]; // simplified access to methods of GlobalValueNumbering ValueMap* current_map() { return _gvn->current_map(); } @@ -281,8 +274,16 @@ // implementation for abstract methods of ValueNumberingVisitor void kill_memory() { _too_complicated_loop = true; }
- void kill_field(ciField* field, bool all_offsets) { current_map()->kill_field(field, all_offsets); };
- void kill_array(ValueType* type) { current_map()->kill_array(type); };
- void kill_field(ciField* field, bool all_offsets) {
- current_map()->kill_field(field, all_offsets);
- assert(field->type()->basic_type() >= 0 && field->type()->basic_type() <= T_ARRAY, "Invalid type");
- _has_field_store[field->type()->basic_type()] = true;
- }
- void kill_array(ValueType* type) {
- current_map()->kill_array(type);
- BasicType basic_type = as_BasicType(type); assert(basic_type >= 0 && basic_type <= T_ARRAY, "Invalid type");
- _has_indexed_store[basic_type] = true;
- } public: ShortLoopOptimizer(GlobalValueNumbering* gvn) @@ -290,11 +291,141 @@ , _loop_blocks(ValueMapMaxLoopSize) , _too_complicated_loop(false) {
- for (int i=0; i<= T_ARRAY; i++){
_has_field_store[i] = false;
_has_indexed_store[i] = false;
- }
- } +
- bool has_field_store(BasicType type) {
- assert(type >= 0 && type <= T_ARRAY, "Invalid type");
- return _has_field_store[type];
- } +
- bool has_indexed_store(BasicType type) {
- assert(type >= 0 && type <= T_ARRAY, "Invalid type");
- return _has_indexed_store[type];
} bool process(BlockBegin* loop_header); }; +class LoopInvariantCodeMotion : public StackObj {
- private:
- GlobalValueNumbering* _gvn;
- ShortLoopOptimizer* _short_loop_optimizer;
- Instruction* _insertion_point;
- ValueStack * _state; +
- void set_invariant(Value v) const { _gvn->set_processed(v); }
- bool is_invariant(Value v) const { return _gvn->is_processed(v); } +
- void process_block(BlockBegin* block); +
- public:
- LoopInvariantCodeMotion(ShortLoopOptimizer *slo, GlobalValueNumbering *gvn, BlockBegin* loop_header, BlockList* loop_blocks); +}; + +LoopInvariantCodeMotion::LoopInvariantCodeMotion(ShortLoopOptimizer *slo, GlobalValueNumbering *gvn, BlockBegin* loop_header, BlockList* loop_blocks)
- : _gvn(gvn), _short_loop_optimizer(slo) { +
- TRACE_VALUE_NUMBERING(tty->print_cr("using loop invariant code motion loop_header = %d", loop_header->block_id()));
- TRACE_VALUE_NUMBERING(tty->print_cr("** loop invariant code motion for short loop B%d", loop_header->block_id())); +
- BlockBegin* insertion_block = loop_header->dominator();
- if (insertion_block->number_of_preds() == 0) {
- return; // only the entry block does not have a predecessor
- } +
- assert(insertion_block->end()->as_Base() == NULL, "cannot insert into entry block");
- _insertion_point = insertion_block->end()->prev(); +
- BlockEnd *block_end = insertion_block->end();
- _state = block_end->state_before(); +
- if (!_state) {
- // If, TableSwitch and LookupSwitch always have state_before when
- // loop invariant code motion happens.
- assert(block_end->as_Goto(), "Block has to be goto");
- _state = block_end->state();
- } +
- // the loop_blocks are filled by going backward from the loop header, so this processing order is best
- assert(loop_blocks->at(0) == loop_header, "loop header must be first loop block");
- process_block(loop_header);
- for (int i = loop_blocks->length() - 1; i >= 1; i--) {
- process_block(loop_blocks->at(i));
- } +} + +void LoopInvariantCodeMotion::process_block(BlockBegin* block) {
- TRACE_VALUE_NUMBERING(tty->print_cr("processing block B%d", block->block_id())); +
- Instruction* prev = block;
- Instruction* cur = block->next(); +
- while (cur != NULL) { +
- // determine if cur instruction is loop invariant
- // only selected instruction types are processed here
- bool cur_invariant = false;
- if (cur->as_Constant() != NULL) {
cur_invariant = !cur->can_trap();
- } else if (cur->as_ArithmeticOp() != NULL || cur->as_LogicOp() != NULL || cur->as_ShiftOp() != NULL) {
assert(cur->as_Op2() != NULL, "must be Op2");
Op2* op2 = (Op2*)cur;
cur_invariant = !op2->can_trap() && is_invariant(op2->x()) && is_invariant(op2->y());
- } else if (cur->as_LoadField() != NULL) {
LoadField* lf = (LoadField*)cur;
// deoptimizes on NullPointerException
cur_invariant = !lf->needs_patching() && !lf->field()->is_volatile() && !_short_loop_optimizer->has_field_store(lf->field()->type()->basic_type()) && is_invariant(lf->obj());
- } else if (cur->as_ArrayLength() != NULL) {
ArrayLength *length = cur->as_ArrayLength();
cur_invariant = is_invariant(length->array());
- } else if (cur->as_LoadIndexed() != NULL) {
LoadIndexed *li = (LoadIndexed *)cur->as_LoadIndexed();
cur_invariant = !_short_loop_optimizer->has_indexed_store(as_BasicType(cur->type())) && is_invariant(li->array()) && is_invariant(li->index());
- }
- if (cur_invariant) {
// perform value numbering and mark instruction as loop-invariant
_gvn->substitute(cur);
if (cur->as_Constant() == NULL) {
// ensure that code for non-constant instructions is always generated
cur->pin();
}
// remove cur instruction from loop block and append it to block before loop
Instruction* next = cur->next();
Instruction* in = _insertion_point->next();
_insertion_point = _insertion_point->set_next(cur);
cur->set_next(in);
// Deoptimize on exception
cur->set_flag(Instruction::DeoptimizeOnException, true);
// Clear exception handlers
cur->set_exception_handlers(NULL);
TRACE_VALUE_NUMBERING(tty->print_cr("Instruction %c%d is loop invariant", cur->type()->tchar(), cur->id()));
if (cur->state_before() != NULL) {
cur->set_state_before(_state->copy());
}
if (cur->exception_state() != NULL) {
cur->set_exception_state(_state->copy());
}
cur = prev->set_next(next);
- } else {
prev = cur;
cur = cur->next();
- }
- } +} bool ShortLoopOptimizer::process(BlockBegin* loop_header) { TRACE_VALUE_NUMBERING(tty->print_cr("** loop header block")); @@ -316,6 +447,10 @@ for (int j = block->number_of_preds() - 1; j >= 0; j--) { BlockBegin* pred = block->pred_at(j);
if (pred->is_set(BlockBegin::osr_entry_flag)) {
return false;
}
+ ValueMap* pred_map = value_map_of(pred); if (pred_map != NULL) { current_map()->kill_map(pred_map); @@ -336,6 +471,12 @@ } }
- bool optimistic = this->_gvn->compilation()->is_optimistic(); +
- if (UseLoopInvariantCodeMotion && optimistic) {
- LoopInvariantCodeMotion code_motion(this, _gvn, loop_header, &_loop_blocks);
- } + TRACE_VALUE_NUMBERING(tty->print_cr("** loop successfully optimized")); return true; } @@ -344,11 +485,11 @@ GlobalValueNumbering::GlobalValueNumbering(IR* ir) : _current_map(NULL) , _value_maps(ir->linear_scan_order()->length(), NULL)
- , _compilation(ir->compilation()) { TRACE_VALUE_NUMBERING(tty->print_cr("****** start of global value numbering")); ShortLoopOptimizer short_loop_optimizer(this);
- int subst_count = 0; BlockList* blocks = ir->linear_scan_order(); int num_blocks = blocks->length(); @@ -357,6 +498,12 @@ assert(start_block == ir->start() && start_block->number_of_preds() == 0 && start_block->dominator() == NULL, "must be start block"); assert(start_block->next()->as_Base() != NULL && start_block->next()->next() == NULL, "start block must not have instructions");
- // method parameters are not linked in the instructions list, so process them separately
- for_each_state_value(start_block->state(), value,
assert(value->as_Local() != NULL, "only method parameters allowed");
set_processed(value);
- ); + // initial, empty value map with nesting 0 set_value_map_of(start_block, new ValueMap()); @@ -374,7 +521,7 @@ // create new value map with increased nesting _current_map = new ValueMap(value_map_of(dominator));
- if (num_preds == 1 && !block->is_set(BlockBegin::exception_entry_flag)) { assert(dominator == block->pred_at(0), "dominator must be equal to predecessor"); // nothing to do here @@ -403,36 +550,41 @@ } }
- if (block->is_set(BlockBegin::exception_entry_flag)) {
current_map()->kill_exception();
- }
- // phi functions are not linked in the instructions list, so process them separately
- for_each_phi_fun(block, phi,
set_processed(phi);
- );
TRACE_VALUE_NUMBERING(tty->print("value map before processing block: "); current_map()->print()); // visit all instructions of this block for (Value instr = block->next(); instr != NULL; instr = instr->next()) {
assert(!instr->has_subst(), "substitution already set");[](#l34.282)
- // check if instruction kills any values instr->visit(this); -
if (instr->hash() != 0) {[](#l34.287)
Value f = current_map()->find_insert(instr);[](#l34.288)
if (f != instr) {[](#l34.289)
assert(!f->has_subst(), "can't have a substitution");[](#l34.290)
instr->set_subst(f);[](#l34.291)
subst_count++;[](#l34.292)
}[](#l34.293)
}[](#l34.294)
// perform actual value numbering[](#l34.295)
} // remember value map for successors set_value_map_of(block, current_map()); }substitute(instr);[](#l34.296)
- if (_has_substitutions) { SubstitutionResolver resolver(ir); } TRACE_VALUE_NUMBERING(tty->print("****** end of global value numbering. "); ValueMap::print_statistics()); } + +void GlobalValueNumbering::substitute(Instruction* instr) {
- assert(!instr->has_subst(), "substitution already set");
- Value subst = current_map()->find_insert(instr);
- if (subst != instr) {
- assert(!subst->has_subst(), "can't have a substitution");
- TRACE_VALUE_NUMBERING(tty->print_cr("substitution for %d set to %d", instr->id(), subst->id()));
- instr->set_subst(subst);
- _has_substitutions = true;
- }
- set_processed(instr); +}
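The splice in LoopInvariantCodeMotion::process_block above is ordinary singly linked list surgery on the block's instruction list: unlink the invariant instruction from the loop block and link it in after the insertion point that precedes the loop. A tiny standalone model of the same moves (Node and its helpers are invented; set_next returns the newly linked node, as Instruction::set_next appears to do above):

```cpp
#include <cstdio>

struct Node {
  const char* name;
  Node* nxt;
  Node* set_next(Node* n) { nxt = n; return n; }
};

static void print_list(const char* label, Node* head) {
  std::printf("%s:", label);
  for (Node* n = head; n != nullptr; n = n->nxt) std::printf(" %s", n->name);
  std::printf("\n");
}

int main() {
  Node pre = {"pre", nullptr};                 // last instruction before the loop
  Node a   = {"a", nullptr};
  Node inv = {"inv", nullptr};                 // the loop-invariant instruction
  Node b   = {"b", nullptr};
  a.set_next(&inv);
  inv.set_next(&b);                            // loop block: a -> inv -> b
  Node* insertion_point = &pre;

  print_list("loop before", &a);

  // Mirrors process_block: prev = a, cur = inv.
  Node* prev = &a;
  Node* cur  = &inv;
  Node* next = cur->nxt;                       // Instruction* next = cur->next();
  Node* in   = insertion_point->nxt;           // Instruction* in = _insertion_point->next();
  insertion_point = insertion_point->set_next(cur);  // splice cur in after the insertion point
  cur->set_next(in);
  cur = prev->set_next(next);                  // unlink cur from the loop block

  print_list("loop after ", &a);
  print_list("before loop", &pre);
  return 0;
}
```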
--- a/src/share/vm/c1/c1_ValueMap.hpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/share/vm/c1/c1_ValueMap.hpp Thu Mar 21 09:27:54 2013 +0100 @@ -206,6 +206,8 @@ void do_ProfileInvoke (ProfileInvoke* x) { /* nothing to do */ }; void do_RuntimeCall (RuntimeCall* x) { /* nothing to do */ }; void do_MemBar (MemBar* x) { /* nothing to do */ };
- void do_RangeCheckPredicate(RangeCheckPredicate* x) { /* nothing to do */ };
- void do_Assert (Assert* x) { /* nothing to do */ }; }; @@ -225,15 +227,22 @@ class GlobalValueNumbering: public ValueNumberingVisitor { private:
- Compilation* _compilation; // compilation data ValueMap* _current_map; // value map of current block ValueMapArray _value_maps; // list of value maps for all blocks
- ValueSet _processed_values; // marker for instructions that were already processed
- bool _has_substitutions; // set to true when substitutions must be resolved public: // accessors
- Compilation* compilation() const { return _compilation; } ValueMap* current_map() { return _current_map; } ValueMap* value_map_of(BlockBegin* block) { return _value_maps.at(block->linear_scan_number()); } void set_value_map_of(BlockBegin* block, ValueMap* map) { assert(value_map_of(block) == NULL, ""); _value_maps.at_put(block->linear_scan_number(), map); }
- bool is_processed(Value v) { return _processed_values.contains(v); }
- void set_processed(Value v) { _processed_values.put(v); } + // implementation for abstract methods of ValueNumberingVisitor void kill_memory() { current_map()->kill_memory(); } void kill_field(ciField* field, bool all_offsets) { current_map()->kill_field(field, all_offsets); } @@ -241,6 +250,7 @@ // main entry point that performs global value numbering GlobalValueNumbering(IR* ir);
- void substitute(Instruction* instr); // substitute instruction if it is contained in current value map };
#endif // SHARE_VM_C1_C1_VALUEMAP_HPP
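The substitute() declared here, together with ValueMap::find_insert(), is classic value numbering: an instruction is looked up by its operation and operands, and if an equivalent instruction was already recorded, the later one is replaced by the earlier one. A toy standalone illustration of that lookup (the tuple key and the ordered map are stand-ins chosen for brevity, not HotSpot's hashed ValueMap):

```cpp
#include <cstdio>
#include <map>
#include <string>
#include <tuple>

// Key an expression by its opcode and operand ids.
using Key = std::tuple<std::string, int, int>;

struct ValueNumberer {
  std::map<Key, int> table;   // key -> id of the first instruction computing it

  // Return the id to use: an existing equivalent instruction, or the new one.
  int find_insert(const Key& k, int id) {
    auto it = table.find(k);
    if (it != table.end()) return it->second;
    table[k] = id;
    return id;
  }
};

int main() {
  ValueNumberer vn;
  // i3 = add(i1, i2); later i7 = add(i1, i2) is substituted by i3.
  int s3 = vn.find_insert({"add", 1, 2}, 3);
  int s7 = vn.find_insert({"add", 1, 2}, 7);
  std::printf("i3 -> i%d, i7 -> i%d\n", s3, s7);   // i3 -> i3, i7 -> i3
  return 0;
}
```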
--- a/src/share/vm/c1/c1_globals.hpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/share/vm/c1/c1_globals.hpp Thu Mar 21 09:27:54 2013 +0100 @@ -119,6 +119,24 @@ develop(bool, UseGlobalValueNumbering, true, \ "Use Global Value Numbering (separate phase)") \ \
- product(bool, UseLoopInvariantCodeMotion, true, \
"Simple loop invariant code motion for short loops during GVN") \
\
- develop(bool, TracePredicateFailedTraps, false, \
"trace runtime traps caused by predicate failure") \
\
- develop(bool, StressLoopInvariantCodeMotion, false, \
"stress loop invariant code motion") \
\
- develop(bool, TraceRangeCheckElimination, false, \
"Trace Range Check Elimination") \
\
- develop(bool, AssertRangeCheckElimination, false, \
"Assert Range Check Elimination") \
\
- develop(bool, StressRangeCheckElimination, false, \
"stress Range Check Elimination") \
\
develop(bool, PrintValueNumbering, false, \
"Print Value Numbering") \
--- a/src/share/vm/compiler/compileBroker.cpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/share/vm/compiler/compileBroker.cpp Thu Mar 21 09:27:54 2013 +0100 @@ -2166,6 +2166,9 @@ comp->print_timers(); } tty->cr();
- tty->print_cr(" Total compiled methods : %6d methods", CompileBroker::_total_compile_count);
- tty->print_cr(" Standard compilation : %6d methods", CompileBroker::_total_standard_compile_count);
- tty->print_cr(" On stack replacement : %6d methods", CompileBroker::_total_osr_compile_count); int tcb = CompileBroker::_sum_osr_bytes_compiled + CompileBroker::_sum_standard_bytes_compiled; tty->print_cr(" Total compiled bytecodes : %6d bytes", tcb); tty->print_cr(" Standard compilation : %6d bytes", CompileBroker::_sum_standard_bytes_compiled);
--- a/src/share/vm/oops/instanceKlass.cpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/share/vm/oops/instanceKlass.cpp Thu Mar 21 09:27:54 2013 +0100 @@ -2228,8 +2228,6 @@ } void InstanceKlass::clean_method_data(BoolObjectClosure* is_alive) { -#ifdef COMPILER2
- // Currently only used by C2. for (int m = 0; m < methods()->length(); m++) { MethodData* mdo = methods()->at(m)->method_data(); if (mdo != NULL) { @@ -2240,15 +2238,6 @@ } } } -#else -#ifdef ASSERT
- // Verify that we haven't started to use MDOs for C1.
- for (int m = 0; m < methods()->length(); m++) {
- MethodData* mdo = methods()->at(m)->method_data();
- assert(mdo == NULL, "Didn't expect C1 to use MDOs");
- } -#endif // ASSERT -#endif // !COMPILER2 }
--- a/src/share/vm/oops/methodData.cpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/share/vm/oops/methodData.cpp Thu Mar 21 09:27:54 2013 +0100 @@ -392,6 +392,9 @@ } int MethodData::bytecode_cell_count(Bytecodes::Code code) { +#if defined(COMPILER1) && !defined(COMPILER2)
- return no_profile_data; +#else switch (code) { case Bytecodes::_checkcast: case Bytecodes::_instanceof: @@ -438,6 +441,7 @@ return variable_cell_count; } return no_profile_data; +#endif } // Compute the size of the profiling information corresponding to @@ -509,6 +513,9 @@ // the segment in bytes. int MethodData::initialize_data(BytecodeStream* stream, int data_index) {
+#if defined(COMPILER1) && !defined(COMPILER2)
- return 0; +#else int cell_count = -1; int tag = DataLayout::no_tag; DataLayout* data_layout = data_layout_at(data_index); @@ -587,6 +594,7 @@ assert(!bytecode_has_profile(c), "agree w/ !BHP"); return 0; } +#endif } // Get the data at an arbitrary (sort of) data index.
--- a/src/share/vm/runtime/globals.hpp Wed Mar 20 17:04:45 2013 -0700 +++ b/src/share/vm/runtime/globals.hpp Thu Mar 21 09:27:54 2013 +0100 @@ -2515,7 +2515,7 @@ "disable locking assertions (for speed)") \
\
product(bool, RangeCheckElimination, true, \
"Split loop iterations to eliminate range checks") \
"Eliminate range checks") \
\
develop_pd(bool, UncommonNullCast, \
"track occurrences of null in casts; adjust compiler tactics") \