7174218: remove AtomicLongCSImpl intrinsics

diff -r 8f37087fc13f src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp

--- a/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp Tue Jun 05 10:15:27 2012 +0200

+++ b/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp Wed Jun 06 01:15:36 2012 +0800

@@ -644,30 +644,6 @@

}

-void LIRGenerator::do_AttemptUpdate(Intrinsic* x) {

- assert(x->number_of_arguments() == 3, "wrong type");

- LIRItem obj (x->argument_at(0), this); // AtomicLong object

- LIRItem cmp_value (x->argument_at(1), this); // value to compare with field

- LIRItem new_value (x->argument_at(2), this); // replace field with new_value if it matches cmp_value

-

- obj.load_item();

- cmp_value.load_item();

- new_value.load_item();

-

- // generate compare-and-swap and produce zero condition if swap occurs

- int value_offset = sun_misc_AtomicLongCSImpl::value_offset();

- LIR_Opr addr = FrameMap::O7_opr;

- __ add(obj.result(), LIR_OprFact::intConst(value_offset), addr);

- LIR_Opr t1 = FrameMap::G1_opr; // temp for 64-bit value

- LIR_Opr t2 = FrameMap::G3_opr; // temp for 64-bit value

- __ cas_long(addr, cmp_value.result(), new_value.result(), t1, t2);

-

- // generate conditional move of boolean result

- LIR_Opr result = rlock_result(x);

- __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result, T_LONG);

-}

-

-

void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {

assert(x->number_of_arguments() == 4, "wrong type");

LIRItem obj (x->argument_at(0), this); // object
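
Note (not part of the changeset): with do_AttemptUpdate() gone, 64-bit compare-and-swap in C1 is served only by the do_CompareAndSwap() path whose context lines appear above. That intrinsic backs Unsafe.compareAndSwapLong, which is what java.util.concurrent.atomic.AtomicLong used in this era of the JDK. A minimal Java sketch of the replacement usage, assuming nothing beyond the public j.u.c API:

    import java.util.concurrent.atomic.AtomicLong;

    class CasExample {
        private final AtomicLong counter = new AtomicLong();

        // AtomicLong.compareAndSet(long, long) reaches the VM through
        // Unsafe.compareAndSwapLong, i.e. the retained CompareAndSwapL
        // intrinsic, not the removed attemptUpdate one.
        boolean bumpIfUnchanged(long expected) {
            return counter.compareAndSet(expected, expected + 1);
        }
    }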

diff -r 8f37087fc13f src/cpu/sparc/vm/sparc.ad

--- a/src/cpu/sparc/vm/sparc.ad Tue Jun 05 10:15:27 2012 +0200

+++ b/src/cpu/sparc/vm/sparc.ad Wed Jun 06 01:15:36 2012 +0800

@@ -827,7 +827,6 @@

// a Load

// inputs are (0:control, 1:memory, 2:address)

if (!(n->ideal_Opcode()==ld_op) && // Following are special cases

- !(n->ideal_Opcode()==Op_LoadLLocked && ld_op==Op_LoadI) &&

!(n->ideal_Opcode()==Op_LoadPLocked && ld_op==Op_LoadP) &&

!(n->ideal_Opcode()==Op_LoadI && ld_op==Op_LoadF) &&

!(n->ideal_Opcode()==Op_LoadF && ld_op==Op_LoadI) &&

@@ -7306,17 +7305,6 @@

ins_pipe(iload_mem);

%}

-// LoadL-locked. Same as a regular long load when used with a compare-swap

-instruct loadLLocked(iRegL dst, memory mem) %{

- match(Set dst (LoadLLocked mem));

- ins_cost(MEMORY_REF_COST);

- size(4);

- format %{ "LDX    $mem,$dst\t! long" %}

- opcode(Assembler::ldx_op3);

- ins_encode(simple_form3_mem_reg( mem, dst ) );

- ins_pipe(iload_mem);

-%}

-

instruct storePConditional( iRegP heap_top_ptr, iRegP oldval, g3RegP newval, flagsRegP pcc ) %{

match(Set pcc (StorePConditional heap_top_ptr (Binary oldval newval)));

effect( KILL newval );

diff -r 8f37087fc13f src/cpu/x86/vm/c1_LIRGenerator_x86.cpp

--- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Tue Jun 05 10:15:27 2012 +0200

+++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Wed Jun 06 01:15:36 2012 +0800

@@ -718,35 +718,6 @@

}

-void LIRGenerator::do_AttemptUpdate(Intrinsic* x) {

- assert(x->number_of_arguments() == 3, "wrong type");

- LIRItem obj (x->argument_at(0), this); // AtomicLong object

- LIRItem cmp_value (x->argument_at(1), this); // value to compare with field

- LIRItem new_value (x->argument_at(2), this); // replace field with new_value if it matches cmp_value

-

- // compare value must be in rdx,eax (hi,lo); may be destroyed by cmpxchg8 instruction

- cmp_value.load_item_force(FrameMap::long0_opr);

-

- // new value must be in rcx,ebx (hi,lo)

- new_value.load_item_force(FrameMap::long1_opr);

-

- // object pointer register is overwritten with field address

- obj.load_item();

-

- // generate compare-and-swap; produces zero condition if swap occurs

- int value_offset = sun_misc_AtomicLongCSImpl::value_offset();

- LIR_Opr addr = new_pointer_register();

- __ leal(LIR_OprFact::address(new LIR_Address(obj.result(), value_offset, T_LONG)), addr);

- LIR_Opr t1 = LIR_OprFact::illegalOpr; // no temp needed

- LIR_Opr t2 = LIR_OprFact::illegalOpr; // no temp needed

- __ cas_long(addr, cmp_value.result(), new_value.result(), t1, t2);

-

- // generate conditional move of boolean result

- LIR_Opr result = rlock_result(x);

- __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result, T_LONG);

-}

-

-

void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {

assert(x->number_of_arguments() == 4, "wrong type");

LIRItem obj (x->argument_at(0), this); // object

diff -r 8f37087fc13f src/cpu/x86/vm/x86_32.ad

--- a/src/cpu/x86/vm/x86_32.ad Tue Jun 05 10:15:27 2012 +0200

+++ b/src/cpu/x86/vm/x86_32.ad Wed Jun 06 01:15:36 2012 +0800

@@ -7796,50 +7796,6 @@

ins_pipe( ialu_reg_mem );

%}

-// LoadLong-locked - same as a volatile long load when used with compare-swap

-instruct loadLLocked(stackSlotL dst, memory mem) %{

- predicate(UseSSE<=1);

- match(Set dst (LoadLLocked mem));

-

- ins_cost(200);

- format %{ "FILD $mem\t# Atomic volatile long load\n\t"

- "FISTp $dst" %}

- ins_encode(enc_loadL_volatile(mem,dst));

- ins_pipe( fpu_reg_mem );

-%}

-

-instruct loadLX_Locked(stackSlotL dst, memory mem, regD tmp) %{

- predicate(UseSSE>=2);

- match(Set dst (LoadLLocked mem));

- effect(TEMP tmp);

- ins_cost(180);

- format %{ "MOVSD  $tmp,$mem\t# Atomic volatile long load\n\t"

- "MOVSD  $dst,$tmp" %}

- ins_encode %{

- __ movdbl($tmp$$XMMRegister, $mem$$Address);

- __ movdbl(Address(rsp, $dst$$disp), $tmp$$XMMRegister);

- %}

- ins_pipe( pipe_slow );

-%}

-

-instruct loadLX_reg_Locked(eRegL dst, memory mem, regD tmp) %{

- predicate(UseSSE>=2);

- match(Set dst (LoadLLocked mem));

- effect(TEMP tmp);

- ins_cost(160);

- format %{ "MOVSD  $tmp,$mem\t# Atomic volatile long load\n\t"

- "MOVD   $dst.lo,$tmp\n\t"

- "PSRLQ $tmp,32\n\t"

- "MOVD   $dst.hi,$tmp" %}

- ins_encode %{

- __ movdbl($tmp$$XMMRegister, $mem$$Address);

- __ movdl($dst$$Register, $tmp$$XMMRegister);

- __ psrlq($tmp$$XMMRegister, 32);

- __ movdl(HIGH_FROM_LOW($dst$$Register), $tmp$$XMMRegister);

- %}

- ins_pipe( pipe_slow );

-%}

-

// Conditional-store of the updated heap-top.

// Used during allocation of the shared heap.

// Sets flags (EQ) on success. Implemented with a CMPXCHG on Intel.
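
Background for the deleted x86_32 patterns (not part of the changeset): a 64-bit load is not naturally atomic on 32-bit x86, so the removed loadLLocked/loadLX_Locked forms used FILD/FISTP or MOVSD to obtain a single 8-byte read. The Java-level rule they were serving is the long/double tearing allowance of JLS 17.7, sketched below with hypothetical field names:

    // Plain long reads may tear on 32-bit VMs; volatile (and AtomicLong) reads may not.
    class TearingSketch {
        long plain;          // JLS 17.7: may be read/written as two 32-bit halves
        volatile long safe;  // must be read/written atomically

        void writer() {
            plain = 0xFFFFFFFF00000000L;
            safe  = 0xFFFFFFFF00000000L;
        }

        void reader() {
            long p = plain;  // could observe a mix of old and new halves
            long s = safe;   // always a complete value
        }
    }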

diff -r 8f37087fc13f src/cpu/x86/vm/x86_64.ad

--- a/src/cpu/x86/vm/x86_64.ad Tue Jun 05 10:15:27 2012 +0200

+++ b/src/cpu/x86/vm/x86_64.ad Wed Jun 06 01:15:36 2012 +0800

@@ -7492,18 +7492,6 @@

ins_pipe(ialu_reg_mem); // XXX

%}

-// LoadL-locked - same as a regular LoadL when used with compare-swap

-instruct loadLLocked(rRegL dst, memory mem)

-%{

- match(Set dst (LoadLLocked mem));

-

- ins_cost(125); // XXX

- format %{ "movq    $dst, $mem\t# long locked" %}

- opcode(0x8B);

- ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));

- ins_pipe(ialu_reg_mem); // XXX

-%}

-

// Conditional-store of the updated heap-top.

// Used during allocation of the shared heap.

// Sets flags (EQ) on success. Implemented with a CMPXCHG on Intel.

diff -r 8f37087fc13f src/share/vm/adlc/forms.cpp

--- a/src/share/vm/adlc/forms.cpp Tue Jun 05 10:15:27 2012 +0200

+++ b/src/share/vm/adlc/forms.cpp Wed Jun 06 01:15:36 2012 +0800

@@ -261,7 +261,6 @@

if( strcmp(opType,"LoadL")==0 ) return Form::idealL;

if( strcmp(opType,"LoadL_unaligned")==0 ) return Form::idealL;

if( strcmp(opType,"LoadPLocked")==0 ) return Form::idealP;

- if( strcmp(opType,"LoadLLocked")==0 ) return Form::idealL;

if( strcmp(opType,"LoadP")==0 ) return Form::idealP;

if( strcmp(opType,"LoadN")==0 ) return Form::idealN;

if( strcmp(opType,"LoadRange")==0 ) return Form::idealI;

diff -r 8f37087fc13f src/share/vm/adlc/formssel.cpp

--- a/src/share/vm/adlc/formssel.cpp Tue Jun 05 10:15:27 2012 +0200

+++ b/src/share/vm/adlc/formssel.cpp Wed Jun 06 01:15:36 2012 +0800

@@ -3387,7 +3387,7 @@

"Load4I" ,"Load2I" ,"Load2L" ,"Load2D" ,"Load4F" ,"Load2F" ,"Load16B" ,

"Load8B" ,"Load4B" ,"Load8C" ,"Load4C" ,"Load2C" ,"Load8S", "Load4S","Load2S",

"LoadRange", "LoadKlass", "LoadNKlass", "LoadL_unaligned", "LoadD_unaligned",

- "LoadPLocked", "LoadLLocked",
+ "LoadPLocked",

"StorePConditional", "StoreIConditional", "StoreLConditional",

"CompareAndSwapI", "CompareAndSwapL", "CompareAndSwapP", "CompareAndSwapN",

"StoreCM",

diff -r 8f37087fc13f src/share/vm/c1/c1_GraphBuilder.cpp

--- a/src/share/vm/c1/c1_GraphBuilder.cpp Tue Jun 05 10:15:27 2012 +0200

+++ b/src/share/vm/c1/c1_GraphBuilder.cpp Wed Jun 06 01:15:36 2012 +0800

@@ -3195,13 +3195,6 @@

preserves_state = true;

break;

- // sun/misc/AtomicLong.attemptUpdate

- case vmIntrinsics::_attemptUpdate :

- if (!VM_Version::supports_cx8()) return false;

- if (!InlineAtomicLong) return false;

- preserves_state = true;

- break;

-

// Use special nodes for Unsafe instructions so we can more easily

// perform an address-mode optimization on the raw variants

case vmIntrinsics::_getObject : return append_unsafe_get_obj(callee, T_OBJECT, false);

diff -r 8f37087fc13f src/share/vm/c1/c1_LIRGenerator.cpp

--- a/src/share/vm/c1/c1_LIRGenerator.cpp Tue Jun 05 10:15:27 2012 +0200

+++ b/src/share/vm/c1/c1_LIRGenerator.cpp Wed Jun 06 01:15:36 2012 +0800

@@ -3009,11 +3009,6 @@

do_CompareAndSwap(x, longType);

break;

- // sun.misc.AtomicLongCSImpl.attemptUpdate

- case vmIntrinsics::_attemptUpdate:

- do_AttemptUpdate(x);

- break;

-

case vmIntrinsics::_Reference_get:

do_Reference_get(x);

break;

diff -r 8f37087fc13f src/share/vm/c1/c1_LIRGenerator.hpp

--- a/src/share/vm/c1/c1_LIRGenerator.hpp Tue Jun 05 10:15:27 2012 +0200

+++ b/src/share/vm/c1/c1_LIRGenerator.hpp Wed Jun 06 01:15:36 2012 +0800

@@ -244,7 +244,6 @@

void do_MathIntrinsic(Intrinsic* x);

void do_ArrayCopy(Intrinsic* x);

void do_CompareAndSwap(Intrinsic* x, ValueType* type);

- void do_AttemptUpdate(Intrinsic* x);

void do_NIOCheckIndex(Intrinsic* x);

void do_FPIntrinsics(Intrinsic* x);

void do_Reference_get(Intrinsic* x);

diff -r 8f37087fc13f src/share/vm/classfile/javaClasses.cpp

--- a/src/share/vm/classfile/javaClasses.cpp Tue Jun 05 10:15:27 2012 +0200

+++ b/src/share/vm/classfile/javaClasses.cpp Wed Jun 06 01:15:36 2012 +0800

@@ -2919,7 +2919,6 @@

int java_lang_AssertionStatusDirectives::packageEnabled_offset;

int java_lang_AssertionStatusDirectives::deflt_offset;

int java_nio_Buffer::_limit_offset;

-int sun_misc_AtomicLongCSImpl::_value_offset;

int java_util_concurrent_locks_AbstractOwnableSynchronizer::_owner_offset = 0;

int sun_reflect_ConstantPool::_cp_oop_offset;

int sun_reflect_UnsafeStaticFieldAccessorImpl::_base_offset;

@@ -2979,21 +2978,6 @@

compute_offset(_limit_offset, k, vmSymbols::limit_name(), vmSymbols::int_signature());

}

-// Support for intrinsification of sun.misc.AtomicLongCSImpl.attemptUpdate

-int sun_misc_AtomicLongCSImpl::value_offset() {

- assert(SystemDictionary::AtomicLongCSImpl_klass() != NULL, "can't call this");

- return _value_offset;

-}

-

-

-void sun_misc_AtomicLongCSImpl::compute_offsets() {

- klassOop k = SystemDictionary::AtomicLongCSImpl_klass();

- // If this class is not present, its value field offset won't be referenced.

- if (k != NULL) {

- compute_offset(_value_offset, k, vmSymbols::value_name(), vmSymbols::long_signature());

- }

-}

-

void java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(TRAPS) {

if (_owner_offset != 0) return;

@@ -3098,7 +3082,6 @@

sun_reflect_ConstantPool::compute_offsets();

sun_reflect_UnsafeStaticFieldAccessorImpl::compute_offsets();

}

- sun_misc_AtomicLongCSImpl::compute_offsets();

// generated interpreter code wants to know about the offsets we just computed:

AbstractAssembler::update_delayed_values();

diff -r 8f37087fc13f src/share/vm/classfile/javaClasses.hpp

--- a/src/share/vm/classfile/javaClasses.hpp Tue Jun 05 10:15:27 2012 +0200

+++ b/src/share/vm/classfile/javaClasses.hpp Wed Jun 06 01:15:36 2012 +0800

@@ -1383,15 +1383,6 @@

static void compute_offsets();

};

-class sun_misc_AtomicLongCSImpl: AllStatic {

- private:

- static int _value_offset;

-

- public:

- static int value_offset();

- static void compute_offsets();

-};

-

class java_util_concurrent_locks_AbstractOwnableSynchronizer : AllStatic {

private:

static int _owner_offset;

diff -r 8f37087fc13f src/share/vm/classfile/systemDictionary.hpp

--- a/src/share/vm/classfile/systemDictionary.hpp Tue Jun 05 10:15:27 2012 +0200

+++ b/src/share/vm/classfile/systemDictionary.hpp Wed Jun 06 01:15:36 2012 +0800

@@ -170,9 +170,6 @@

/* It's okay if this turns out to be NULL in non-1.4 JDKs. */ \

template(nio_Buffer_klass, java_nio_Buffer, Opt) \

\

- /* If this class isn't present, it won't be referenced. */ \

- template(AtomicLongCSImpl_klass, sun_misc_AtomicLongCSImpl, Opt) \

- \

template(DownloadManager_klass, sun_jkernel_DownloadManager, Opt_Kernel) \

\

template(PostVMInitHook_klass, sun_misc_PostVMInitHook, Opt) \

diff -r 8f37087fc13f src/share/vm/classfile/vmSymbols.hpp

--- a/src/share/vm/classfile/vmSymbols.hpp Tue Jun 05 10:15:27 2012 +0200

+++ b/src/share/vm/classfile/vmSymbols.hpp Wed Jun 06 01:15:36 2012 +0800

@@ -722,15 +722,6 @@

/* java/lang/ref/Reference */ \

do_intrinsic(_Reference_get, java_lang_ref_Reference, get_name, void_object_signature, F_R) \

\

- \

- do_class(sun_misc_AtomicLongCSImpl, "sun/misc/AtomicLongCSImpl") \

- do_intrinsic(_get_AtomicLong, sun_misc_AtomicLongCSImpl, get_name, void_long_signature, F_R) \

- /* (symbols get_name and void_long_signature defined above) */ \

- \

- do_intrinsic(_attemptUpdate, sun_misc_AtomicLongCSImpl, attemptUpdate_name, attemptUpdate_signature, F_R) \

- do_name( attemptUpdate_name, "attemptUpdate") \

- do_signature(attemptUpdate_signature, "(JJ)Z") \

- \

/* support for sun.misc.Unsafe */ \

do_class(sun_misc_Unsafe, "sun/misc/Unsafe") \

\
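
The removed symbols describe the class being de-intrinsified: a getter matching void_long_signature ("()J") and an attemptUpdate matching "(JJ)Z", over a long field named value (see the javaClasses.cpp hunk below). A hypothetical reconstruction of that shape, for reference only; the body of attemptUpdate is a placeholder, not the real sun.misc source:

    package sun.misc;

    // Shape implied by the removed vmSymbols/javaClasses entries.
    public class AtomicLongCSImpl {
        private volatile long value;       // "value", long signature

        public long get() {                // matches "()J"
            return value;
        }

        public synchronized boolean attemptUpdate(long expect, long update) {
            // Placeholder body; the real method is what the removed
            // C1/C2 intrinsics compiled into a 64-bit compare-and-swap.
            if (value != expect) return false;
            value = update;
            return true;
        }
    }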

diff -r 8f37087fc13f src/share/vm/opto/classes.hpp

--- a/src/share/vm/opto/classes.hpp Tue Jun 05 10:15:27 2012 +0200

+++ b/src/share/vm/opto/classes.hpp Wed Jun 06 01:15:36 2012 +0800

@@ -147,7 +147,6 @@

macro(LoadL)

macro(LoadL_unaligned)

macro(LoadPLocked)

-macro(LoadLLocked)

macro(LoadP)

macro(LoadN)

macro(LoadRange)

diff -r 8f37087fc13f src/share/vm/opto/compile.cpp

--- a/src/share/vm/opto/compile.cpp Tue Jun 05 10:15:27 2012 +0200

+++ b/src/share/vm/opto/compile.cpp Wed Jun 06 01:15:36 2012 +0800

@@ -2297,7 +2297,6 @@

case Op_LoadL:

case Op_LoadL_unaligned:

case Op_LoadPLocked:

- case Op_LoadLLocked:

case Op_LoadP:

case Op_LoadN:

case Op_LoadRange:

diff -r 8f37087fc13f src/share/vm/opto/library_call.cpp

--- a/src/share/vm/opto/library_call.cpp Tue Jun 05 10:15:27 2012 +0200

+++ b/src/share/vm/opto/library_call.cpp Wed Jun 06 01:15:36 2012 +0800

@@ -192,8 +192,6 @@

void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark);

bool inline_native_clone(bool is_virtual);

bool inline_native_Reflection_getCallerClass();

- bool inline_native_AtomicLong_get();

- bool inline_native_AtomicLong_attemptUpdate();

bool is_method_invoke_or_aux_frame(JVMState* jvms);

// Helper function for inlining native object hash method

bool inline_native_hashcode(bool is_virtual, bool is_static);

@@ -331,11 +329,6 @@

// We do not intrinsify this. The optimizer does fine with it.

return NULL;

- case vmIntrinsics::_get_AtomicLong:

- case vmIntrinsics::_attemptUpdate:

- if (!InlineAtomicLong) return NULL;

- break;

-

case vmIntrinsics::_getCallerClass:

if (!UseNewReflection) return NULL;

if (!InlineReflectionGetCallerClass) return NULL;

@@ -711,11 +704,6 @@

case vmIntrinsics::_reverseBytes_c:

return inline_reverseBytes((vmIntrinsics::ID) intrinsic_id());

- case vmIntrinsics::_get_AtomicLong:

- return inline_native_AtomicLong_get();

- case vmIntrinsics::_attemptUpdate:

- return inline_native_AtomicLong_attemptUpdate();

-

case vmIntrinsics::_getCallerClass:

return inline_native_Reflection_getCallerClass();

@@ -4006,113 +3994,6 @@

return false;

}

-static int value_field_offset = -1; // offset of the "value" field of AtomicLongCSImpl. This is needed by

- // inline_native_AtomicLong_attemptUpdate() but it has no way of

- // computing it since there is no lookup field by name function in the

- // CI interface. This is computed and set by inline_native_AtomicLong_get().

- // Using a static variable here is safe even if we have multiple compilation

- // threads because the offset is constant. At worst the same offset will be

- // computed and stored multiple times.

-

-bool LibraryCallKit::inline_native_AtomicLong_get() {

- // Restore the stack and pop off the argument

- _sp+=1;

- Node *obj = pop();

-

- // get the offset of the "value" field. Since the CI interfaces

- // does not provide a way to look up a field by name, we scan the bytecodes

- // to get the field index. We expect the first 2 instructions of the method

- // to be:

- // 0 aload_0

- // 1 getfield "value"

- ciMethod* method = callee();

- if (value_field_offset == -1)

- {

- ciField* value_field;

- ciBytecodeStream iter(method);

- Bytecodes::Code bc = iter.next();

-

- if ((bc != Bytecodes::_aload_0) &&

- ((bc != Bytecodes::_aload) || (iter.get_index() != 0)))

- return false;

- bc = iter.next();

- if (bc != Bytecodes::_getfield)

- return false;

- bool ignore;

- value_field = iter.get_field(ignore);

- value_field_offset = value_field->offset_in_bytes();

- }

-

- // Null check without removing any arguments.

- _sp++;

- obj = do_null_check(obj, T_OBJECT);

- _sp--;

- // Check for locking null object

- if (stopped()) return true;

-

- Node *adr = basic_plus_adr(obj, obj, value_field_offset);

- const TypePtr *adr_type = _gvn.type(adr)->is_ptr();

- int alias_idx = C->get_alias_index(adr_type);

-

- Node *result = _gvn.transform(new (C, 3) LoadLLockedNode(control(), memory(alias_idx), adr));

-

- push_pair(result);

-

- return true;

-}

-

-bool LibraryCallKit::inline_native_AtomicLong_attemptUpdate() {

- // Restore the stack and pop off the arguments

- _sp+=5;

- Node *newVal = pop_pair();

- Node *oldVal = pop_pair();

- Node *obj = pop();

-

- // we need the offset of the "value" field which was computed when

- // inlining the get() method. Give up if we don't have it.

- if (value_field_offset == -1)

- return false;

-

- // Null check without removing any arguments.

- _sp+=5;

- obj = do_null_check(obj, T_OBJECT);

- _sp-=5;

- // Check for locking null object

- if (stopped()) return true;

-

- Node *adr = basic_plus_adr(obj, obj, value_field_offset);

- const TypePtr *adr_type = _gvn.type(adr)->is_ptr();

- int alias_idx = C->get_alias_index(adr_type);

-

- Node *cas = _gvn.transform(new (C, 5) StoreLConditionalNode(control(), memory(alias_idx), adr, newVal, oldVal));

- Node *store_proj = _gvn.transform( new (C, 1) SCMemProjNode(cas));

- set_memory(store_proj, alias_idx);

- Node *bol = _gvn.transform( new (C, 2) BoolNode( cas, BoolTest::eq ) );

-

- Node *result;

- // CMove node is not used to be able fold a possible check code

- // after attemptUpdate() call. This code could be transformed

- // into CMove node by loop optimizations.

- {

- RegionNode *r = new (C, 3) RegionNode(3);

- result = new (C, 3) PhiNode(r, TypeInt::BOOL);

-

- Node *iff = create_and_xform_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN);

- Node *iftrue = opt_iff(r, iff);

- r->init_req(1, iftrue);

- result->init_req(1, intcon(1));

- result->init_req(2, intcon(0));

-

- set_control(_gvn.transform(r));

- record_for_igvn(r);

-

- C->set_has_split_ifs(true); // Has chance for split-if optimization

- }

-

- push(_gvn.transform(result));

- return true;

-}

-

bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {

// restore the arguments

_sp += arg_size();
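
The removed inline_native_AtomicLong_get() recovered the offset of the value field by pattern-matching the first two bytecodes of the getter (aload_0; getfield), since the CI offered no field-lookup-by-name. For context, the kind of accessor that matches that pattern (illustrative Java, not the JDK source):

    class Holder {
        private volatile long value;

        // javac compiles this method to:
        //   0: aload_0
        //   1: getfield      #<value>
        //   4: lreturn
        // which is exactly the prefix the removed code scanned for.
        long get() {
            return value;
        }
    }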

diff -r 8f37087fc13f src/share/vm/opto/memnode.hpp

--- a/src/share/vm/opto/memnode.hpp Tue Jun 05 10:15:27 2012 +0200

+++ b/src/share/vm/opto/memnode.hpp Wed Jun 06 01:15:36 2012 +0800

@@ -636,17 +636,6 @@

virtual bool depends_only_on_test() const { return true; }

};

-//------------------------------LoadLLockedNode---------------------------------

-// Load-locked a pointer from memory (either object or array).

-// On Sparc & Intel this is implemented as a normal long load.

-class LoadLLockedNode : public LoadLNode {

-public:

- LoadLLockedNode( Node *c, Node *mem, Node *adr )

- : LoadLNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeLong::LONG) {}

- virtual int Opcode() const;

- virtual int store_Opcode() const { return Op_StoreLConditional; }

-};

-

//------------------------------SCMemProjNode---------------------------------------

// This class defines a projection of the memory state of a store conditional node.

// These nodes return a value, but also update memory.

diff -r 8f37087fc13f src/share/vm/prims/jvm.h

--- a/src/share/vm/prims/jvm.h Tue Jun 05 10:15:27 2012 +0200

+++ b/src/share/vm/prims/jvm.h Wed Jun 06 01:15:36 2012 +0800

@@ -634,7 +634,7 @@

JVM_AssertionStatusDirectives(JNIEnv *env, jclass unused);

/*

- * sun.misc.AtomicLong
+ * java.util.concurrent.atomic.AtomicLong

*/

JNIEXPORT jboolean JNICALL

JVM_SupportsCX8(void);

diff -r 8f37087fc13f src/share/vm/runtime/globals.hpp

--- a/src/share/vm/runtime/globals.hpp Tue Jun 05 10:15:27 2012 +0200

+++ b/src/share/vm/runtime/globals.hpp Wed Jun 06 01:15:36 2012 +0800

@@ -631,9 +631,6 @@

develop(bool, InlineClassNatives, true, \

"inline Class.isInstance, etc") \

\

- develop(bool, InlineAtomicLong, true, \

- "inline sun.misc.AtomicLong") \

- \

develop(bool, InlineThreadNatives, true, \

"inline Thread.currentThread, etc") \

\

diff -r 8f37087fc13f src/share/vm/runtime/vmStructs.cpp

--- a/src/share/vm/runtime/vmStructs.cpp Tue Jun 05 10:15:27 2012 +0200

+++ b/src/share/vm/runtime/vmStructs.cpp Wed Jun 06 01:15:36 2012 +0800

@@ -1876,7 +1876,6 @@

declare_c2_type(StoreNNode, StoreNode) \

declare_c2_type(StoreCMNode, StoreNode) \

declare_c2_type(LoadPLockedNode, LoadPNode) \

- declare_c2_type(LoadLLockedNode, LoadLNode) \

declare_c2_type(SCMemProjNode, ProjNode) \

declare_c2_type(LoadStoreNode, Node) \

declare_c2_type(StorePConditionalNode, LoadStoreNode) \