src/cpu/x86/vm/sharedRuntime_x86_64.cpp
*** old/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Wed Sep 16 15:17:52 2015
--- new/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Wed Sep 16 15:17:51 2015
*** 41,50 ****
--- 41,53 ----
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
+ #if INCLUDE_JVMCI
+ #include "jvmci/jvmciJavaClasses.hpp"
+ #endif
#define __ masm->
const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
*** 103,113 ****
--- 106,116 ----
    DEF_XMM_OFFS(27),
    DEF_XMM_OFFS(28),
    DEF_XMM_OFFS(29),
    DEF_XMM_OFFS(30),
    DEF_XMM_OFFS(31),
!   fpu_state_end = fpu_state_off + ((FPUStateSizeInWords-1)*wordSize / BytesPerInt),
    fpu_stateH_end,
    r15_off, r15H_off,
    r14_off, r14H_off,
    r13_off, r13H_off,
    r12_off, r12H_off,
*** 153,180 ****
--- 156,185 ----
    static void restore_result_registers(MacroAssembler* masm);
};
OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
int vect_words = 0;
+ int ymmhi_offset = -1;
int num_xmm_regs = 16;
if (UseAVX > 2) {
num_xmm_regs = 32;
}
! #ifdef COMPILER2
! #if defined(COMPILER2) || INCLUDE_JVMCI
if (save_vectors) {
assert(UseAVX > 0, "512bit vectors are supported only with EVEX");
assert(MaxVectorSize == 64, "only 512bit vectors are supported now");
// Save upper half of YMM registers
vect_words = 16 * num_xmm_regs / wordSize;
+ ymmhi_offset = additional_frame_words;
additional_frame_words += vect_words;
if (UseAVX > 2) {
// Save upper half of ZMM registers as well
additional_frame_words += vect_words;
}
}
#else
! assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
#endif
// Always make the frame size 16-byte aligned
int frame_size_in_bytes = round_to(additional_frame_words*wordSize +
reg_save_size*BytesPerInt, num_xmm_regs);
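
[Note] round_to here rounds the frame byte size up to the next multiple of num_xmm_regs (16 or 32), which guarantees at least the 16-byte alignment the comment asks for. The arithmetic behind it, as a minimal sketch (plain C++, not HotSpot's header; HotSpot's round_to additionally asserts the alignment is a power of two):

    // Power-of-two round-up: add (align - 1), then clear the low bits.
    inline int round_up_pow2(int x, int align) {
      return (x + align - 1) & ~(align - 1);
    }
    // e.g. round_up_pow2(100, 16) == 112, round_up_pow2(112, 16) == 112
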
*** 280,289 ****
--- 285,295 ----
OopMapSet *oop_maps = new OopMapSet();
OopMap* map = new OopMap(frame_size_in_slots, 0);
#define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_slots)
+ #define YMMHI_STACK_OFFSET(x) VMRegImpl::stack2reg((x / VMRegImpl::stack_slot_size) + ymmhi_offset)
map->set_callee_saved(STACK_OFFSET( rax_off ), rax->as_VMReg());
map->set_callee_saved(STACK_OFFSET( rcx_off ), rcx->as_VMReg());
map->set_callee_saved(STACK_OFFSET( rdx_off ), rdx->as_VMReg());
map->set_callee_saved(STACK_OFFSET( rbx_off ), rbx->as_VMReg());
*** 332,341 ****
--- 338,369 ----
    map->set_callee_saved(STACK_OFFSET(xmm29_off), xmm29->as_VMReg());
    map->set_callee_saved(STACK_OFFSET(xmm30_off), xmm30->as_VMReg());
    map->set_callee_saved(STACK_OFFSET(xmm31_off), xmm31->as_VMReg());
  }
+ #if defined(COMPILER2) || INCLUDE_JVMCI
+   if (save_vectors) {
+     assert(ymmhi_offset != -1, "save area must exist");
+     map->set_callee_saved(YMMHI_STACK_OFFSET(  0), xmm0->as_VMReg()->next()->next()->next()->next());
+     map->set_callee_saved(YMMHI_STACK_OFFSET( 16), xmm1->as_VMReg()->next()->next()->next()->next());
+     map->set_callee_saved(YMMHI_STACK_OFFSET( 32), xmm2->as_VMReg()->next()->next()->next()->next());
+     map->set_callee_saved(YMMHI_STACK_OFFSET( 48), xmm3->as_VMReg()->next()->next()->next()->next());
+     map->set_callee_saved(YMMHI_STACK_OFFSET( 64), xmm4->as_VMReg()->next()->next()->next()->next());
+     map->set_callee_saved(YMMHI_STACK_OFFSET( 80), xmm5->as_VMReg()->next()->next()->next()->next());
+     map->set_callee_saved(YMMHI_STACK_OFFSET( 96), xmm6->as_VMReg()->next()->next()->next()->next());
+     map->set_callee_saved(YMMHI_STACK_OFFSET(112), xmm7->as_VMReg()->next()->next()->next()->next());
+     map->set_callee_saved(YMMHI_STACK_OFFSET(128), xmm8->as_VMReg()->next()->next()->next()->next());
+     map->set_callee_saved(YMMHI_STACK_OFFSET(144), xmm9->as_VMReg()->next()->next()->next()->next());
+     map->set_callee_saved(YMMHI_STACK_OFFSET(160), xmm10->as_VMReg()->next()->next()->next()->next());
+     map->set_callee_saved(YMMHI_STACK_OFFSET(176), xmm11->as_VMReg()->next()->next()->next()->next());
+     map->set_callee_saved(YMMHI_STACK_OFFSET(192), xmm12->as_VMReg()->next()->next()->next()->next());
+     map->set_callee_saved(YMMHI_STACK_OFFSET(208), xmm13->as_VMReg()->next()->next()->next()->next());
+     map->set_callee_saved(YMMHI_STACK_OFFSET(224), xmm14->as_VMReg()->next()->next()->next()->next());
+     map->set_callee_saved(YMMHI_STACK_OFFSET(240), xmm15->as_VMReg()->next()->next()->next()->next());
+   }
+ #endif
+
  // %%% These should all be a waste but we'll keep things as they were for now
  if (true) {
    map->set_callee_saved(STACK_OFFSET( raxH_off ), rax->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rcxH_off ), rcx->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rdxH_off ), rdx->as_VMReg()->next());
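
[Note] Why the four chained next() calls: VMRegImpl stack slots are 4 bytes wide, so an XMM register's first VMReg name covers its low 128 bits in four slot names, and the matching YMM upper half begins four names later. The byte-to-slot mapping YMMHI_STACK_OFFSET performs, as a standalone sketch (plain C++, not HotSpot code; the 4-byte slot size mirrors VMRegImpl::stack_slot_size and the save-area base mirrors ymmhi_offset above):

    #include <cstdio>

    int main() {
      const int stack_slot_size = 4;   // assumed: VMRegImpl::stack_slot_size
      const int ymmhi_offset    = 0;   // assumed save-area base, in slots
      for (int reg = 0; reg < 16; reg++) {
        int byte_off = reg * 16;       // each YMM upper half occupies 16 bytes
        int slot     = byte_off / stack_slot_size + ymmhi_offset;
        printf("xmm%d upper half -> stack slot %d\n", reg, slot);
      }
      return 0;
    }
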
*** 393,403 ****
--- 421,431 ----
void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
if (frame::arg_reg_save_area_bytes != 0) {
// Pop arg register save area
__ addptr(rsp, frame::arg_reg_save_area_bytes);
}
! #ifdef COMPILER2
! #if defined(COMPILER2) || INCLUDE_JVMCI
if (restore_vectors) {
// Restore upper half of YMM registers (0..15)
assert(UseAVX > 0, "512bit vectors are supported only with AVX");
assert(MaxVectorSize == 64, "only 512bit vectors are supported now");
__ vinsertf128h(xmm0, Address(rsp, 0));
*** 471,481 ****
--- 499,509 ----
      __ vinsertf64x4h(xmm31, Address(rsp, 992));
      __ addptr(rsp, 1024);
    }
  }
#else
!   assert(!restore_vectors, "vectors are generated only by C2 and JVMCI");
#endif
  // Recover CPU state
  __ pop_CPU_state();
  // Get the rbp described implicitly by the calling convention (no oopMap)
  __ pop(rbp);
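
[Note] For readers unfamiliar with the instruction, vinsertf128h reloads the upper 128 bits of a YMM register from memory while leaving the live lower half untouched. The equivalent operation through the AVX C intrinsics, as a self-contained illustration (not the stub's code; compile with -mavx):

    #include <immintrin.h>
    #include <cstdio>

    int main() {
      alignas(16) float saved_hi[4] = {4.f, 5.f, 6.f, 7.f};        // the stack save area
      __m256 ymm = _mm256_set_ps(0, 0, 0, 0, 3.f, 2.f, 1.f, 0.f);  // lower half is live
      __m128 hi  = _mm_load_ps(saved_hi);
      ymm = _mm256_insertf128_ps(ymm, hi, 1);                      // insert into upper lane
      alignas(32) float out[8];
      _mm256_store_ps(out, ymm);
      for (int i = 0; i < 8; i++) printf("%g ", out[i]);           // prints 0 1 2 3 4 5 6 7
      printf("\n");
      return 0;
    }
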
*** 806,816 ****
--- 834,844 ----
__ cmpptr(pc_reg, temp_reg);
__ jcc(Assembler::below, L_ok);
__ bind(L_fail);
}
! static void gen_i2c_adapter(MacroAssembler *masm,
! void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
int total_args_passed,
int comp_args_on_stack,
const BasicType *sig_bt,
const VMRegPair *regs) {
*** 903,912 ****
--- 931,952 ----
// Will jump to the compiled code just as if compiled code was doing it.
// Pre-load the register-jump target early, to schedule it better.
__ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset())));
+ #if INCLUDE_JVMCI
+   if (EnableJVMCI) {
+     // check if this call should be routed towards a specific entry point
+     __ cmpptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
+     Label no_alternative_target;
+     __ jcc(Assembler::equal, no_alternative_target);
+     __ movptr(r11, Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
+     __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
+     __ bind(no_alternative_target);
+   }
+ #endif
+
  // Now generate the shuffle code.  Pick up all register args and move the
  // rest through the floating point stack top.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
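
[Note] The block added at the top of this hunk gives JVMCI a one-shot override of the i2c jump target: when the thread-local field is non-zero, the stub jumps there instead of the method's regular compiled entry, and clears the field so the redirection applies to exactly one call. That consume-and-clear control flow, sketched in plain C++ (the struct and function names are illustrative, not HotSpot's):

    #include <cstdint>

    struct ThreadSketch {
      uintptr_t alternate_call_target;      // mirrors jvmci_alternate_call_target
    };

    // default_target mirrors the value loaded from Method::from_compiled_offset().
    uintptr_t pick_i2c_entry(ThreadSketch* t, uintptr_t default_target) {
      uintptr_t target = default_target;
      if (t->alternate_call_target != 0) {  // cmpptr / jcc above
        target = t->alternate_call_target;  // movptr r11, ...
        t->alternate_call_target = 0;       // one-shot: clear after use
      }
      return target;                        // the stub then jumps through r11
    }
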
*** 2836,2846 ****
--- 2876,2892 ----
//------------------------------generate_deopt_blob----------------------------
void SharedRuntime::generate_deopt_blob() {
// Allocate space for the code
ResourceMark rm;
// Setup code generation tools
! CodeBuffer buffer("deopt_blob", 2048, 1024);
! int pad = 0;
+ #if INCLUDE_JVMCI
+ if (EnableJVMCI) {
+ pad += 512; // Increase the buffer size when compiling for JVMCI
+ }
+ #endif
+ CodeBuffer buffer("deopt_blob", 2048+pad, 1024);
MacroAssembler* masm = new MacroAssembler(&buffer);
int frame_size_in_words;
OopMap* map = NULL;
OopMapSet *oop_maps = new OopMapSet();
*** 2885,2904 ****
--- 2931,2988 ----
  // Normal deoptimization.  Save exec mode for unpack_frames.
  __ movl(r14, Deoptimization::Unpack_deopt); // callee-saved
  __ jmp(cont);
int reexecute_offset = __ pc() - start;
+ #if INCLUDE_JVMCI && !defined(COMPILER1)
+   if (EnableJVMCI && UseJVMCICompiler) {
+     // JVMCI does not use this kind of deoptimization
+     __ should_not_reach_here();
+   }
+ #endif
// Reexecute case
// return address is the pc describes what bci to do re-execute at
// No need to update map as each call to save_live_registers will produce identical oopmap
(void) RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
__ movl(r14, Deoptimization::Unpack_reexecute); // callee-saved
__ jmp(cont);
+ #if INCLUDE_JVMCI
+   Label after_fetch_unroll_info_call;
+   int implicit_exception_uncommon_trap_offset = 0;
+   int uncommon_trap_offset = 0;
+
+   if (EnableJVMCI) {
+     implicit_exception_uncommon_trap_offset = __ pc() - start;
+
+     __ pushptr(Address(r15_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
+     __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())), (int32_t)NULL_WORD);
+
+     uncommon_trap_offset = __ pc() - start;
+
+     // Save everything in sight.
+     RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
+     // fetch_unroll_info needs to call last_java_frame()
+     __ set_last_Java_frame(noreg, noreg, NULL);
+
+     __ movl(c_rarg1, Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())));
+     __ movl(Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())), -1);
+
+     __ movl(r14, (int32_t)Deoptimization::Unpack_reexecute);
+     __ mov(c_rarg0, r15_thread);
+     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
+     oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
+
+     __ reset_last_Java_frame(false, false);
+
+     __ jmp(after_fetch_unroll_info_call);
+   } // EnableJVMCI
+ #endif // INCLUDE_JVMCI
+
  int exception_offset = __ pc() - start;
// Prolog for exception case
// all registers are dead at this entry point, except for rax, and
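
[Note] In the JVMCI uncommon-trap entry above, the stub pulls the pending deoptimization request id out of the thread, resets the field to -1 (meaning none pending), and hands the id to Deoptimization::uncommon_trap as the second argument. The fetch-and-reset hand-off in plain C++ (illustrative names, not HotSpot's):

    struct ThreadSketch2 {
      int pending_deoptimization;          // mirrors the JavaThread field
    };

    int take_pending_deopt(ThreadSketch2* t) {
      int id = t->pending_deoptimization;  // movl c_rarg1, [r15_thread + offset]
      t->pending_deoptimization = -1;      // movl [r15_thread + offset], -1
      return id;                           // becomes c_rarg1 for uncommon_trap
    }
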
*** 2980,2989 ****
--- 3064,3079 ----
  // find any register it might need.
  oop_maps->add_gc_map(__ pc() - start, map);
__ reset_last_Java_frame(false, false);
+ #if INCLUDE_JVMCI
+   if (EnableJVMCI) {
+     __ bind(after_fetch_unroll_info_call);
+   }
+ #endif
+
  // Load UnrollBlock* into rdi
  __ mov(rdi, rax);
Label noException;
__ cmpl(r14, Deoptimization::Unpack_exception); // Was exception pending?
*** 3154,3163 ****
--- 3244,3259 ----
  // Make sure all code is generated
  masm->flush();
_deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
_deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
+ #if INCLUDE_JVMCI
+   if (EnableJVMCI) {
+     _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
+     _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
+   }
+ #endif
}
#ifdef COMPILER2
//------------------------------generate_uncommon_trap_blob--------------------
void SharedRuntime::generate_uncommon_trap_blob() {