diff --git a/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp b/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp
index 71d2898f45c7f..2d842b24663c2 100644
--- a/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp
@@ -37,66 +37,12 @@
 #define __ ce->masm()->
 
-#ifndef _LP64
-float ConversionStub::float_zero = 0.0;
-double ConversionStub::double_zero = 0.0;
-
-void ConversionStub::emit_code(LIR_Assembler* ce) {
-  __ bind(_entry);
-  assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");
-
-
-  if (input()->is_single_xmm()) {
-    __ comiss(input()->as_xmm_float_reg(),
-              ExternalAddress((address)&float_zero));
-  } else if (input()->is_double_xmm()) {
-    __ comisd(input()->as_xmm_double_reg(),
-              ExternalAddress((address)&double_zero));
-  } else {
-    __ push(rax);
-    __ ftst();
-    __ fnstsw_ax();
-    __ sahf();
-    __ pop(rax);
-  }
-
-  Label NaN, do_return;
-  __ jccb(Assembler::parity, NaN);
-  __ jccb(Assembler::below, do_return);
-
-  // input is > 0 -> return maxInt
-  // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff
-  __ decrement(result()->as_register());
-  __ jmpb(do_return);
-
-  // input is NaN -> return 0
-  __ bind(NaN);
-  __ xorptr(result()->as_register(), result()->as_register());
-
-  __ bind(do_return);
-  __ jmp(_continuation);
-}
-#endif // !_LP64
-
 void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   InternalAddress safepoint_pc(ce->masm()->pc() - ce->masm()->offset() + safepoint_offset());
-#ifdef _LP64
   __ lea(rscratch1, safepoint_pc);
   __ movptr(Address(r15_thread, JavaThread::saved_exception_pc_offset()), rscratch1);
-#else
-  const Register tmp1 = rcx;
-  const Register tmp2 = rdx;
-  __ push(tmp1);
-  __ push(tmp2);
-
-  __ lea(tmp1, safepoint_pc);
-  __ get_thread(tmp2);
-  __ movptr(Address(tmp2, JavaThread::saved_exception_pc_offset()), tmp1);
-
-  __ pop(tmp2);
-  __ pop(tmp1);
-#endif /* _LP64 */
+
   assert(SharedRuntime::polling_page_return_handler_blob() != nullptr, "polling page return stub not created yet");
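Aside on the deleted `ConversionStub`: it existed because Java's `(int)` conversion saturates, while `cvttss2si`/`cvttsd2si` produce the 0x80000000 "integer indefinite" pattern for NaN and out-of-range inputs, so the 32-bit stub only had to patch that one pattern. A plain-C++ sketch of the semantics (illustrative, not HotSpot code):

```cpp
#include <cmath>
#include <cstdint>

// Java f2i semantics the removed 32-bit stub implemented in assembly:
// NaN maps to 0, out-of-range values saturate to Integer.MIN/MAX_VALUE.
int32_t java_f2i(float f) {
  if (std::isnan(f))       return 0;          // NaN -> 0
  if (f >= 2147483648.0f)  return INT32_MAX;  // too big   -> 0x7fffffff
  if (f <= -2147483648.0f) return INT32_MIN;  // too small -> 0x80000000
  return (int32_t)f;                          // in range: plain truncation
}
```
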
diff --git a/src/hotspot/cpu/x86/c1_Defs_x86.hpp b/src/hotspot/cpu/x86/c1_Defs_x86.hpp
index 1637789e79884..bfb885a1b7301 100644
--- a/src/hotspot/cpu/x86/c1_Defs_x86.hpp
+++ b/src/hotspot/cpu/x86/c1_Defs_x86.hpp
@@ -33,15 +33,11 @@ enum {
 // registers
 enum {
-  pd_nof_cpu_regs_frame_map = NOT_LP64(8) LP64_ONLY(16),  // number of registers used during code emission
+  pd_nof_cpu_regs_frame_map = 16,  // number of registers used during code emission
   pd_nof_fpu_regs_frame_map = FloatRegister::number_of_registers,  // number of registers used during code emission
   pd_nof_xmm_regs_frame_map = XMMRegister::number_of_registers,  // number of registers used during code emission
 
-#ifdef _LP64
   #define UNALLOCATED 4    // rsp, rbp, r15, r10
-#else
-  #define UNALLOCATED 2    // rsp, rbp
-#endif // LP64
 
   pd_nof_caller_save_cpu_regs_frame_map = pd_nof_cpu_regs_frame_map - UNALLOCATED,  // number of registers killed by calls
   pd_nof_caller_save_fpu_regs_frame_map = pd_nof_fpu_regs_frame_map,  // number of registers killed by calls
@@ -54,9 +50,9 @@ enum {
   pd_nof_fpu_regs_linearscan = pd_nof_fpu_regs_frame_map,  // number of registers visible to linear scan
   pd_nof_xmm_regs_linearscan = pd_nof_xmm_regs_frame_map,  // number of registers visible to linear scan
   pd_first_cpu_reg = 0,
-  pd_last_cpu_reg = NOT_LP64(5) LP64_ONLY(11),
-  pd_first_byte_reg = NOT_LP64(2) LP64_ONLY(0),
-  pd_last_byte_reg = NOT_LP64(5) LP64_ONLY(11),
+  pd_last_cpu_reg = 11,
+  pd_first_byte_reg = 0,
+  pd_last_byte_reg = 11,
   pd_first_fpu_reg = pd_nof_cpu_regs_frame_map,
   pd_last_fpu_reg =  pd_first_fpu_reg + 7,
   pd_first_xmm_reg = pd_nof_cpu_regs_frame_map + pd_nof_fpu_regs_frame_map,
diff --git a/src/hotspot/cpu/x86/c1_FrameMap_x86.cpp b/src/hotspot/cpu/x86/c1_FrameMap_x86.cpp
index e3c7879260266..bdbab432180bd 100644
--- a/src/hotspot/cpu/x86/c1_FrameMap_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_FrameMap_x86.cpp
@@ -32,7 +32,6 @@ const int FrameMap::pd_c_runtime_reserved_arg_size = 0;
 LIR_Opr FrameMap::map_to_opr(BasicType type, VMRegPair* reg, bool) {
   LIR_Opr opr = LIR_OprFact::illegalOpr;
   VMReg r_1 = reg->first();
-  VMReg r_2 = reg->second();
   if (r_1->is_stack()) {
     // Convert stack slot to an SP offset
     // The calling convention does not count the SharedRuntime::out_preserve_stack_slots() value
@@ -41,14 +40,8 @@ LIR_Opr FrameMap::map_to_opr(BasicType type, VMRegPair* reg, bool) {
     opr = LIR_OprFact::address(new LIR_Address(rsp_opr, st_off, type));
   } else if (r_1->is_Register()) {
     Register reg = r_1->as_Register();
-    if (r_2->is_Register() && (type == T_LONG || type == T_DOUBLE)) {
-      Register reg2 = r_2->as_Register();
-#ifdef _LP64
-      assert(reg2 == reg, "must be same register");
+    if (type == T_LONG || type == T_DOUBLE) {
       opr = as_long_opr(reg);
-#else
-      opr = as_long_opr(reg2, reg);
-#endif // _LP64
     } else if (is_reference_type(type)) {
       opr = as_oop_opr(reg);
     } else if (type == T_METADATA) {
@@ -111,8 +104,6 @@ LIR_Opr FrameMap::long1_opr;
 LIR_Opr FrameMap::xmm0_float_opr;
 LIR_Opr FrameMap::xmm0_double_opr;
 
-#ifdef _LP64
-
 LIR_Opr FrameMap::r8_opr;
 LIR_Opr FrameMap::r9_opr;
 LIR_Opr FrameMap::r10_opr;
@@ -137,7 +128,6 @@ LIR_Opr FrameMap::r11_metadata_opr;
 LIR_Opr FrameMap::r12_metadata_opr;
 LIR_Opr FrameMap::r13_metadata_opr;
 LIR_Opr FrameMap::r14_metadata_opr;
-#endif // _LP64
 
 LIR_Opr FrameMap::_caller_save_cpu_regs[] = {};
 LIR_Opr FrameMap::_caller_save_fpu_regs[] = {};
@@ -157,23 +147,17 @@ XMMRegister FrameMap::nr2xmmreg(int rnr) {
 
 void FrameMap::initialize() {
   assert(!_init_done, "once");
-  assert(nof_cpu_regs == LP64_ONLY(16) NOT_LP64(8), "wrong number of CPU registers");
+  assert(nof_cpu_regs == 16, "wrong number of CPU registers");
   map_register(0, rsi);  rsi_opr = LIR_OprFact::single_cpu(0);
   map_register(1, rdi);  rdi_opr = LIR_OprFact::single_cpu(1);
   map_register(2, rbx);  rbx_opr = LIR_OprFact::single_cpu(2);
   map_register(3, rax);  rax_opr = LIR_OprFact::single_cpu(3);
   map_register(4, rdx);  rdx_opr = LIR_OprFact::single_cpu(4);
   map_register(5, rcx);  rcx_opr = LIR_OprFact::single_cpu(5);
-
-#ifndef _LP64
-  // The unallocatable registers are at the end
-  map_register(6, rsp);
-  map_register(7, rbp);
-#else
-  map_register( 6, r8);    r8_opr = LIR_OprFact::single_cpu(6);
-  map_register( 7, r9);    r9_opr = LIR_OprFact::single_cpu(7);
-  map_register( 8, r11);  r11_opr = LIR_OprFact::single_cpu(8);
-  map_register( 9, r13);  r13_opr = LIR_OprFact::single_cpu(9);
+  map_register(6, r8);   r8_opr = LIR_OprFact::single_cpu(6);
+  map_register(7, r9);   r9_opr = LIR_OprFact::single_cpu(7);
+  map_register(8, r11);  r11_opr = LIR_OprFact::single_cpu(8);
+  map_register(9, r13);  r13_opr = LIR_OprFact::single_cpu(9);
   map_register(10, r14); r14_opr = LIR_OprFact::single_cpu(10);
   // r12 is allocated conditionally. With compressed oops it holds
   // the heapbase value and is not visible to the allocator.
@@ -183,15 +167,9 @@ void FrameMap::initialize() {
   map_register(13, r15); r15_opr = LIR_OprFact::single_cpu(13);
   map_register(14, rsp);
   map_register(15, rbp);
-#endif // _LP64
 
-#ifdef _LP64
   long0_opr = LIR_OprFact::double_cpu(3 /*eax*/, 3 /*eax*/);
   long1_opr = LIR_OprFact::double_cpu(2 /*ebx*/, 2 /*ebx*/);
-#else
-  long0_opr = LIR_OprFact::double_cpu(3 /*eax*/, 4 /*edx*/);
-  long1_opr = LIR_OprFact::double_cpu(2 /*ebx*/, 5 /*ecx*/);
-#endif // _LP64
 
   xmm0_float_opr  = LIR_OprFact::single_xmm(0);
   xmm0_double_opr = LIR_OprFact::double_xmm(0);
@@ -201,16 +179,12 @@ void FrameMap::initialize() {
   _caller_save_cpu_regs[3] = rax_opr;
   _caller_save_cpu_regs[4] = rdx_opr;
   _caller_save_cpu_regs[5] = rcx_opr;
-
-#ifdef _LP64
   _caller_save_cpu_regs[6] = r8_opr;
   _caller_save_cpu_regs[7] = r9_opr;
   _caller_save_cpu_regs[8] = r11_opr;
   _caller_save_cpu_regs[9] = r13_opr;
   _caller_save_cpu_regs[10] = r14_opr;
   _caller_save_cpu_regs[11] = r12_opr;
-#endif // _LP64
-
 
   _xmm_regs[0] = xmm0;
   _xmm_regs[1] = xmm1;
@@ -220,8 +194,6 @@ void FrameMap::initialize() {
   _xmm_regs[5] = xmm5;
   _xmm_regs[6] = xmm6;
   _xmm_regs[7] = xmm7;
-
-#ifdef _LP64
   _xmm_regs[8] = xmm8;
   _xmm_regs[9] = xmm9;
   _xmm_regs[10] = xmm10;
@@ -246,7 +218,6 @@ void FrameMap::initialize() {
   _xmm_regs[29] = xmm29;
   _xmm_regs[30] = xmm30;
   _xmm_regs[31] = xmm31;
-#endif // _LP64
 
   for (int i = 0; i < 8; i++) {
     _caller_save_fpu_regs[i] = LIR_OprFact::single_fpu(i);
@@ -276,7 +247,6 @@ void FrameMap::initialize() {
   rsp_opr = as_pointer_opr(rsp);
   rbp_opr = as_pointer_opr(rbp);
 
-#ifdef _LP64
   r8_oop_opr = as_oop_opr(r8);
   r9_oop_opr = as_oop_opr(r9);
   r11_oop_opr = as_oop_opr(r11);
@@ -290,7 +260,6 @@ void FrameMap::initialize() {
   r12_metadata_opr = as_metadata_opr(r12);
   r13_metadata_opr = as_metadata_opr(r13);
   r14_metadata_opr = as_metadata_opr(r14);
-#endif // _LP64
 
   VMRegPair regs;
   BasicType sig_bt = T_OBJECT;
diff --git a/src/hotspot/cpu/x86/c1_FrameMap_x86.hpp b/src/hotspot/cpu/x86/c1_FrameMap_x86.hpp
index ce892efbed243..08b872cb0951d 100644
--- a/src/hotspot/cpu/x86/c1_FrameMap_x86.hpp
+++ b/src/hotspot/cpu/x86/c1_FrameMap_x86.hpp
@@ -41,13 +41,8 @@
     nof_xmm_regs = pd_nof_xmm_regs_frame_map,
     nof_caller_save_xmm_regs = pd_nof_caller_save_xmm_regs_frame_map,
     first_available_sp_in_frame = 0,
-#ifndef _LP64
-    frame_pad_in_bytes = 8,
-    nof_reg_args = 2
-#else
     frame_pad_in_bytes = 16,
     nof_reg_args = 6
-#endif // _LP64
   };
 
  private:
@@ -81,8 +76,6 @@
   static LIR_Opr rdx_metadata_opr;
   static LIR_Opr rcx_metadata_opr;
 
-#ifdef _LP64
-
   static LIR_Opr r8_opr;
   static LIR_Opr r9_opr;
   static LIR_Opr r10_opr;
@@ -108,28 +101,17 @@
   static LIR_Opr r13_metadata_opr;
   static LIR_Opr r14_metadata_opr;
 
-#endif // _LP64
-
   static LIR_Opr long0_opr;
   static LIR_Opr long1_opr;
   static LIR_Opr xmm0_float_opr;
   static LIR_Opr xmm0_double_opr;
 
-#ifdef _LP64
   static LIR_Opr as_long_opr(Register r) {
     return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
   }
   static LIR_Opr as_pointer_opr(Register r) {
     return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
   }
-#else
-  static LIR_Opr as_long_opr(Register r, Register r2) {
-    return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r2));
-  }
-  static LIR_Opr as_pointer_opr(Register r) {
-    return LIR_OprFact::single_cpu(cpu_reg2rnr(r));
-  }
-#endif // _LP64
 
   // VMReg name for spilled physical FPU stack slot n
   static VMReg fpu_regname (int n);
diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
index ed16f81cba18a..fa1bfaa71dbac 100644
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
@@ -169,7 +169,6 @@ void LIR_Assembler::push(LIR_Opr opr) {
   if (opr->is_single_cpu()) {
     __ push_reg(opr->as_register());
   } else if (opr->is_double_cpu()) {
-    NOT_LP64(__ push_reg(opr->as_register_hi()));
     __ push_reg(opr->as_register_lo());
   } else if (opr->is_stack()) {
     __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
@@ -325,11 +324,9 @@ void LIR_Assembler::clinit_barrier(ciMethod* method) {
 
   Label L_skip_barrier;
   Register klass = rscratch1;
-  Register thread = LP64_ONLY( r15_thread ) NOT_LP64( noreg );
-  assert(thread != noreg, "x86_32 not implemented");
 
   __ mov_metadata(klass, method->holder()->constant_encoding());
-  __ clinit_barrier(klass, thread, &L_skip_barrier /*L_fast_path*/);
+  __ clinit_barrier(klass, r15_thread, &L_skip_barrier /*L_fast_path*/);
 
   __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 
@@ -401,11 +398,9 @@ int LIR_Assembler::emit_unwind_handler() {
   int offset = code_offset();
 
   // Fetch the exception from TLS and clear out exception related thread state
-  Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
-  NOT_LP64(__ get_thread(thread));
-  __ movptr(rax, Address(thread, JavaThread::exception_oop_offset()));
-  __ movptr(Address(thread, JavaThread::exception_oop_offset()), NULL_WORD);
-  __ movptr(Address(thread, JavaThread::exception_pc_offset()), NULL_WORD);
+  __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
+  __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), NULL_WORD);
+  __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), NULL_WORD);
 
   __ bind(_unwind_handler_entry);
   __ verify_not_null_oop(rax);
@@ -427,14 +422,8 @@ int LIR_Assembler::emit_unwind_handler() {
   }
 
   if (compilation()->env()->dtrace_method_probes()) {
-#ifdef _LP64
     __ mov(rdi, r15_thread);
     __ mov_metadata(rsi, method()->constant_encoding());
-#else
-    __ get_thread(rax);
-    __ movptr(Address(rsp, 0), rax);
-    __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding(), noreg);
-#endif
     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
   }
 
@@ -491,15 +480,9 @@ void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
   // Note: we do not need to round double result; float result has the right precision
   // the poll sets the condition code, but no data registers
 
-#ifdef _LP64
-  const Register thread = r15_thread;
-#else
-  const Register thread = rbx;
-  __ get_thread(thread);
-#endif
   code_stub->set_safepoint_offset(__ offset());
   __ relocate(relocInfo::poll_return_type);
-  __ safepoint_poll(*code_stub->entry(), thread, true /* at_return */, true /* in_nmethod */);
+  __ safepoint_poll(*code_stub->entry(), r15_thread, true /* at_return */, true /* in_nmethod */);
   __ ret(0);
 }
 
@@ -507,21 +490,14 @@ void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
   guarantee(info != nullptr, "Shouldn't be null");
   int offset = __ offset();
-#ifdef _LP64
   const Register poll_addr = rscratch1;
   __ movptr(poll_addr, Address(r15_thread, JavaThread::polling_page_offset()));
-#else
-  assert(tmp->is_cpu_register(), "needed");
-  const Register poll_addr = tmp->as_register();
-  __ get_thread(poll_addr);
-  __ movptr(poll_addr, Address(poll_addr, in_bytes(JavaThread::polling_page_offset())));
-#endif
   add_debug_info_for_branch(info);
   __ relocate(relocInfo::poll_type);
   address pre_pc = __ pc();
   __ testl(rax, Address(poll_addr, 0));
   address post_pc = __ pc();
-  guarantee(pointer_delta(post_pc, pre_pc, 1) == 2 LP64_ONLY(+1), "must be exact length");
+  guarantee(pointer_delta(post_pc, pre_pc, 1) == 3, "must be exact length");
   return offset;
 }
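Note on the `== 3` guarantee above: with the 32-bit variant gone, the poll is always `testl rax, [r10]`, which encodes as REX.B + opcode + ModRM = 3 bytes; the signal machinery relies on that exact length. For readers new to the mechanism, here is a minimal Linux-only sketch of the polling-page idea (illustrative, not HotSpot code — HotSpot's handler additionally records `saved_exception_pc` and redirects to the poll-return stub):

```cpp
#include <csignal>
#include <cstdio>
#include <sys/mman.h>

static char* poll_page;                          // per-thread in HotSpot
static volatile sig_atomic_t stopped = 0;

static void on_poll_fault(int, siginfo_t* info, void*) {
  if (info->si_addr == poll_page) {
    stopped = 1;                                 // "thread reached safepoint"
    mprotect(poll_page, 4096, PROT_READ);        // disarm; faulting load retries
  }
}

int main() {
  poll_page = (char*)mmap(nullptr, 4096, PROT_READ,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  struct sigaction sa = {};
  sa.sa_sigaction = on_poll_fault;
  sa.sa_flags = SA_SIGINFO;
  sigaction(SIGSEGV, &sa, nullptr);

  volatile char sink = *poll_page;               // cheap poll: page unarmed
  mprotect(poll_page, 4096, PROT_NONE);          // arm the safepoint
  sink = *poll_page;                             // faults; handler runs; retried
  printf("stopped=%d\n", (int)stopped);
  return sink & 0;
}
```
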
@@ -555,12 +531,7 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
 
     case T_LONG: {
       assert(patch_code == lir_patch_none, "no patching handled here");
-#ifdef _LP64
       __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
-#else
-      __ movptr(dest->as_register_lo(), c->as_jint_lo());
-      __ movptr(dest->as_register_hi(), c->as_jint_hi());
-#endif // _LP64
       break;
     }
 
@@ -636,17 +607,10 @@ void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 
     case T_LONG:  // fall through
     case T_DOUBLE:
-#ifdef _LP64
       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                               lo_word_offset_in_bytes),
                 (intptr_t)c->as_jlong_bits(),
                 rscratch1);
-#else
-      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
-                                              lo_word_offset_in_bytes), c->as_jint_lo_bits());
-      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
-                                              hi_word_offset_in_bytes), c->as_jint_hi_bits());
-#endif // _LP64
       break;
 
     default:
@@ -677,20 +641,15 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
       if (UseCompressedOops && !wide) {
         __ movl(as_Address(addr), NULL_WORD);
       } else {
-#ifdef _LP64
         __ xorptr(rscratch1, rscratch1);
         null_check_here = code_offset();
        __ movptr(as_Address(addr), rscratch1);
-#else
-        __ movptr(as_Address(addr), NULL_WORD);
-#endif
       }
     } else {
       if (is_literal_address(addr)) {
         ShouldNotReachHere();
         __ movoop(as_Address(addr, noreg), c->as_jobject(), rscratch1);
       } else {
-#ifdef _LP64
         __ movoop(rscratch1, c->as_jobject());
         if (UseCompressedOops && !wide) {
           __ encode_heap_oop(rscratch1);
@@ -700,16 +659,12 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
           null_check_here = code_offset();
           __ movptr(as_Address_lo(addr), rscratch1);
         }
-#else
-        __ movoop(as_Address(addr), c->as_jobject(), noreg);
-#endif
       }
     }
     break;
 
   case T_LONG:  // fall through
   case T_DOUBLE:
-#ifdef _LP64
     if (is_literal_address(addr)) {
       ShouldNotReachHere();
       __ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits());
@@ -718,11 +673,6 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
       null_check_here = code_offset();
       __ movptr(as_Address_lo(addr), r10);
     }
-#else
-    // Always reachable in 32bit so this doesn't produce useless move literal
-    __ movptr(as_Address_hi(addr), c->as_jint_hi_bits());
-    __ movptr(as_Address_lo(addr), c->as_jint_lo_bits());
-#endif // _LP64
     break;
 
   case T_BOOLEAN: // fall through
@@ -751,13 +701,11 @@ void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
 
   // move between cpu-registers
   if (dest->is_single_cpu()) {
-#ifdef _LP64
     if (src->type() == T_LONG) {
       // Can do LONG -> OBJECT
       move_regs(src->as_register_lo(), dest->as_register());
       return;
     }
-#endif
     assert(src->is_single_cpu(), "must match");
     if (src->type() == T_OBJECT) {
       __ verify_oop(src->as_register());
@@ -765,39 +713,20 @@ void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
     }
     move_regs(src->as_register(), dest->as_register());
 
   } else if (dest->is_double_cpu()) {
-#ifdef _LP64
     if (is_reference_type(src->type())) {
       // Surprising to me but we can see move of a long to t_object
       __ verify_oop(src->as_register());
       move_regs(src->as_register(), dest->as_register_lo());
       return;
     }
-#endif
     assert(src->is_double_cpu(), "must match");
     Register f_lo = src->as_register_lo();
     Register f_hi = src->as_register_hi();
     Register t_lo = dest->as_register_lo();
     Register t_hi = dest->as_register_hi();
-#ifdef _LP64
     assert(f_hi == f_lo, "must be same");
     assert(t_hi == t_lo, "must be same");
     move_regs(f_lo, t_lo);
-#else
-    assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");
-
-
-    if (f_lo == t_hi && f_hi == t_lo) {
-      swap_reg(f_lo, f_hi);
-    } else if (f_hi == t_lo) {
-      assert(f_lo != t_hi, "overwriting register");
-      move_regs(f_hi, t_hi);
-      move_regs(f_lo, t_lo);
-    } else {
-      assert(f_hi != t_lo, "overwriting register");
-      move_regs(f_lo, t_lo);
-      move_regs(f_hi, t_hi);
-    }
-#endif // LP64
 
   // move between xmm-registers
   } else if (dest->is_single_xmm()) {
@@ -831,7 +760,6 @@ void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
     Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
     Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
     __ movptr (dstLO, src->as_register_lo());
-    NOT_LP64(__ movptr (dstHI, src->as_register_hi()));
 
   } else if (src->is_single_xmm()) {
     Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
@@ -854,7 +782,6 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
 
   if (is_reference_type(type)) {
     __ verify_oop(src->as_register());
-#ifdef _LP64
     if (UseCompressedOops && !wide) {
       __ movptr(compressed_src, src->as_register());
       __ encode_heap_oop(compressed_src);
@@ -862,7 +789,6 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
         info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
       }
     }
-#endif
   }
 
   if (patch_code != lir_patch_none) {
@@ -893,14 +819,6 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
         __ movptr(as_Address(to_addr), src->as_register());
       }
       break;
-    case T_METADATA:
-      // We get here to store a method pointer to the stack to pass to
-      // a dtrace runtime call. This can't work on 64 bit with
-      // compressed klass ptrs: T_METADATA can be a compressed klass
-      // ptr or a 64 bit method pointer.
-      LP64_ONLY(ShouldNotReachHere());
-      __ movptr(as_Address(to_addr), src->as_register());
-      break;
     case T_ADDRESS:
       __ movptr(as_Address(to_addr), src->as_register());
       break;
@@ -911,35 +829,7 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
     case T_LONG: {
       Register from_lo = src->as_register_lo();
       Register from_hi = src->as_register_hi();
-#ifdef _LP64
       __ movptr(as_Address_lo(to_addr), from_lo);
-#else
-      Register base = to_addr->base()->as_register();
-      Register index = noreg;
-      if (to_addr->index()->is_register()) {
-        index = to_addr->index()->as_register();
-      }
-      if (base == from_lo || index == from_lo) {
-        assert(base != from_hi, "can't be");
-        assert(index == noreg || (index != base && index != from_hi), "can't handle this");
-        __ movl(as_Address_hi(to_addr), from_hi);
-        if (patch != nullptr) {
-          patching_epilog(patch, lir_patch_high, base, info);
-          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
-          patch_code = lir_patch_low;
-        }
-        __ movl(as_Address_lo(to_addr), from_lo);
-      } else {
-        assert(index == noreg || (index != base && index != from_lo), "can't handle this");
-        __ movl(as_Address_lo(to_addr), from_lo);
-        if (patch != nullptr) {
-          patching_epilog(patch, lir_patch_low, base, info);
-          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
-          patch_code = lir_patch_high;
-        }
-        __ movl(as_Address_hi(to_addr), from_hi);
-      }
-#endif // _LP64
       break;
     }
 
@@ -988,7 +878,6 @@ void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
     Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
     Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
     __ movptr(dest->as_register_lo(), src_addr_LO);
-    NOT_LP64(__ movptr(dest->as_register_hi(), src_addr_HI));
 
   } else if (dest->is_single_xmm()) {
     Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
@@ -1010,27 +899,14 @@ void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
       __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
       __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
     } else {
-#ifndef _LP64
-      __ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
-      __ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
-#else //no pushl on 64bits
       __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
       __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
-#endif
     }
 
   } else if (src->is_double_stack()) {
-#ifdef _LP64
     __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
     __ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));
-#else
-    __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
-    // push and pop the part at src + wordSize, adding wordSize for the previous push
-    __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));
-    __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));
-    __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
-#endif // _LP64
 
   } else {
     ShouldNotReachHere();
@@ -1113,44 +989,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
     case T_LONG: {
       Register to_lo = dest->as_register_lo();
       Register to_hi = dest->as_register_hi();
-#ifdef _LP64
       __ movptr(to_lo, as_Address_lo(addr));
-#else
-      Register base = addr->base()->as_register();
-      Register index = noreg;
-      if (addr->index()->is_register()) {
-        index = addr->index()->as_register();
-      }
-      if ((base == to_lo && index == to_hi) ||
-          (base == to_hi && index == to_lo)) {
-        // addresses with 2 registers are only formed as a result of
-        // array access so this code will never have to deal with
-        // patches or null checks.
-        assert(info == nullptr && patch == nullptr, "must be");
-        __ lea(to_hi, as_Address(addr));
-        __ movl(to_lo, Address(to_hi, 0));
-        __ movl(to_hi, Address(to_hi, BytesPerWord));
-      } else if (base == to_lo || index == to_lo) {
-        assert(base != to_hi, "can't be");
-        assert(index == noreg || (index != base && index != to_hi), "can't handle this");
-        __ movl(to_hi, as_Address_hi(addr));
-        if (patch != nullptr) {
-          patching_epilog(patch, lir_patch_high, base, info);
-          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
-          patch_code = lir_patch_low;
-        }
-        __ movl(to_lo, as_Address_lo(addr));
-      } else {
-        assert(index == noreg || (index != base && index != to_lo), "can't handle this");
-        __ movl(to_lo, as_Address_lo(addr));
-        if (patch != nullptr) {
-          patching_epilog(patch, lir_patch_low, base, info);
-          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
-          patch_code = lir_patch_high;
-        }
-        __ movl(to_hi, as_Address_hi(addr));
-      }
-#endif // _LP64
       break;
     }
 
@@ -1200,11 +1039,9 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
   }
 
   if (is_reference_type(type)) {
-#ifdef _LP64
     if (UseCompressedOops && !wide) {
       __ decode_heap_oop(dest->as_register());
     }
-#endif
     __ verify_oop(dest->as_register());
   }
 
@@ -1299,21 +1136,11 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
 
   switch (op->bytecode()) {
     case Bytecodes::_i2l:
-#ifdef _LP64
       __ movl2ptr(dest->as_register_lo(), src->as_register());
-#else
-      move_regs(src->as_register(), dest->as_register_lo());
-      move_regs(src->as_register(), dest->as_register_hi());
-      __ sarl(dest->as_register_hi(), 31);
-#endif // LP64
       break;
 
     case Bytecodes::_l2i:
-#ifdef _LP64
       __ movl(dest->as_register(), src->as_register_lo());
-#else
-      move_regs(src->as_register_lo(), dest->as_register());
-#endif
       break;
 
     case Bytecodes::_i2b:
@@ -1396,7 +1223,7 @@ void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
 
 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
   Register len = op->len()->as_register();
-  LP64_ONLY( __ movslq(len, len); )
+  __ movslq(len, len);
 
   if (UseSlowPath ||
       (!UseFastNewObjectArray && is_reference_type(op->type())) ||
@@ -1464,7 +1291,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
   Register dst = op->result_opr()->as_register();
   ciKlass* k = op->klass();
   Register Rtmp1 = noreg;
-  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
+  Register tmp_load_klass = rscratch1;
 
   // check if it needs to be profiled
   ciMethodData* md = nullptr;
@@ -1526,29 +1353,19 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
   if (!k->is_loaded()) {
     klass2reg_with_patching(k_RInfo, op->info_for_patch());
   } else {
-#ifdef _LP64
     __ mov_metadata(k_RInfo, k->constant_encoding());
-#endif // _LP64
   }
   __ verify_oop(obj);
 
   if (op->fast_check()) {
     // get object class
    // not a safepoint as obj null check happens earlier
-#ifdef _LP64
     if (UseCompressedClassPointers) {
       __ load_klass(Rtmp1, obj, tmp_load_klass);
       __ cmpptr(k_RInfo, Rtmp1);
     } else {
       __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
     }
-#else
-    if (k->is_loaded()) {
-      __ cmpklass(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
-    } else {
-      __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
-    }
-#endif
     __ jcc(Assembler::notEqual, *failure_target);
     // successful cast, fall through to profile or jump
   } else {
@@ -1557,11 +1374,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
     __ load_klass(klass_RInfo, obj, tmp_load_klass);
     if (k->is_loaded()) {
       // See if we get an immediate positive hit
-#ifdef _LP64
       __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
-#else
-      __ cmpklass(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
-#endif // _LP64
       if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
         __ jcc(Assembler::notEqual, *failure_target);
         // successful cast, fall through to profile or jump
@@ -1569,19 +1382,11 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
         // See if we get an immediate positive hit
         __ jcc(Assembler::equal, *success_target);
         // check for self
-#ifdef _LP64
         __ cmpptr(klass_RInfo, k_RInfo);
-#else
-        __ cmpklass(klass_RInfo, k->constant_encoding());
-#endif // _LP64
         __ jcc(Assembler::equal, *success_target);
 
         __ push(klass_RInfo);
-#ifdef _LP64
         __ push(k_RInfo);
-#else
-        __ pushklass(k->constant_encoding(), noreg);
-#endif // _LP64
         __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id)));
         __ pop(klass_RInfo);
         __ pop(klass_RInfo);
@@ -1610,7 +1415,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
 
 
 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
-  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
+  Register tmp_load_klass = rscratch1;
   LIR_Code code = op->code();
   if (code == lir_store_check) {
     Register value = op->object()->as_register();
@@ -1714,17 +1519,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
 
 
 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
-  if (LP64_ONLY(false &&) op->code() == lir_cas_long) {
-    assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
-    assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
-    assert(op->new_value()->as_register_lo() == rbx, "wrong register");
-    assert(op->new_value()->as_register_hi() == rcx, "wrong register");
-    Register addr = op->addr()->as_register();
-    __ lock();
-    NOT_LP64(__ cmpxchg8(Address(addr, 0)));
-
-  } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
-    NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
+  if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
     Register newval = op->new_value()->as_register();
     Register cmpval = op->cmp_value()->as_register();
@@ -1734,8 +1529,7 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
     assert(cmpval != addr, "cmp and addr must be in different registers");
     assert(newval != addr, "new value and addr must be in different registers");
 
-    if ( op->code() == lir_cas_obj) {
-#ifdef _LP64
+    if (op->code() == lir_cas_obj) {
       if (UseCompressedOops) {
         __ encode_heap_oop(cmpval);
         __ mov(rscratch1, newval);
@@ -1743,9 +1537,7 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
         __ lock();
         // cmpval (rax) is implicitly used by this instruction
         __ cmpxchgl(rscratch1, Address(addr, 0));
-      } else
-#endif
-      {
+      } else {
         __ lock();
         __ cmpxchgptr(newval, Address(addr, 0));
       }
@@ -1754,7 +1546,6 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
       __ lock();
       __ cmpxchgl(newval, Address(addr, 0));
     }
-#ifdef _LP64
   } else if (op->code() == lir_cas_long) {
     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
     Register newval = op->new_value()->as_register_lo();
@@ -1766,7 +1557,6 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
     assert(newval != addr, "new value and addr must be in different registers");
     __ lock();
     __ cmpxchgq(newval, Address(addr, 0));
-#endif // _LP64
   } else {
     Unimplemented();
   }
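Why `lir_cas_obj` stays a 32-bit `cmpxchgl` under compressed oops: both the expected and the new reference are narrowed first, mirroring the `encode_heap_oop()` calls above. A sketch of the shape with illustrative (assumed) base/shift values, not HotSpot code:

```cpp
#include <atomic>
#include <cstdint>

static const uintptr_t heap_base = 0x800000000ULL;  // assumed for illustration
static const unsigned  oop_shift = 3;               // assumed for illustration

// Narrow a heap pointer to a 32-bit "narrow oop" (offset from base, shifted).
uint32_t encode_heap_oop(void* p) {
  return p ? (uint32_t)(((uintptr_t)p - heap_base) >> oop_shift) : 0;
}

// Both sides of the CAS are encoded, then a 32-bit lock cmpxchg runs.
bool cas_oop_field(std::atomic<uint32_t>* field, void* expect, void* newval) {
  uint32_t cmp = encode_heap_oop(expect);           // like encode_heap_oop(cmpval)
  uint32_t nv  = encode_heap_oop(newval);           // like the rscratch1 copy
  return field->compare_exchange_strong(cmp, nv);   // lock cmpxchgl
}
```
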
@@ -1809,12 +1599,10 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
       assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
       assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
       __ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo());
-      NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), opr2->as_register_hi());)
     } else if (opr2->is_single_stack()) {
       __ cmovl(ncond, result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()));
     } else if (opr2->is_double_stack()) {
       __ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes));
-      NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), frame_map()->address_for_slot(opr2->double_stack_ix(), hi_word_offset_in_bytes));)
     } else {
       ShouldNotReachHere();
     }
@@ -1890,28 +1678,16 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
       // cpu register - cpu register
       Register rreg_lo = right->as_register_lo();
       Register rreg_hi = right->as_register_hi();
-      NOT_LP64(assert_different_registers(lreg_lo, lreg_hi, rreg_lo, rreg_hi));
-      LP64_ONLY(assert_different_registers(lreg_lo, rreg_lo));
+      assert_different_registers(lreg_lo, rreg_lo);
      switch (code) {
        case lir_add:
          __ addptr(lreg_lo, rreg_lo);
-          NOT_LP64(__ adcl(lreg_hi, rreg_hi));
          break;
        case lir_sub:
          __ subptr(lreg_lo, rreg_lo);
-          NOT_LP64(__ sbbl(lreg_hi, rreg_hi));
          break;
        case lir_mul:
-#ifdef _LP64
          __ imulq(lreg_lo, rreg_lo);
-#else
-          assert(lreg_lo == rax && lreg_hi == rdx, "must be");
-          __ imull(lreg_hi, rreg_lo);
-          __ imull(rreg_hi, lreg_lo);
-          __ addl (rreg_hi, lreg_hi);
-          __ mull (rreg_lo);
-          __ addl (lreg_hi, rreg_hi);
-#endif // _LP64
          break;
        default:
          ShouldNotReachHere();
      }
 
    } else if (right->is_constant()) {
      // cpu register - constant
-#ifdef _LP64
      jlong c = right->as_constant_ptr()->as_jlong_bits();
      __ movptr(r10, (intptr_t) c);
      switch (code) {
@@ -1932,22 +1707,6 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
        default:
          ShouldNotReachHere();
      }
-#else
-      jint c_lo = right->as_constant_ptr()->as_jint_lo();
-      jint c_hi = right->as_constant_ptr()->as_jint_hi();
-      switch (code) {
-        case lir_add:
-          __ addptr(lreg_lo, c_lo);
-          __ adcl(lreg_hi, c_hi);
-          break;
-        case lir_sub:
-          __ subptr(lreg_lo, c_lo);
-          __ sbbl(lreg_hi, c_hi);
-          break;
-        default:
-          ShouldNotReachHere();
-      }
-#endif // _LP64
 
    } else {
      ShouldNotReachHere();
@@ -2123,7 +1882,6 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
     Register l_lo = left->as_register_lo();
     Register l_hi = left->as_register_hi();
     if (right->is_constant()) {
-#ifdef _LP64
       __ mov64(rscratch1, right->as_constant_ptr()->as_jlong());
       switch (code) {
         case lir_logic_and:
@@ -2137,50 +1895,22 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
           break;
         default: ShouldNotReachHere();
       }
-#else
-      int r_lo = right->as_constant_ptr()->as_jint_lo();
-      int r_hi = right->as_constant_ptr()->as_jint_hi();
-      switch (code) {
-        case lir_logic_and:
-          __ andl(l_lo, r_lo);
-          __ andl(l_hi, r_hi);
-          break;
-        case lir_logic_or:
-          __ orl(l_lo, r_lo);
-          __ orl(l_hi, r_hi);
-          break;
-        case lir_logic_xor:
-          __ xorl(l_lo, r_lo);
-          __ xorl(l_hi, r_hi);
-          break;
-        default: ShouldNotReachHere();
-      }
-#endif // _LP64
     } else {
-#ifdef _LP64
       Register r_lo;
       if (is_reference_type(right->type())) {
         r_lo = right->as_register();
       } else {
         r_lo = right->as_register_lo();
       }
-#else
-      Register r_lo = right->as_register_lo();
-      Register r_hi = right->as_register_hi();
-      assert(l_lo != r_hi, "overwriting registers");
-#endif
       switch (code) {
         case lir_logic_and:
           __ andptr(l_lo, r_lo);
-          NOT_LP64(__ andptr(l_hi, r_hi);)
          break;
        case lir_logic_or:
          __ orptr(l_lo, r_lo);
-          NOT_LP64(__ orptr(l_hi, r_hi);)
          break;
        case lir_logic_xor:
          __ xorptr(l_lo, r_lo);
-          NOT_LP64(__ xorptr(l_hi, r_hi);)
          break;
        default: ShouldNotReachHere();
      }
@@ -2189,19 +1919,7 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
 
     Register dst_lo = dst->as_register_lo();
     Register dst_hi = dst->as_register_hi();
 
-#ifdef _LP64
     move_regs(l_lo, dst_lo);
-#else
-    if (dst_lo == l_hi) {
-      assert(dst_hi != l_lo, "overwriting registers");
-      move_regs(l_hi, dst_hi);
-      move_regs(l_lo, dst_lo);
-    } else {
-      assert(dst_lo != l_hi, "overwriting registers");
-      move_regs(l_lo, dst_lo);
-      move_regs(l_hi, dst_hi);
-    }
-#endif // _LP64
   }
 }
@@ -2329,27 +2047,11 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
     Register xlo = opr1->as_register_lo();
     Register xhi = opr1->as_register_hi();
     if (opr2->is_double_cpu()) {
-#ifdef _LP64
       __ cmpptr(xlo, opr2->as_register_lo());
-#else
-      // cpu register - cpu register
-      Register ylo = opr2->as_register_lo();
-      Register yhi = opr2->as_register_hi();
-      __ subl(xlo, ylo);
-      __ sbbl(xhi, yhi);
-      if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
-        __ orl(xhi, xlo);
-      }
-#endif // _LP64
     } else if (opr2->is_constant()) {
       // cpu register - constant 0
       assert(opr2->as_jlong() == (jlong)0, "only handles zero");
-#ifdef _LP64
       __ cmpptr(xlo, (int32_t)opr2->as_jlong());
-#else
-      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles equals case");
-      __ orl(xhi, xlo);
-#endif // _LP64
     } else {
       ShouldNotReachHere();
     }
@@ -2398,12 +2100,10 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
 
   } else if (opr1->is_address() && opr2->is_constant()) {
     LIR_Const* c = opr2->as_constant_ptr();
-#ifdef _LP64
     if (is_reference_type(c->type())) {
       assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
       __ movoop(rscratch1, c->as_jobject());
     }
-#endif // LP64
     if (op->info() != nullptr) {
       add_debug_info_for_null_check_here(op->info());
     }
@@ -2412,13 +2112,9 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
     if (c->type() == T_INT) {
       __ cmpl(as_Address(addr), c->as_jint());
     } else if (is_reference_type(c->type())) {
-#ifdef _LP64
       // %%% Make this explode if addr isn't reachable until we figure out a
       // better strategy by giving noreg as the temp for as_Address
       __ cmpoop(rscratch1, as_Address(addr, noreg));
-#else
-      __ cmpoop(as_Address(addr), c->as_jobject());
-#endif // _LP64
     } else {
       ShouldNotReachHere();
     }
@@ -2442,7 +2138,6 @@ void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Op
     }
   } else {
     assert(code == lir_cmp_l2i, "check");
-#ifdef _LP64
     Label done;
     Register dest = dst->as_register();
     __ cmpptr(left->as_register_lo(), right->as_register_lo());
@@ -2451,13 +2146,6 @@ void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Op
     __ setb(Assembler::notZero, dest);
     __ movzbl(dest, dest);
     __ bind(done);
-#else
-    __ lcmp2int(left->as_register_hi(),
-                left->as_register_lo(),
-                right->as_register_hi(),
-                right->as_register_lo());
-    move_regs(left->as_register_hi(), dst->as_register());
-#endif // _LP64
   }
 }
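For reference, the surviving `lir_cmp_l2i` sequence computes `Long.compare` semantics with only one branch: the destination is preloaded with -1, "less" short-circuits, otherwise `setb`/`movzbl` yields 0 or 1. A scalar C++ sketch of the same result:

```cpp
#include <cstdint>

int32_t cmp_l2i(int64_t left, int64_t right) {
  if (left < right) return -1;       // cmpptr + jcc(less, done), dest preloaded
  return (left != right) ? 1 : 0;    // setb(Assembler::notZero) + movzbl
}
```
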
@@ -2583,22 +2271,12 @@ void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr
     Register lo = left->as_register_lo();
     Register hi = left->as_register_hi();
     assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX");
-#ifdef _LP64
     switch (code) {
       case lir_shl:  __ shlptr(lo);        break;
       case lir_shr:  __ sarptr(lo);        break;
       case lir_ushr: __ shrptr(lo);        break;
       default: ShouldNotReachHere();
     }
-#else
-
-    switch (code) {
-      case lir_shl:  __ lshl(hi, lo);        break;
-      case lir_shr:  __ lshr(hi, lo, true);  break;
-      case lir_ushr: __ lshr(hi, lo, false); break;
-      default: ShouldNotReachHere();
-    }
-#endif // LP64
   } else {
     ShouldNotReachHere();
   }
@@ -2619,9 +2297,6 @@ void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr de
       default: ShouldNotReachHere();
     }
   } else if (dest->is_double_cpu()) {
-#ifndef _LP64
-    Unimplemented();
-#else
     // first move left into dest so that left is not destroyed by the shift
     Register value = dest->as_register_lo();
     count = count & 0x1F; // Java spec
@@ -2633,7 +2308,6 @@ void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr de
       case lir_ushr: __ shrptr(value, count); break;
       default: ShouldNotReachHere();
     }
-#endif // _LP64
   } else {
     ShouldNotReachHere();
   }
@@ -2683,7 +2357,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
   Register dst_pos = op->dst_pos()->as_register();
   Register length  = op->length()->as_register();
   Register tmp = op->tmp()->as_register();
-  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
+  Register tmp_load_klass = rscratch1;
   Register tmp2 = UseCompactObjectHeaders ? rscratch2 : noreg;
 
   CodeStub* stub = op->stub();
@@ -2708,13 +2382,11 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
     // these are just temporary placements until we need to reload
     store_parameter(src_pos, 3);
     store_parameter(src, 4);
-    NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)
 
     address copyfunc_addr = StubRoutines::generic_arraycopy();
     assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
 
     // pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint
-#ifdef _LP64
     // The arguments are in java calling convention so we can trivially shift them to C
     // convention
     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
@@ -2745,21 +2417,6 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
 #endif
     __ call(RuntimeAddress(copyfunc_addr));
 #endif // _WIN64
-#else
-    __ push(length);
-    __ push(dst_pos);
-    __ push(dst);
-    __ push(src_pos);
-    __ push(src);
-
-#ifndef PRODUCT
-    if (PrintC1Statistics) {
-      __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1);
-    }
-#endif
-    __ call_VM_leaf(copyfunc_addr, 5); // removes pushed parameter from the stack
-
-#endif // _LP64
 
     __ testl(rax, rax);
     __ jcc(Assembler::equal, *stub->continuation());
@@ -2865,10 +2522,8 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
       __ jcc(Assembler::less, *stub->entry());
     }
 
-#ifdef _LP64
     __ movl2ptr(src_pos, src_pos); //higher 32bits must be null
     __ movl2ptr(dst_pos, dst_pos); //higher 32bits must be null
-#endif
 
     if (flags & LIR_OpArrayCopy::type_check) {
       // We don't know the array types are compatible
@@ -2932,21 +2587,6 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
       store_parameter(src_pos, 3);
       store_parameter(src, 4);
 
-#ifndef _LP64
-      Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());
-      __ movptr(tmp, dst_klass_addr);
-      __ movptr(tmp, Address(tmp, ObjArrayKlass::element_klass_offset()));
-      __ push(tmp);
-      __ movl(tmp, Address(tmp, Klass::super_check_offset_offset()));
-      __ push(tmp);
-      __ push(length);
-      __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
-      __ push(tmp);
-      __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
-      __ push(tmp);
-
-      __ call_VM_leaf(copyfunc_addr, 5);
-#else
       __ movl2ptr(length, length); //higher 32bits must be null
 
       __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
@@ -2973,8 +2613,6 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
       __ call(RuntimeAddress(copyfunc_addr));
 #endif
 
-#endif
-
 #ifndef PRODUCT
       if (PrintC1Statistics) {
         Label failed;
@@ -3030,11 +2668,9 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
     // but not necessarily exactly of type default_type.
     Label known_ok, halt;
     __ mov_metadata(tmp, default_type->constant_encoding());
-#ifdef _LP64
     if (UseCompressedClassPointers) {
       __ encode_klass_not_null(tmp, rscratch1);
     }
-#endif
 
     if (basic_type != T_OBJECT) {
       __ cmp_klass(tmp, dst, tmp2);
@@ -3059,21 +2695,12 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
     }
 #endif
 
-#ifdef _LP64
     assert_different_registers(c_rarg0, dst, dst_pos, length);
     __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
     assert_different_registers(c_rarg1, length);
     __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
     __ mov(c_rarg2, length);
-#else
-    __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
-    store_parameter(tmp, 0);
-    __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
-    store_parameter(tmp, 1);
-    store_parameter(length, 2);
-#endif // _LP64
-
     bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
     bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
     const char *name;
@@ -3146,7 +2773,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
   ciMethod* method = op->profiled_method();
   int bci = op->profiled_bci();
   ciMethod* callee = op->profiled_callee();
-  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
+  Register tmp_load_klass = rscratch1;
 
   // Update counter for all call types
   ciMethodData* md = method->method_data_or_null();
@@ -3217,7 +2844,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
   Register obj = op->obj()->as_register();
   Register tmp = op->tmp()->as_pointer_register();
-  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
+  Register tmp_load_klass = rscratch1;
   Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
   ciKlass* exact_klass = op->exact_klass();
   intptr_t current_klass = op->current_klass();
@@ -3237,17 +2864,9 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
 
 #ifdef ASSERT
   if (obj == tmp) {
-#ifdef _LP64
     assert_different_registers(obj, rscratch1, mdo_addr.base(), mdo_addr.index());
-#else
-    assert_different_registers(obj, mdo_addr.base(), mdo_addr.index());
-#endif
   } else {
-#ifdef _LP64
     assert_different_registers(obj, tmp, rscratch1, mdo_addr.base(), mdo_addr.index());
-#else
-    assert_different_registers(obj, tmp, mdo_addr.base(), mdo_addr.index());
-#endif
   }
 #endif
   if (do_null) {
@@ -3301,9 +2920,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
       } else {
         __ load_klass(tmp, obj, tmp_load_klass);
       }
-#ifdef _LP64
       __ mov(rscratch1, tmp); // save original value before XOR
-#endif
       __ xorptr(tmp, mdo_addr);
       __ testptr(tmp, TypeEntries::type_klass_mask);
       // klass seen before, nothing to do. The unknown bit may have been
@@ -3316,7 +2933,6 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
         if (TypeEntries::is_type_none(current_klass)) {
           __ testptr(mdo_addr, TypeEntries::type_mask);
           __ jccb(Assembler::zero, none);
-#ifdef _LP64
           // There is a chance that the checks above (re-reading profiling
           // data from memory) fail if another thread has just set the
           // profiling to this obj's klass
@@ -3324,7 +2940,6 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
           __ xorptr(tmp, mdo_addr);
           __ testptr(tmp, TypeEntries::type_klass_mask);
           __ jccb(Assembler::zero, next);
-#endif
         }
       } else {
         assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
@@ -3418,22 +3033,9 @@ void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
 
   } else if (left->is_double_cpu()) {
     Register lo = left->as_register_lo();
-#ifdef _LP64
     Register dst = dest->as_register_lo();
     __ movptr(dst, lo);
     __ negptr(dst);
-#else
-    Register hi = left->as_register_hi();
-    __ lneg(hi, lo);
-    if (dest->as_register_lo() == hi) {
-      assert(dest->as_register_hi() != lo, "destroying register");
-      move_regs(hi, dest->as_register_hi());
-      move_regs(lo, dest->as_register_lo());
-    } else {
-      move_regs(lo, dest->as_register_lo());
-      move_regs(hi, dest->as_register_hi());
-    }
-#endif // _LP64
 
   } else if (dest->is_single_xmm()) {
     assert(!tmp->is_valid(), "do not need temporary");
@@ -3496,13 +3098,7 @@ void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type,
 
   if (src->is_double_xmm()) {
     if (dest->is_double_cpu()) {
-#ifdef _LP64
       __ movdq(dest->as_register_lo(), src->as_xmm_double_reg());
-#else
-      __ movdl(dest->as_register_lo(), src->as_xmm_double_reg());
-      __ psrlq(src->as_xmm_double_reg(), 32);
-      __ movdl(dest->as_register_hi(), src->as_xmm_double_reg());
-#endif // _LP64
     } else if (dest->is_double_stack()) {
       __ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg());
     } else if (dest->is_address()) {
@@ -3519,6 +3115,7 @@ void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type,
     } else {
       ShouldNotReachHere();
     }
+
   } else {
     ShouldNotReachHere();
   }
@@ -3601,12 +3198,7 @@ void LIR_Assembler::on_spin_wait() {
 
 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
   assert(result_reg->is_register(), "check");
-#ifdef _LP64
-  // __ get_thread(result_reg->as_register_lo());
   __ mov(result_reg->as_register(), r15_thread);
-#else
-  __ get_thread(result_reg->as_register());
-#endif // _LP64
 }
 
 
@@ -3627,7 +3219,6 @@ void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr
   } else if (data->is_oop()) {
     assert (code == lir_xchg, "xadd for oops");
     Register obj = data->as_register();
-#ifdef _LP64
     if (UseCompressedOops) {
       __ encode_heap_oop(obj);
       __ xchgl(obj, as_Address(src->as_address_ptr()));
@@ -3635,11 +3226,7 @@ void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr
     } else {
       __ xchgptr(obj, as_Address(src->as_address_ptr()));
     }
-#else
-    __ xchgl(obj, as_Address(src->as_address_ptr()));
-#endif
   } else if (data->type() == T_LONG) {
-#ifdef _LP64
     assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
     if (code == lir_xadd) {
       __ lock();
@@ -3647,9 +3234,6 @@ void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr
     } else {
       __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
     }
-#else
-    ShouldNotReachHere();
-#endif
   } else {
     ShouldNotReachHere();
   }
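Aside on the `emit_profile_type` hunks above: the `xorptr` + `testptr` pair is a compact "same klass already recorded?" check, since the MDO cell stores a klass pointer with flag bits in its low bits. A self-contained sketch with an assumed flag mask (the real mask is `TypeEntries::type_klass_mask`):

```cpp
#include <cstdint>

static const uintptr_t type_klass_mask = ~(uintptr_t)0x7;  // assumed flag bits

// XOR leaves only the differing bits; if none fall outside the flag mask,
// the freshly loaded klass matches the one recorded in the MDO cell.
bool klass_seen_before(uintptr_t mdo_cell, uintptr_t klass) {
  return ((mdo_cell ^ klass) & type_klass_mask) == 0;      // xorptr + testptr
}
```
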
diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.hpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.hpp
index c8f97cece6d8b..8524dc90276f0 100644
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.hpp
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.hpp
@@ -46,9 +46,9 @@
                   Register recv, Label* update_done);
 
   enum {
-    _call_stub_size = NOT_LP64(15) LP64_ONLY(28),
+    _call_stub_size = 28,
     _exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175),
-    _deopt_handler_size = NOT_LP64(10) LP64_ONLY(17)
+    _deopt_handler_size = 17
   };
 
 public:
diff --git a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp
index fe6d6a58b00dd..60ce3419dfb42 100644
--- a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp
@@ -142,7 +142,6 @@ bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
 
 
 LIR_Opr LIRGenerator::safepoint_poll_register() {
-  NOT_LP64( return new_register(T_ADDRESS); )
   return LIR_OprFact::illegalOpr;
 }
 
@@ -152,7 +151,6 @@ LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
   assert(base->is_register(), "must be");
   if (index->is_constant()) {
     LIR_Const *constant = index->as_constant_ptr();
-#ifdef _LP64
     jlong c;
     if (constant->type() == T_INT) {
       c = (jlong(index->as_jint()) << shift) + disp;
@@ -167,11 +165,6 @@ LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
       __ move(index, tmp);
       return new LIR_Address(base, tmp, type);
     }
-#else
-    return new LIR_Address(base,
-                           ((intx)(constant->as_jint()) << shift) + disp,
-                           type);
-#endif
   } else {
     return new LIR_Address(base, index, (LIR_Address::Scale)shift, disp, type);
   }
@@ -185,7 +178,6 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
   LIR_Address* addr;
   if (index_opr->is_constant()) {
     int elem_size = type2aelembytes(type);
-#ifdef _LP64
     jint index = index_opr->as_jint();
     jlong disp = offset_in_bytes + (jlong)(index) * elem_size;
     if (disp > max_jint) {
@@ -197,28 +189,12 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
     } else {
       addr = new LIR_Address(array_opr, (intx)disp, type);
     }
-#else
-    // A displacement overflow can also occur for x86 but that is not a problem due to the 32-bit address range!
-    // Let's assume an array 'a' and an access with displacement 'disp'. When disp overflows, then "a + disp" will
-    // always be negative (i.e. underflows the 32-bit address range):
-    // Let N = 2^32: a + signed_overflow(disp) = a + disp - N.
-    // "a + disp" is always smaller than N. If an index was chosen which would point to an address beyond N, then
-    // range checks would catch that and throw an exception. Thus, a + disp < 0 holds which means that it always
-    // underflows the 32-bit address range:
-    // unsigned_underflow(a + signed_overflow(disp)) = unsigned_underflow(a + disp - N)
-    //                                               = (a + disp - N) + N = a + disp
-    // This shows that we still end up at the correct address with a displacement overflow due to the 32-bit address
-    // range limitation. This overflow only needs to be handled if addresses can be larger as on 64-bit platforms.
-    addr = new LIR_Address(array_opr, offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
-#endif // _LP64
   } else {
-#ifdef _LP64
     if (index_opr->type() == T_INT) {
       LIR_Opr tmp = new_register(T_LONG);
       __ convert(Bytecodes::_i2l, index_opr, tmp);
       index_opr = tmp;
     }
-#endif // _LP64
 
     addr = new LIR_Address(array_opr,
                            index_opr,
                            LIR_Address::scale(type),
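Worked example for the surviving `disp > max_jint` check in `emit_array_address`: x86-64 addressing modes carry only a 32-bit signed displacement, so the offset must be computed and range-checked in 64-bit arithmetic first (the deleted comment explains why the 32-bit port could skip this). Values below are illustrative:

```cpp
#include <climits>
#include <cstdint>

// disp = offset_in_bytes + index * elem_size, computed without overflow.
int64_t array_elem_disp(int64_t offset_in_bytes, int32_t index, int elem_size) {
  return offset_in_bytes + (int64_t)index * elem_size;
}
// e.g. an array with a 16-byte header and index 1 << 28 on 8-byte elements:
//   array_elem_disp(16, 1 << 28, 8) == 2147483664 > INT_MAX,
// so the generator must materialize the offset in a 64-bit temp register
// instead of folding it into the addressing mode.
```
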
@@ -358,34 +334,12 @@ void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
     left.dont_load_item();
   }
 
-#ifndef _LP64
-  // do not load right operand if it is a constant.  only 0 and 1 are
-  // loaded because there are special instructions for loading them
-  // without memory access (not needed for SSE2 instructions)
-  bool must_load_right = false;
-  if (right.is_constant()) {
-    LIR_Const* c = right.result()->as_constant_ptr();
-    assert(c != nullptr, "invalid constant");
-    assert(c->type() == T_FLOAT || c->type() == T_DOUBLE, "invalid type");
-
-    if (c->type() == T_FLOAT) {
-      must_load_right = UseSSE < 1 && (c->is_one_float() || c->is_zero_float());
-    } else {
-      must_load_right = UseSSE < 2 && (c->is_one_double() || c->is_zero_double());
-    }
-  }
-#endif // !LP64
-
   if (must_load_both) {
     // frem and drem destroy also right operand, so move it to a new register
     right.set_destroys_register();
     right.load_item();
   } else if (right.is_register()) {
     right.load_item();
-#ifndef _LP64
-  } else if (must_load_right) {
-    right.load_item();
-#endif // !LP64
   } else {
     right.dont_load_item();
   }
@@ -395,7 +349,6 @@ void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
     tmp = new_register(T_DOUBLE);
   }
 
-#ifdef _LP64
   if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) {
     // frem and drem are implemented as a direct call into the runtime.
     LIRItem left(x->x(), this);
@@ -430,27 +383,6 @@ void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
     arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), tmp);
     set_result(x, reg);
   }
-#else
-  if ((UseSSE >= 1 && x->op() == Bytecodes::_frem) || (UseSSE >= 2 && x->op() == Bytecodes::_drem)) {
-    // special handling for frem and drem: no SSE instruction, so must use FPU with temporary fpu stack slots
-    LIR_Opr fpu0, fpu1;
-    if (x->op() == Bytecodes::_frem) {
-      fpu0 = LIR_OprFact::single_fpu(0);
-      fpu1 = LIR_OprFact::single_fpu(1);
-    } else {
-      fpu0 = LIR_OprFact::double_fpu(0);
-      fpu1 = LIR_OprFact::double_fpu(1);
-    }
-    __ move(right.result(), fpu1); // order of left and right operand is important!
-    __ move(left.result(), fpu0);
-    __ rem (fpu0, fpu1, fpu0);
-    __ move(fpu0, reg);
-
-  } else {
-    arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), tmp);
-  }
-  set_result(x, reg);
-#endif // _LP64
 }
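With the x87 `__ rem` path gone, `frem`/`drem` are always runtime calls on 64-bit. Java's `%` on floating point follows the C `fmod` family (result takes the dividend's sign, NaN propagates), which — to my understanding — is what the runtime entries compute; a hedged sketch:

```cpp
#include <cmath>

// Illustrative equivalents of the frem/drem runtime calls (not HotSpot code).
float  java_frem(float x, float y)   { return std::fmod(x, y); }
double java_drem(double x, double y) { return std::fmod(x, y); }
```
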
@@ -740,7 +672,7 @@ LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value)
   value.load_item(); // Because we want a 2-arg form of xchg and xadd
   __ move(value.result(), result);
-  assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
+  assert(type == T_INT || is_oop || type == T_LONG, "unexpected type");
   __ xchg(addr, result, result, LIR_OprFact::illegalOpr);
   return result;
 }
@@ -750,7 +682,7 @@ LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
   value.load_item(); // Because we want a 2-arg form of xchg and xadd
   __ move(value.result(), result);
-  assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
+  assert(type == T_INT || type == T_LONG, "unexpected type");
   __ xadd(addr, result, result, LIR_OprFact::illegalOpr);
   return result;
 }
@@ -788,10 +720,7 @@ void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
   if (x->id() == vmIntrinsics::_dexp || x->id() == vmIntrinsics::_dlog ||
       x->id() == vmIntrinsics::_dpow || x->id() == vmIntrinsics::_dcos ||
       x->id() == vmIntrinsics::_dsin || x->id() == vmIntrinsics::_dtan ||
-      x->id() == vmIntrinsics::_dlog10
-#ifdef _LP64
-      || x->id() == vmIntrinsics::_dtanh
-#endif
+      x->id() == vmIntrinsics::_dlog10 || x->id() == vmIntrinsics::_dtanh
      ) {
     do_LibmIntrinsic(x);
     return;
@@ -799,12 +728,6 @@ void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
 
   LIRItem value(x->argument_at(0), this);
 
-  bool use_fpu = false;
-#ifndef _LP64
-  if (UseSSE < 2) {
-    value.set_destroys_register();
-  }
-#endif // !LP64
   value.load_item();
 
   LIR_Opr calc_input = value.result();
@@ -832,10 +755,6 @@ void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
     default:
       ShouldNotReachHere();
   }
-
-  if (use_fpu) {
-    __ move(calc_result, x->operand());
-  }
 }
 
 void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
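On the "2-arg form of xchg and xadd" comment kept above: `lock xadd`/`xchg` return the old value in the same register that supplied the operand, hence the move of the value into `result` first. An `std::atomic` sketch of the same contract:

```cpp
#include <atomic>
#include <cstdint>

// Returns the previous value, like lock xaddq (the register is overwritten).
int64_t atomic_add_old(std::atomic<int64_t>* addr, int64_t v) {
  return addr->fetch_add(v);
}
// Returns the previous value, like xchgq (implicitly locked on x86).
int64_t atomic_xchg_old(std::atomic<int64_t>* addr, int64_t v) {
  return addr->exchange(v);
}
```
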
// On the other slow/special case side we call C and the arg @@ -985,7 +890,6 @@ void LIRGenerator::do_ArrayCopy(Intrinsic* x) { length.load_item_force (FrameMap::as_opr(j_rarg4)); LIR_Opr tmp = FrameMap::as_opr(j_rarg5); -#endif // LP64 set_no_result(x); @@ -1027,18 +931,11 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) { } LIR_Opr base_op = buf.result(); -#ifndef _LP64 - if (!is_updateBytes) { // long b raw address - base_op = new_register(T_INT); - __ convert(Bytecodes::_l2i, buf.result(), base_op); - } -#else if (index->is_valid()) { LIR_Opr tmp = new_register(T_LONG); __ convert(Bytecodes::_i2l, index, tmp); index = tmp; } -#endif LIR_Address* a = new LIR_Address(base_op, index, @@ -1172,14 +1069,6 @@ void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) { } LIR_Opr result_b = b.result(); -#ifndef _LP64 - result_a = new_register(T_INT); - __ convert(Bytecodes::_l2i, a.result(), result_a); - result_b = new_register(T_INT); - __ convert(Bytecodes::_l2i, b.result(), result_b); -#endif - - LIR_Address* addr_a = new LIR_Address(result_a, result_aOffset, constant_aOffset, @@ -1214,7 +1103,6 @@ void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) { } void LIRGenerator::do_Convert(Convert* x) { -#ifdef _LP64 LIRItem value(x->value(), this); value.load_item(); LIR_Opr input = value.result(); @@ -1222,66 +1110,6 @@ void LIRGenerator::do_Convert(Convert* x) { __ convert(x->op(), input, result); assert(result->is_virtual(), "result must be virtual register"); set_result(x, result); -#else - // flags that vary for the different operations and different SSE-settings - bool fixed_input = false, fixed_result = false, round_result = false, needs_stub = false; - - switch (x->op()) { - case Bytecodes::_i2l: // fall through - case Bytecodes::_l2i: // fall through - case Bytecodes::_i2b: // fall through - case Bytecodes::_i2c: // fall through - case Bytecodes::_i2s: fixed_input = false; fixed_result = false; round_result = false; needs_stub = false; break; - - case Bytecodes::_f2d: fixed_input = UseSSE == 1; fixed_result = false; round_result = false; needs_stub = false; break; - case Bytecodes::_d2f: fixed_input = false; fixed_result = UseSSE == 1; round_result = UseSSE < 1; needs_stub = false; break; - case Bytecodes::_i2f: fixed_input = false; fixed_result = false; round_result = UseSSE < 1; needs_stub = false; break; - case Bytecodes::_i2d: fixed_input = false; fixed_result = false; round_result = false; needs_stub = false; break; - case Bytecodes::_f2i: fixed_input = false; fixed_result = false; round_result = false; needs_stub = true; break; - case Bytecodes::_d2i: fixed_input = false; fixed_result = false; round_result = false; needs_stub = true; break; - case Bytecodes::_l2f: fixed_input = false; fixed_result = UseSSE >= 1; round_result = UseSSE < 1; needs_stub = false; break; - case Bytecodes::_l2d: fixed_input = false; fixed_result = UseSSE >= 2; round_result = UseSSE < 2; needs_stub = false; break; - case Bytecodes::_f2l: fixed_input = true; fixed_result = true; round_result = false; needs_stub = false; break; - case Bytecodes::_d2l: fixed_input = true; fixed_result = true; round_result = false; needs_stub = false; break; - default: ShouldNotReachHere(); - } - - LIRItem value(x->value(), this); - value.load_item(); - LIR_Opr input = value.result(); - LIR_Opr result = rlock(x); - - // arguments of lir_convert - LIR_Opr conv_input = input; - LIR_Opr conv_result = result; - ConversionStub* stub = nullptr; - - if (fixed_input) { - conv_input = fixed_register_for(input->type()); - __ 
move(input, conv_input); - } - - assert(fixed_result == false || round_result == false, "cannot set both"); - if (fixed_result) { - conv_result = fixed_register_for(result->type()); - } else if (round_result) { - result = new_register(result->type()); - set_vreg_flag(result, must_start_in_memory); - } - - if (needs_stub) { - stub = new ConversionStub(x->op(), conv_input, conv_result); - } - - __ convert(x->op(), conv_input, conv_result, stub); - - if (result != conv_result) { - __ move(conv_result, result); - } - - assert(result->is_virtual(), "result must be virtual register"); - set_result(x, result); -#endif // _LP64 } @@ -1547,13 +1375,7 @@ void LIRGenerator::do_If(If* x) { LIR_Opr LIRGenerator::getThreadPointer() { -#ifdef _LP64 return FrameMap::as_pointer_opr(r15_thread); -#else - LIR_Opr result = new_register(T_INT); - __ get_thread(result); - return result; -#endif // } void LIRGenerator::trace_block_entry(BlockBegin* block) { @@ -1598,12 +1420,6 @@ void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result, LIR_Opr temp_double = new_register(T_DOUBLE); __ volatile_move(LIR_OprFact::address(address), temp_double, T_LONG, info); __ volatile_move(temp_double, result, T_LONG); -#ifndef _LP64 - if (UseSSE < 2) { - // no spill slot needed in SSE2 mode because xmm->cpu register move is possible - set_vreg_flag(result, must_start_in_memory); - } -#endif // !LP64 } else { __ load(address, result, info); } diff --git a/src/hotspot/cpu/x86/c1_LIR_x86.cpp b/src/hotspot/cpu/x86/c1_LIR_x86.cpp index adcc53c44ce14..ce831c5f95649 100644 --- a/src/hotspot/cpu/x86/c1_LIR_x86.cpp +++ b/src/hotspot/cpu/x86/c1_LIR_x86.cpp @@ -58,16 +58,9 @@ LIR_Opr LIR_OprFact::double_fpu(int reg1, int reg2) { #ifndef PRODUCT void LIR_Address::verify() const { -#ifdef _LP64 assert(base()->is_cpu_register(), "wrong base operand"); assert(index()->is_illegal() || index()->is_double_cpu(), "wrong index operand"); assert(base()->type() == T_ADDRESS || base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA, "wrong type for addresses"); -#else - assert(base()->is_single_cpu(), "wrong base operand"); - assert(index()->is_illegal() || index()->is_single_cpu(), "wrong index operand"); - assert(base()->type() == T_ADDRESS || base()->type() == T_OBJECT || base()->type() == T_INT || base()->type() == T_METADATA, - "wrong type for addresses"); -#endif } #endif // PRODUCT diff --git a/src/hotspot/cpu/x86/c1_LinearScan_x86.hpp b/src/hotspot/cpu/x86/c1_LinearScan_x86.hpp index 8669c9ab8a10b..62a1bd6510e41 100644 --- a/src/hotspot/cpu/x86/c1_LinearScan_x86.hpp +++ b/src/hotspot/cpu/x86/c1_LinearScan_x86.hpp @@ -26,12 +26,6 @@ #define CPU_X86_C1_LINEARSCAN_X86_HPP inline bool LinearScan::is_processed_reg_num(int reg_num) { -#ifndef _LP64 - // rsp and rbp (numbers 6 and 7) are ignored - assert(FrameMap::rsp_opr->cpu_regnr() == 6, "wrong assumption below"); - assert(FrameMap::rbp_opr->cpu_regnr() == 7, "wrong assumption below"); - assert(reg_num >= 0, "invalid reg_num"); -#else - // rsp and rbp, r10, r15 (numbers [12,15]) are ignored - // r12 (number 11) is conditional on compressed oops.
assert(FrameMap::r12_opr->cpu_regnr() == 11, "wrong assumption below"); @@ -40,16 +34,10 @@ inline bool LinearScan::is_processed_reg_num(int reg_num) { assert(FrameMap::rsp_opr->cpu_regnrLo() == 14, "wrong assumption below"); assert(FrameMap::rbp_opr->cpu_regnrLo() == 15, "wrong assumption below"); assert(reg_num >= 0, "invalid reg_num"); -#endif // _LP64 return reg_num <= FrameMap::last_cpu_reg() || reg_num >= pd_nof_cpu_regs_frame_map; } inline int LinearScan::num_physical_regs(BasicType type) { - // Intel requires two cpu registers for long, - // but requires only one fpu register for double - if (LP64_ONLY(false &&) type == T_LONG) { - return 2; - } return 1; } diff --git a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp index 238a1bd048a9f..7ffa6d9bdd4fd 100644 --- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp @@ -62,15 +62,8 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr } if (LockingMode == LM_LIGHTWEIGHT) { -#ifdef _LP64 const Register thread = r15_thread; lightweight_lock(disp_hdr, obj, hdr, thread, tmp, slow_case); -#else - // Implicit null check. - movptr(hdr, Address(obj, oopDesc::mark_offset_in_bytes())); - // Lacking registers and thread on x86_32. Always take slow path. - jmp(slow_case); -#endif } else if (LockingMode == LM_LEGACY) { Label done; // Load object header @@ -135,12 +128,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_ verify_oop(obj); if (LockingMode == LM_LIGHTWEIGHT) { -#ifdef _LP64 lightweight_unlock(obj, disp_hdr, r15_thread, hdr, slow_case); -#else - // Lacking registers and thread on x86_32. Always take slow path. - jmp(slow_case); -#endif } else if (LockingMode == LM_LEGACY) { // test if object header is pointing to the displaced header, and if so, restore // the displaced header in the object - if the object header is not pointing to @@ -169,7 +157,6 @@ void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, i void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) { assert_different_registers(obj, klass, len, t1, t2); -#ifdef _LP64 if (UseCompactObjectHeaders) { movptr(t1, Address(klass, Klass::prototype_header_offset())); movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1); @@ -178,16 +165,13 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register movptr(t1, klass); encode_klass_not_null(t1, rscratch1); movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1); - } else -#endif - { + } else { movptr(Address(obj, oopDesc::mark_offset_in_bytes()), checked_cast<int32_t>(markWord::prototype().value())); movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass); } if (len->is_valid()) { movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len); -#ifdef _LP64 int base_offset = arrayOopDesc::length_offset_in_bytes() + BytesPerInt; if (!is_aligned(base_offset, BytesPerWord)) { assert(is_aligned(base_offset, BytesPerInt), "must be 4-byte aligned"); @@ -195,14 +179,10 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register xorl(t1, t1); movl(Address(obj, base_offset), t1); } -#endif - } -#ifdef _LP64 - else if (UseCompressedClassPointers && !UseCompactObjectHeaders) { + } else if (UseCompressedClassPointers && !UseCompactObjectHeaders) { xorptr(t1, t1); store_klass_gap(obj, t1); } -#endif } @@ -265,8 +245,6 @@ void
C1_MacroAssembler::initialize_object(Register obj, Register klass, Register bind(loop); movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - (1*BytesPerWord)), t1_zero); - NOT_LP64(movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - (2*BytesPerWord)), - t1_zero);) decrement(index); jcc(Assembler::notZero, loop); } diff --git a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp index cb4cb3af8c3d4..bb5111fa65236 100644 --- a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp +++ b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp @@ -51,27 +51,18 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) { // setup registers - const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is a callee-saved register (Visual C++ calling conventions) + const Register thread = r15_thread; // is a callee-saved register (Visual C++ calling conventions) assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different"); assert(oop_result1 != thread && metadata_result != thread, "registers must be different"); assert(args_size >= 0, "illegal args_size"); bool align_stack = false; -#ifdef _LP64 + // At a method handle call, the stack may not be properly aligned // when returning with an exception. align_stack = (stub_id() == (int)C1StubId::handle_exception_from_callee_id); -#endif -#ifdef _LP64 mov(c_rarg0, thread); set_num_rt_args(0); // Nothing on stack -#else - set_num_rt_args(1 + args_size); - - // push java thread (becomes first argument of C function) - get_thread(thread); - push(thread); -#endif // _LP64 int call_offset = -1; if (!align_stack) { @@ -104,9 +95,6 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre #endif reset_last_Java_frame(thread, true); - // discard thread and arguments - NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord)); - // check for pending exceptions { Label L; cmpptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD); @@ -144,17 +132,12 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) { -#ifdef _LP64 mov(c_rarg1, arg1); -#else - push(arg1); -#endif // _LP64 return call_RT(oop_result1, metadata_result, entry, 1); } int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) { -#ifdef _LP64 if (c_rarg1 == arg2) { if (c_rarg2 == arg1) { xchgq(arg1, arg2); @@ -166,16 +149,11 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre mov(c_rarg1, arg1); mov(c_rarg2, arg2); } -#else - push(arg2); - push(arg1); -#endif // _LP64 return call_RT(oop_result1, metadata_result, entry, 2); } int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) { -#ifdef _LP64 // if there is any conflict use the stack if (arg1 == c_rarg2 || arg1 == c_rarg3 || arg2 == c_rarg1 || arg2 == c_rarg3 || @@ -191,11 +169,6 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre mov(c_rarg2, arg2); mov(c_rarg3, arg3); } -#else - push(arg3); - push(arg2); - push(arg1); -#endif // _LP64 return call_RT(oop_result1, metadata_result, entry, 3); } @@ -262,20 +235,13 @@ const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2; // but the code in save_live_registers will take the
argument count into account. // -#ifdef _LP64 - #define SLOT2(x) x, - #define SLOT_PER_WORD 2 -#else - #define SLOT2(x) - #define SLOT_PER_WORD 1 -#endif // _LP64 +#define SLOT2(x) x, +#define SLOT_PER_WORD 2 enum reg_save_layout { // 64bit needs to keep stack 16 byte aligned. So we add some alignment dummies to make that // happen and will assert if the stack size we create is misaligned -#ifdef _LP64 align_dummy_0, align_dummy_1, -#endif // _LP64 #ifdef _WIN64 // Windows always allocates space for its argument registers (see // frame::arg_reg_save_area_bytes). @@ -291,7 +257,6 @@ enum reg_save_layout { fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD), // 352 marker = fpu_state_end_off, SLOT2(markerH) // 352, 356 extra_space_offset, // 360 -#ifdef _LP64 r15_off = extra_space_offset, r15H_off, // 360, 364 r14_off, r14H_off, // 368, 372 r13_off, r13H_off, // 376, 380 @@ -301,9 +266,6 @@ r9_off, r9H_off, // 408, 412 r8_off, r8H_off, // 416, 420 rdi_off, rdiH_off, // 424, 428 -#else - rdi_off = extra_space_offset, -#endif // _LP64 rsi_off, SLOT2(rsiH_off) // 432, 436 rbp_off, SLOT2(rbpH_off) // 440, 444 rsp_off, SLOT2(rspH_off) // 448, 452 @@ -329,8 +291,8 @@ static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args, bool save_fpu_registers = true) { // In 64bit all the args are in regs so there are no additional stack slots - LP64_ONLY(num_rt_args = 0); - LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");) + num_rt_args = 0; + assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned"); int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word); @@ -343,7 +305,6 @@ static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args, map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg()); map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg()); map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg()); -#ifdef _LP64 map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args), r8->as_VMReg()); map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args), r9->as_VMReg()); map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg()); @@ -369,37 +330,10 @@ static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args, map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next()); map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next()); map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next()); -#endif // _LP64 int xmm_bypass_limit = FrameMap::get_num_caller_save_xmms(); if (save_fpu_registers) { -#ifndef _LP64 - if (UseSSE < 2) { - int fpu_off = float_regs_as_doubles_off; - for (int n = 0; n < FrameMap::nof_fpu_regs; n++) { - VMReg fpu_name_0 = FrameMap::fpu_regname(n); - map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + num_rt_args), fpu_name_0); - // %%% This is really a waste but we'll keep things as they were for now - if (true) { - map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + 1 + num_rt_args), fpu_name_0->next()); - } - fpu_off += 2; - } - assert(fpu_off == fpu_state_off, "incorrect number of fpu stack slots"); - - if (UseSSE == 1) { - int xmm_off = xmm_regs_as_doubles_off; - for (int n = 0; n < FrameMap::nof_fpu_regs;
n++) { - VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg(); - map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0); - xmm_off += 2; - } - assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers"); - } - } -#endif // !LP64 - if (UseSSE >= 2) { int xmm_off = xmm_regs_as_doubles_off; for (int n = 0; n < FrameMap::nof_xmm_regs; n++) { @@ -426,14 +360,7 @@ void C1_MacroAssembler::save_live_registers_no_oop_map(bool save_fpu_registers) __ block_comment("save_live_registers"); // Push CPU state in multiple of 16 bytes -#ifdef _LP64 __ save_legacy_gprs(); -#else - __ pusha(); -#endif - - // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset"); - // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset"); __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size); @@ -442,46 +369,6 @@ void C1_MacroAssembler::save_live_registers_no_oop_map(bool save_fpu_registers) #endif if (save_fpu_registers) { -#ifndef _LP64 - if (UseSSE < 2) { - // save FPU stack - __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size)); - __ fwait(); - -#ifdef ASSERT - Label ok; - __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::x86::fpu_cntrl_wrd_std()); - __ jccb(Assembler::equal, ok); - __ stop("corrupted control word detected"); - __ bind(ok); -#endif - - // Reset the control word to guard against exceptions being unmasked - // since fstp_d can cause FPU stack underflow exceptions. Write it - // into the on stack copy and then reload that to make sure that the - // current and future values are correct. - __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::x86::fpu_cntrl_wrd_std()); - __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size)); - - // Save the FPU registers in de-opt-able form - int offset = 0; - for (int n = 0; n < FrameMap::nof_fpu_regs; n++) { - __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset)); - offset += 8; - } - - if (UseSSE == 1) { - // save XMM registers as float because double not supported without SSE2(num MMX == num fpu) - int offset = 0; - for (int n = 0; n < FrameMap::nof_fpu_regs; n++) { - XMMRegister xmm_name = as_XMMRegister(n); - __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset), xmm_name); - offset += 8; - } - } - } -#endif // !_LP64 - if (UseSSE >= 2) { // save XMM registers // XMM registers can contain float or double values, but this is not known here, @@ -497,16 +384,12 @@ void C1_MacroAssembler::save_live_registers_no_oop_map(bool save_fpu_registers) } } } - - // FPU stack must be empty now - NOT_LP64( __ verify_FPU(0, "save_live_registers"); ) } #undef __ #define __ sasm-> static void restore_fpu(C1_MacroAssembler* sasm, bool restore_fpu_registers) { -#ifdef _LP64 if (restore_fpu_registers) { // restore XMM registers int xmm_bypass_limit = FrameMap::get_num_caller_save_xmms(); @@ -517,38 +400,6 @@ static void restore_fpu(C1_MacroAssembler* sasm, bool restore_fpu_registers) { offset += 8; } } -#else - if (restore_fpu_registers) { - if (UseSSE >= 2) { - // restore XMM registers - int xmm_bypass_limit = FrameMap::nof_xmm_regs; - int offset = 0; - for (int n = 0; n < xmm_bypass_limit; n++) { - XMMRegister xmm_name = as_XMMRegister(n); - __ movdbl(xmm_name, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset)); - offset += 8; - } - } else if (UseSSE == 1) { - // restore XMM registers(num MMX == num fpu) - int offset = 0; - for (int n = 0; n < 
FrameMap::nof_fpu_regs; n++) { - XMMRegister xmm_name = as_XMMRegister(n); - __ movflt(xmm_name, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset)); - offset += 8; - } - } - - if (UseSSE < 2) { - __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size)); - } else { - // check that FPU stack is really empty - __ verify_FPU(0, "restore_live_registers"); - } - } else { - // check that FPU stack is really empty - __ verify_FPU(0, "restore_live_registers"); - } -#endif // _LP64 #ifdef ASSERT { @@ -570,12 +421,7 @@ void C1_MacroAssembler::restore_live_registers(bool restore_fpu_registers) { __ block_comment("restore_live_registers"); restore_fpu(this, restore_fpu_registers); -#ifdef _LP64 __ restore_legacy_gprs(); -#else - __ popa(); -#endif - } @@ -584,7 +430,6 @@ void C1_MacroAssembler::restore_live_registers_except_rax(bool restore_fpu_regis restore_fpu(this, restore_fpu_registers); -#ifdef _LP64 __ movptr(r15, Address(rsp, 0)); __ movptr(r14, Address(rsp, wordSize)); __ movptr(r13, Address(rsp, 2 * wordSize)); @@ -602,17 +447,6 @@ void C1_MacroAssembler::restore_live_registers_except_rax(bool restore_fpu_regis __ movptr(rcx, Address(rsp, 14 * wordSize)); __ addptr(rsp, 16 * wordSize); -#else - - __ pop(rdi); - __ pop(rsi); - __ pop(rbp); - __ pop(rbx); // skip this value - __ pop(rbx); - __ pop(rdx); - __ pop(rcx); - __ addptr(rsp, BytesPerWord); -#endif // _LP64 } #undef __ @@ -639,12 +473,7 @@ void Runtime1::initialize_pd() { // return: offset in 64-bit words. uint Runtime1::runtime_blob_current_thread_offset(frame f) { -#ifdef _LP64 return r15_off / 2; // rsp offsets are in halfwords -#else - Unimplemented(); - return 0; -#endif } // Target: the entry point of the method that creates and posts the exception oop. @@ -664,15 +493,8 @@ OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address targe // Load arguments for exception that are passed as arguments into the stub. if (has_argument) { -#ifdef _LP64 __ movptr(c_rarg1, Address(rbp, 2*BytesPerWord)); __ movptr(c_rarg2, Address(rbp, 3*BytesPerWord)); -#else - __ movptr(temp_reg, Address(rbp, 3*BytesPerWord)); - __ push(temp_reg); - __ movptr(temp_reg, Address(rbp, 2*BytesPerWord)); - __ push(temp_reg); -#endif // _LP64 } int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1); @@ -692,7 +514,7 @@ OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm) const Register exception_oop = rax; const Register exception_pc = rdx; // other registers used in this stub - const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); + const Register thread = r15_thread; // Save registers, if required. OopMapSet* oop_maps = new OopMapSet(); @@ -725,7 +547,7 @@ OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm) case C1StubId::handle_exception_from_callee_id: { // At this point all registers except exception oop (RAX) and // exception pc (RDX) are dead. 
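Before the frame setup below, the size arithmetic deserves a worked example. This is a hypothetical stand-alone sketch, not VM code; the constants (8-byte words, two 32-bit VMReg slots per word, a 32-byte Win64 register argument shadow area) are assumptions mirroring x86-64 HotSpot rather than values read from the VM.

#include <cstdio>

int main() {
  const int word_size          = 8;   // bytes per machine word on x86-64 (assumed)
  const int slots_per_word     = 2;   // 32-bit VMReg stack slots per word (assumed)
  const int arg_reg_save_bytes = 32;  // Win64 shadow space: 4 argument registers * 8 bytes (assumed)
  const bool win64             = true;

  // Mirrors the frame_size expression below: 2 words for saved rbp and the
  // return address, plus the Win64-only argument register save area.
  int frame_size_words = 2 + (win64 ? arg_reg_save_bytes / word_size : 0);
  std::printf("frame_size = %d words, OopMap slots = %d\n",
              frame_size_words, frame_size_words * slots_per_word);
  return 0;
}

With these assumptions the stub frame is 6 words (12 OopMap slots) on Windows and 2 words (4 slots) elsewhere.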
- const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord); + const int frame_size = 2 /*BP, return address*/ WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord); oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0); sasm->set_frame_size(frame_size); WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes)); @@ -734,21 +556,11 @@ default: ShouldNotReachHere(); } -#if !defined(_LP64) && defined(COMPILER2) - if (UseSSE < 2 && !CompilerConfig::is_c1_only_no_jvmci()) { - // C2 can leave the fpu stack dirty - __ empty_FPU_stack(); - } -#endif // !_LP64 && COMPILER2 - // verify that only rax and rdx are valid at this time __ invalidate_registers(false, true, true, false, true, true); // verify that rax contains a valid exception __ verify_not_null_oop(exception_oop); - // load address of JavaThread object for thread-local data - NOT_LP64(__ get_thread(thread);) - #ifdef ASSERT // check that fields in JavaThread for exception oop and issuing pc are // empty before writing to them @@ -815,11 +627,11 @@ void Runtime1::generate_unwind_exception(StubAssembler *sasm) { // incoming parameters const Register exception_oop = rax; // callee-saved copy of exception_oop during runtime call - const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14); + const Register exception_oop_callee_saved = r14; // other registers used in this stub const Register exception_pc = rdx; const Register handler_addr = rbx; - const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); + const Register thread = r15_thread; if (AbortVMOnException) { __ enter(); @@ -834,7 +646,6 @@ void Runtime1::generate_unwind_exception(StubAssembler *sasm) { #ifdef ASSERT // check that fields in JavaThread for exception oop and issuing pc are empty - NOT_LP64(__ get_thread(thread);) Label oop_empty; __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0); __ jcc(Assembler::equal, oop_empty); @@ -848,14 +659,10 @@ void Runtime1::generate_unwind_exception(StubAssembler *sasm) { __ bind(pc_empty); #endif - // clear the FPU stack in case any FPU results are left behind - NOT_LP64( __ empty_FPU_stack(); ) - // save exception_oop in callee-saved register to preserve it during runtime calls __ verify_not_null_oop(exception_oop); __ movptr(exception_oop_callee_saved, exception_oop); - NOT_LP64(__ get_thread(thread);) // Get return address (is on top of stack after leave).
__ movptr(exception_pc, Address(rsp, 0)); @@ -905,18 +712,10 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) { OopMap* oop_map = save_live_registers(sasm, num_rt_args); -#ifdef _LP64 const Register thread = r15_thread; // No need to worry about dummy __ mov(c_rarg0, thread); -#else - __ push(rax); // push dummy - - const Register thread = rdi; // is a callee-saved register (Visual C++ calling conventions) - // push java thread (becomes first argument of C function) - __ get_thread(thread); - __ push(thread); -#endif // _LP64 + __ set_last_Java_frame(thread, noreg, rbp, nullptr, rscratch1); // do the call __ call(RuntimeAddress(target)); @@ -936,10 +735,6 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) { __ pop(rax); #endif __ reset_last_Java_frame(thread, true); -#ifndef _LP64 - __ pop(rcx); // discard thread arg - __ pop(rcx); // discard dummy -#endif // _LP64 // check for pending exceptions { Label L; @@ -1166,15 +961,8 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { // This is called via call_runtime so the arguments // will be placed in C ABI locations -#ifdef _LP64 __ verify_oop(c_rarg0); __ mov(rax, c_rarg0); -#else - // The object is passed on the stack and we haven't pushed a - // frame yet so it's one word away from top of stack. - __ movptr(rax, Address(rsp, 1 * BytesPerWord)); - __ verify_oop(rax); -#endif // _LP64 // load the klass and check the has finalizer flag Label register_finalizer; @@ -1467,9 +1255,8 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { // the live registers get saved. save_live_registers(sasm, 1); - __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax)); + __ mov(c_rarg0, rax); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)))); - NOT_LP64(__ pop(rax)); restore_live_registers(sasm); } @@ -1477,7 +1264,6 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { case C1StubId::fpu2long_stub_id: { -#ifdef _LP64 Label done; __ cvttsd2siq(rax, Address(rsp, wordSize)); __ cmp64(rax, ExternalAddress((address) StubRoutines::x86::double_sign_flip())); @@ -1489,78 +1275,6 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { __ pop(rax); __ bind(done); __ ret(0); -#else - // rax and rdx are destroyed, but should be free since the result is returned there - // preserve rsi, ecx - __ push(rsi); - __ push(rcx); - - // check for NaN - Label return0, do_return, return_min_jlong, do_convert; - - Address value_high_word(rsp, wordSize + 4); - Address value_low_word(rsp, wordSize); - Address result_high_word(rsp, 3*wordSize + 4); - Address result_low_word(rsp, 3*wordSize); - - __ subptr(rsp, 32); // more than enough on 32bit - __ fst_d(value_low_word); - __ movl(rax, value_high_word); - __ andl(rax, 0x7ff00000); - __ cmpl(rax, 0x7ff00000); - __ jcc(Assembler::notEqual, do_convert); - __ movl(rax, value_high_word); - __ andl(rax, 0xfffff); - __ orl(rax, value_low_word); - __ jcc(Assembler::notZero, return0); - - __ bind(do_convert); - __ fnstcw(Address(rsp, 0)); - __ movzwl(rax, Address(rsp, 0)); - __ orl(rax, 0xc00); - __ movw(Address(rsp, 2), rax); - __ fldcw(Address(rsp, 2)); - __ fwait(); - __ fistp_d(result_low_word); - __ fldcw(Address(rsp, 0)); - __ fwait(); - // This gets the entire long in rax on 64bit - __ movptr(rax, result_low_word); - // testing of high bits - __ movl(rdx, result_high_word); - __ mov(rcx, rax); - // What the heck is the point of the next
instruction??? - __ xorl(rcx, 0x0); - __ movl(rsi, 0x80000000); - __ xorl(rsi, rdx); - __ orl(rcx, rsi); - __ jcc(Assembler::notEqual, do_return); - __ fldz(); - __ fcomp_d(value_low_word); - __ fnstsw_ax(); - __ sahf(); - __ jcc(Assembler::above, return_min_jlong); - // return max_jlong - __ movl(rdx, 0x7fffffff); - __ movl(rax, 0xffffffff); - __ jmp(do_return); - - __ bind(return_min_jlong); - __ movl(rdx, 0x80000000); - __ xorl(rax, rax); - __ jmp(do_return); - - __ bind(return0); - __ fpop(); - __ xorptr(rdx,rdx); - __ xorptr(rax,rax); - - __ bind(do_return); - __ addptr(rsp, 32); - __ pop(rcx); - __ pop(rsi); - __ ret(0); -#endif // _LP64 } break;
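One more note on the fpu2long fast path kept above: for NaN and out-of-range inputs, cvttsd2siq produces the x86 integer-indefinite value 0x8000000000000000, which matches the constant compared via StubRoutines::x86::double_sign_flip(), so only those inputs reach the fix-up call. A behavioral sketch of the Java d2l rules that the fix-up must implement (plain C++ under IEEE-754 assumptions, not HotSpot code):

#include <cmath>
#include <cstdint>

// Sketch: Java's d2l conversion. NaN maps to 0; values beyond the long
// range clamp to Long.MIN_VALUE / Long.MAX_VALUE; everything else truncates.
static int64_t java_d2l(double d) {
  const double two_pow_63 = 9223372036854775808.0;  // 2^63, exactly representable
  if (std::isnan(d))    return 0;
  if (d >= two_pow_63)  return INT64_MAX;  // covers +infinity as well
  if (d < -two_pow_63)  return INT64_MIN;  // covers -infinity as well
  return static_cast<int64_t>(d);          // in range: truncate toward zero
}

The input -2^63 converts exactly and stays on the fast path; every other input that makes cvttsd2siq return 0x8000000000000000 is corrected by the slow call.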