diff --git a/.github/workflows/build-cross-compile.yml b/.github/workflows/build-cross-compile.yml index 168c5924d8610..6398489c842c7 100644 --- a/.github/workflows/build-cross-compile.yml +++ b/.github/workflows/build-cross-compile.yml @@ -29,17 +29,14 @@ on: workflow_call: inputs: gcc-major-version: - required: false + required: true type: string - default: '10' apt-gcc-version: - required: false + required: true type: string - default: '10.3.0-1ubuntu1~20.04' - apt-gcc-cross-suffix: - required: false + apt-gcc-cross-version: + required: true type: string - default: 'cross1' jobs: build-cross-compile: @@ -95,8 +92,8 @@ jobs: sudo apt-get install \ gcc-${{ inputs.gcc-major-version }}=${{ inputs.apt-gcc-version }} \ g++-${{ inputs.gcc-major-version }}=${{ inputs.apt-gcc-version }} \ - gcc-${{ inputs.gcc-major-version }}-${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-abi}}=${{ inputs.apt-gcc-version }}${{ inputs.apt-gcc-cross-suffix }} \ - g++-${{ inputs.gcc-major-version }}-${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-abi}}=${{ inputs.apt-gcc-version }}${{ inputs.apt-gcc-cross-suffix }} \ + gcc-${{ inputs.gcc-major-version }}-${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-abi}}=${{ inputs.apt-gcc-cross-version }} \ + g++-${{ inputs.gcc-major-version }}-${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-abi}}=${{ inputs.apt-gcc-cross-version }} \ libxrandr-dev libxtst-dev libcups2-dev libasound2-dev sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-${{ inputs.gcc-major-version }} 100 --slave /usr/bin/g++ g++ /usr/bin/g++-${{ inputs.gcc-major-version }} @@ -143,8 +140,8 @@ jobs: --openjdk-target=${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-abi}} --with-sysroot=sysroot --with-build-jdk=${{ steps.buildjdk.outputs.jdk-path }} - CC=${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-abi}}-gcc-10 - CXX=${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-abi}}-g++-10 + CC=${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-abi}}-gcc-${{ inputs.gcc-major-version }} + CXX=${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-abi}}-g++-${{ inputs.gcc-major-version }} - name: 'Build' id: build diff --git a/.github/workflows/build-linux.yml b/.github/workflows/build-linux.yml index b509ea6e7fed3..992807f2d8bd1 100644 --- a/.github/workflows/build-linux.yml +++ b/.github/workflows/build-linux.yml @@ -42,6 +42,13 @@ on: required: false type: string default: '[ "debug", "release" ]' + gcc-major-version: + required: true + type: string + gcc-package-suffix: + required: false + type: string + default: '' apt-gcc-version: required: true type: string @@ -101,8 +108,8 @@ jobs: fi sudo apt-get update sudo apt-get install --only-upgrade apt - sudo apt-get install gcc-${{ inputs.apt-gcc-version }} g++-${{ inputs.apt-gcc-version }} libxrandr-dev${{ steps.arch.outputs.suffix }} libxtst-dev${{ steps.arch.outputs.suffix }} libcups2-dev${{ steps.arch.outputs.suffix }} libasound2-dev${{ steps.arch.outputs.suffix }} ${{ inputs.apt-extra-packages }} - sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 100 --slave /usr/bin/g++ g++ /usr/bin/g++-10 + sudo apt-get install gcc-${{ inputs.gcc-major-version }}${{ inputs.gcc-package-suffix }}=${{ inputs.apt-gcc-version }} g++-${{ inputs.gcc-major-version }}${{ inputs.gcc-package-suffix }}=${{ inputs.apt-gcc-version }} libxrandr-dev${{ steps.arch.outputs.suffix }} libxtst-dev${{ steps.arch.outputs.suffix }} libcups2-dev${{ steps.arch.outputs.suffix }} libasound2-dev${{ steps.arch.outputs.suffix }} ${{ inputs.apt-extra-packages }} + sudo update-alternatives --install 
/usr/bin/gcc gcc /usr/bin/gcc-${{ inputs.gcc-major-version }} 100 --slave /usr/bin/g++ g++ /usr/bin/g++-${{ inputs.gcc-major-version }} - name: 'Configure' run: > diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index ec7e3c9957b2c..f4af7dce0df43 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -123,7 +123,8 @@ jobs: uses: ./.github/workflows/build-linux.yml with: platform: linux-x64 - apt-gcc-version: '10=10.3.0-1ubuntu1~20.04' + gcc-major-version: '10' + apt-gcc-version: '10.3.0-1ubuntu1~20.04' # The linux-x64 jdk bundle is used as buildjdk for the cross-compile job if: needs.select.outputs.linux-x64 == 'true' || needs.select.outputs.linux-cross-compile == 'true' @@ -133,7 +134,9 @@ jobs: uses: ./.github/workflows/build-linux.yml with: platform: linux-x86 - apt-gcc-version: '10-multilib' + gcc-major-version: '10' + gcc-package-suffix: '-multilib' + apt-gcc-version: '10.3.0-1ubuntu1~20.04' apt-architecture: 'i386' # Some multilib libraries do not have proper inter-dependencies, so we have to # install their dependencies manually. @@ -149,7 +152,8 @@ jobs: platform: linux-x64 make-target: 'hotspot' debug-levels: '[ "debug" ]' - apt-gcc-version: '10=10.3.0-1ubuntu1~20.04' + gcc-major-version: '10' + apt-gcc-version: '10.3.0-1ubuntu1~20.04' extra-conf-options: '--disable-precompiled-headers' if: needs.select.outputs.linux-x64-variants == 'true' @@ -161,7 +165,8 @@ jobs: platform: linux-x64 make-target: 'hotspot' debug-levels: '[ "debug" ]' - apt-gcc-version: '10=10.3.0-1ubuntu1~20.04' + gcc-major-version: '10' + apt-gcc-version: '10.3.0-1ubuntu1~20.04' extra-conf-options: '--with-jvm-variants=zero --disable-precompiled-headers' if: needs.select.outputs.linux-x64-variants == 'true' @@ -173,7 +178,8 @@ jobs: platform: linux-x64 make-target: 'hotspot' debug-levels: '[ "debug" ]' - apt-gcc-version: '10=10.3.0-1ubuntu1~20.04' + gcc-major-version: '10' + apt-gcc-version: '10.3.0-1ubuntu1~20.04' extra-conf-options: '--with-jvm-variants=minimal --disable-precompiled-headers' if: needs.select.outputs.linux-x64-variants == 'true' @@ -186,7 +192,8 @@ jobs: make-target: 'hotspot' # Technically this is not the "debug" level, but we can't inject a new matrix state for just this job debug-levels: '[ "debug" ]' - apt-gcc-version: '10=10.3.0-1ubuntu1~20.04' + gcc-major-version: '10' + apt-gcc-version: '10.3.0-1ubuntu1~20.04' extra-conf-options: '--with-debug-level=optimized --disable-precompiled-headers' if: needs.select.outputs.linux-x64-variants == 'true' @@ -196,6 +203,10 @@ jobs: - select - build-linux-x64 uses: ./.github/workflows/build-cross-compile.yml + with: + gcc-major-version: '10' + apt-gcc-version: '10.3.0-1ubuntu1~20.04' + apt-gcc-cross-version: '10.3.0-1ubuntu1~20.04cross1' if: needs.select.outputs.linux-cross-compile == 'true' build-macos-x64: diff --git a/doc/testing.html b/doc/testing.html index 213e23664c37c..32edbe2331c03 100644 --- a/doc/testing.html +++ b/doc/testing.html @@ -64,6 +64,9 @@

Using "make test" (the

Configuration

To be able to run JTReg tests, configure needs to know where to find the JTReg test framework. If it is not picked up automatically by configure, use the --with-jtreg=<path to jtreg home> option to point to the JTReg framework. Note that this option should point to the JTReg home, i.e. the top directory, containing lib/jtreg.jar etc. (An alternative is to set the JT_HOME environment variable to point to the JTReg home before running configure.)

To be able to run microbenchmarks, configure needs to know where to find the JMH dependency. Use --with-jmh=<path to JMH jars> to point to a directory containing the core JMH and transitive dependencies. The recommended dependencies can be retrieved by running sh make/devkit/createJMHBundle.sh, after which --with-jmh=build/jmh/jars should work.
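As a quick illustration, the two options can be combined in a single configure invocation. This is only a sketch: the JTReg path below is a placeholder for wherever your JTReg installation actually lives.

```
sh make/devkit/createJMHBundle.sh    # fetch the recommended JMH dependencies
bash configure --with-jtreg=/opt/jtreg --with-jmh=build/jmh/jars
```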

When tests fail or time out, jtreg runs its failure handler to capture necessary data from the system where the test was run. This data can then be used to analyze the test failures. Collecting this data involves running various commands (which are listed in files residing in test/failure_handler/src/share/conf), and some of these commands use sudo. If the system's sudoers file isn't configured to allow running these commands, you may be prompted for a password while the failure handler executes. Typically, when running locally, collecting this additional data isn't necessary. To disable running the failure handler, use --enable-jtreg-failure-handler=no when running configure. If, however, you want to let the failure handler run without being prompted for a sudo password, you can configure your sudoers file appropriately. Consult your operating system's documentation to see how to do that; here we show only one possible way: edit the /etc/sudoers.d/sudoers file to include the following line:

johndoe ALL=(ALL) NOPASSWD: /sbin/dmesg

This line configures sudo to not prompt for a password for the /sbin/dmesg command (one of the commands listed in the files at test/failure_handler/src/share/conf) for the user johndoe. Here johndoe is the user account under which the jtreg tests are run. Replace the username with a relevant user account on your system.
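When adding such a rule, it is safer to go through visudo, which locks the file and syntax-checks it before installing it; a broken sudoers file can lock you out of sudo entirely. The file name here simply mirrors the example above:

```
sudo visudo -f /etc/sudoers.d/sudoers
```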

Test selection

All functionality is available using the test make target. In this use case, the test or tests to be executed are controlled using the TEST variable. To speed up subsequent test runs with no source code changes, test-only can be used instead, which does not depend on the source and test image build.

For some common top-level tests, direct make targets have been generated. This includes all JTReg test groups, the hotspot gtest, and custom tests (if present). This means that make test-tier1 is equivalent to make test TEST="tier1", but the former is more tab-completion friendly. For more complex test runs, the test TEST="x" solution needs to be used.
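A few representative invocations (the selections shown are illustrative; substitute your own group or test path):

```
make test-tier1                                   # direct target for a JTReg test group
make test TEST="tier1"                            # the equivalent TEST-variable form
make test TEST="jtreg:test/hotspot/jtreg:tier1"   # fully qualified test descriptor
make test-only TEST="tier1"                       # skip the source and test image build
```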

diff --git a/doc/testing.md b/doc/testing.md index 3ab7079ea07f7..d9a79defd685a 100644 --- a/doc/testing.md +++ b/doc/testing.md @@ -43,6 +43,31 @@ containing the core JMH and transitive dependencies. The recommended dependencies can be retrieved by running `sh make/devkit/createJMHBundle.sh`, after which `--with-jmh=build/jmh/jars` should work. +When tests fail or timeout, jtreg runs its failure handler to capture necessary +data from the system where the test was run. This data can then be used to +analyze the test failures. Collecting this data involves running various commands +(which are listed in files residing in `test/failure_handler/src/share/conf`) +and some of these commands use `sudo`. If the system's `sudoers` file isn't +configured to allow running these commands, then it can result in password being +prompted during the failure handler execution. Typically, when running locally, +collecting this additional data isn't always necessary. To disable running the +failure handler, use `--enable-jtreg-failure-handler=no` when running `configure`. +If, however, you want to let the failure handler to run and don't want to be +prompted for sudo password, then you can configure your `sudoers` file +appropriately. Please read the necessary documentation of your operating system +to see how to do that; here we only show one possible way of doing that - edit +the `/etc/sudoers.d/sudoers` file to include the following line: + +``` +johndoe ALL=(ALL) NOPASSWD: /sbin/dmesg +``` + +This line configures `sudo` to _not_ prompt for password for the `/sbin/dmesg` +command (this is one of the commands that is listed in the files +at `test/failure_handler/src/share/conf`), for the user `johndoe`. Here `johndoe` +is the user account under which the jtreg tests are run. Replace the username +with a relevant user account of your system. + ## Test selection All functionality is available using the `test` make target. In this use case, diff --git a/make/CreateJmods.gmk b/make/CreateJmods.gmk index 8b67b03327c51..e6cd2fd76aec2 100644 --- a/make/CreateJmods.gmk +++ b/make/CreateJmods.gmk @@ -226,6 +226,12 @@ else JMOD_FLAGS += --exclude '**{_the.*,_*.marker*,*.diz,*.debuginfo,*.dSYM/**,*.dSYM}' endif +# Unless we are creating a very large module, use the small tool JVM options +JMOD_SMALL_FLAGS := +ifeq ($(findstring $(MODULE), java.base java.desktop jdk.localedata), ) + JMOD_SMALL_FLAGS += $(JAVA_TOOL_FLAGS_SMALL) +endif + # Create jmods in the support dir and then move them into place to keep the # module path in $(IMAGES_OUTPUTDIR)/jmods valid at all times. 
$(eval $(call SetupExecute, create_$(JMOD_FILE), \ @@ -234,7 +240,7 @@ $(eval $(call SetupExecute, create_$(JMOD_FILE), \ OUTPUT_FILE := $(JMODS_DIR)/$(JMOD_FILE), \ SUPPORT_DIR := $(JMODS_SUPPORT_DIR), \ PRE_COMMAND := $(RM) $(JMODS_DIR)/$(JMOD_FILE) $(JMODS_SUPPORT_DIR)/$(JMOD_FILE), \ - COMMAND := $(JMOD) create --module-version $(VERSION_SHORT) \ + COMMAND := $(JMOD) $(JMOD_SMALL_FLAGS) create --module-version $(VERSION_SHORT) \ --target-platform '$(OPENJDK_MODULE_TARGET_PLATFORM)' \ --module-path $(JMODS_DIR) $(JMOD_FLAGS) \ --date $(SOURCE_DATE_ISO_8601) \ diff --git a/make/autoconf/spec.gmk.in b/make/autoconf/spec.gmk.in index 47b856a29ad4a..8fbf290f98492 100644 --- a/make/autoconf/spec.gmk.in +++ b/make/autoconf/spec.gmk.in @@ -664,7 +664,7 @@ JAVAC = $(JAVAC_CMD) JAVADOC = $(JAVADOC_CMD) JAR = $(JAR_CMD) JLINK = $(JLINK_CMD) -JMOD = $(JMOD_CMD) $(JAVA_TOOL_FLAGS_SMALL) +JMOD = $(JMOD_CMD) BUILD_JAVA_FLAGS := @BOOTCYCLE_JVM_ARGS_BIG@ BUILD_JAVA=@FIXPATH@ $(BUILD_JDK)/bin/java $(BUILD_JAVA_FLAGS) diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad index bff53d24cafc3..b9d0561d40d1e 100644 --- a/src/hotspot/cpu/aarch64/aarch64.ad +++ b/src/hotspot/cpu/aarch64/aarch64.ad @@ -1909,19 +1909,19 @@ static enum RC rc_class(OptoReg::Name reg) { } // we have 32 int registers * 2 halves - int slots_of_int_registers = RegisterImpl::max_slots_per_register * RegisterImpl::number_of_registers; + int slots_of_int_registers = Register::number_of_registers * Register::max_slots_per_register; if (reg < slots_of_int_registers) { return rc_int; } // we have 32 float register * 8 halves - int slots_of_float_registers = FloatRegisterImpl::max_slots_per_register * FloatRegisterImpl::number_of_registers; + int slots_of_float_registers = FloatRegister::number_of_registers * FloatRegister::max_slots_per_register; if (reg < slots_of_int_registers + slots_of_float_registers) { return rc_float; } - int slots_of_predicate_registers = PRegisterImpl::max_slots_per_register * PRegisterImpl::number_of_registers; + int slots_of_predicate_registers = PRegister::number_of_registers * PRegister::max_slots_per_register; if (reg < slots_of_int_registers + slots_of_float_registers + slots_of_predicate_registers) { return rc_predicate; } @@ -3368,7 +3368,7 @@ encode %{ } else { relocInfo::relocType rtype = $src->constant_reloc(); if (rtype == relocInfo::oop_type) { - __ movoop(dst_reg, (jobject)con, /*immediate*/true); + __ movoop(dst_reg, (jobject)con); } else if (rtype == relocInfo::metadata_type) { __ mov_metadata(dst_reg, (Metadata*)con); } else { diff --git a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp index f8f38a46ad74b..69510cd308952 100644 --- a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp @@ -62,23 +62,23 @@ class Argument { }; }; -REGISTER_DECLARATION(Register, c_rarg0, r0); -REGISTER_DECLARATION(Register, c_rarg1, r1); -REGISTER_DECLARATION(Register, c_rarg2, r2); -REGISTER_DECLARATION(Register, c_rarg3, r3); -REGISTER_DECLARATION(Register, c_rarg4, r4); -REGISTER_DECLARATION(Register, c_rarg5, r5); -REGISTER_DECLARATION(Register, c_rarg6, r6); -REGISTER_DECLARATION(Register, c_rarg7, r7); - -REGISTER_DECLARATION(FloatRegister, c_farg0, v0); -REGISTER_DECLARATION(FloatRegister, c_farg1, v1); -REGISTER_DECLARATION(FloatRegister, c_farg2, v2); -REGISTER_DECLARATION(FloatRegister, c_farg3, v3); -REGISTER_DECLARATION(FloatRegister, c_farg4, v4); -REGISTER_DECLARATION(FloatRegister, 
c_farg5, v5); -REGISTER_DECLARATION(FloatRegister, c_farg6, v6); -REGISTER_DECLARATION(FloatRegister, c_farg7, v7); +constexpr Register c_rarg0 = r0; +constexpr Register c_rarg1 = r1; +constexpr Register c_rarg2 = r2; +constexpr Register c_rarg3 = r3; +constexpr Register c_rarg4 = r4; +constexpr Register c_rarg5 = r5; +constexpr Register c_rarg6 = r6; +constexpr Register c_rarg7 = r7; + +constexpr FloatRegister c_farg0 = v0; +constexpr FloatRegister c_farg1 = v1; +constexpr FloatRegister c_farg2 = v2; +constexpr FloatRegister c_farg3 = v3; +constexpr FloatRegister c_farg4 = v4; +constexpr FloatRegister c_farg5 = v5; +constexpr FloatRegister c_farg6 = v6; +constexpr FloatRegister c_farg7 = v7; // Symbolically name the register arguments used by the Java calling convention. // We have control over the convention for java so we can do what we please. @@ -96,25 +96,25 @@ REGISTER_DECLARATION(FloatRegister, c_farg7, v7); // |--------------------------------------------------------------------| -REGISTER_DECLARATION(Register, j_rarg0, c_rarg1); -REGISTER_DECLARATION(Register, j_rarg1, c_rarg2); -REGISTER_DECLARATION(Register, j_rarg2, c_rarg3); -REGISTER_DECLARATION(Register, j_rarg3, c_rarg4); -REGISTER_DECLARATION(Register, j_rarg4, c_rarg5); -REGISTER_DECLARATION(Register, j_rarg5, c_rarg6); -REGISTER_DECLARATION(Register, j_rarg6, c_rarg7); -REGISTER_DECLARATION(Register, j_rarg7, c_rarg0); +constexpr Register j_rarg0 = c_rarg1; +constexpr Register j_rarg1 = c_rarg2; +constexpr Register j_rarg2 = c_rarg3; +constexpr Register j_rarg3 = c_rarg4; +constexpr Register j_rarg4 = c_rarg5; +constexpr Register j_rarg5 = c_rarg6; +constexpr Register j_rarg6 = c_rarg7; +constexpr Register j_rarg7 = c_rarg0; // Java floating args are passed as per C -REGISTER_DECLARATION(FloatRegister, j_farg0, v0); -REGISTER_DECLARATION(FloatRegister, j_farg1, v1); -REGISTER_DECLARATION(FloatRegister, j_farg2, v2); -REGISTER_DECLARATION(FloatRegister, j_farg3, v3); -REGISTER_DECLARATION(FloatRegister, j_farg4, v4); -REGISTER_DECLARATION(FloatRegister, j_farg5, v5); -REGISTER_DECLARATION(FloatRegister, j_farg6, v6); -REGISTER_DECLARATION(FloatRegister, j_farg7, v7); +constexpr FloatRegister j_farg0 = v0; +constexpr FloatRegister j_farg1 = v1; +constexpr FloatRegister j_farg2 = v2; +constexpr FloatRegister j_farg3 = v3; +constexpr FloatRegister j_farg4 = v4; +constexpr FloatRegister j_farg5 = v5; +constexpr FloatRegister j_farg6 = v6; +constexpr FloatRegister j_farg7 = v7; // registers used to hold VM data either temporarily within a method // or across method calls @@ -123,40 +123,28 @@ REGISTER_DECLARATION(FloatRegister, j_farg7, v7); // r8 is used for indirect result location return // we use it and r9 as scratch registers -REGISTER_DECLARATION(Register, rscratch1, r8); -REGISTER_DECLARATION(Register, rscratch2, r9); +constexpr Register rscratch1 = r8; +constexpr Register rscratch2 = r9; // current method -- must be in a call-clobbered register -REGISTER_DECLARATION(Register, rmethod, r12); +constexpr Register rmethod = r12; // non-volatile (callee-save) registers are r16-29 // of which the following are dedicated global state -// link register -REGISTER_DECLARATION(Register, lr, r30); -// frame pointer -REGISTER_DECLARATION(Register, rfp, r29); -// current thread -REGISTER_DECLARATION(Register, rthread, r28); -// base of heap -REGISTER_DECLARATION(Register, rheapbase, r27); -// constant pool cache -REGISTER_DECLARATION(Register, rcpool, r26); -// r25 is a callee-saved temp -// REGISTER_DECLARATION(Register, unused, 
r25); -// locals on stack -REGISTER_DECLARATION(Register, rlocals, r24); -// bytecode pointer -REGISTER_DECLARATION(Register, rbcp, r22); -// Dispatch table base -REGISTER_DECLARATION(Register, rdispatch, r21); -// Java expression stack pointer -REGISTER_DECLARATION(Register, esp, r20); -// Sender's SP while in interpreter -REGISTER_DECLARATION(Register, r19_sender_sp, r19); +constexpr Register lr = r30; // link register +constexpr Register rfp = r29; // frame pointer +constexpr Register rthread = r28; // current thread +constexpr Register rheapbase = r27; // base of heap +constexpr Register rcpool = r26; // constant pool cache +constexpr Register rlocals = r24; // locals on stack +constexpr Register rbcp = r22; // bytecode pointer +constexpr Register rdispatch = r21; // dispatch table base +constexpr Register esp = r20; // Java expression stack pointer +constexpr Register r19_sender_sp = r19; // sender's SP while in interpreter // Preserved predicate register with all elements set TRUE. -REGISTER_DECLARATION(PRegister, ptrue, p7); +constexpr PRegister ptrue = p7; #define assert_cond(ARG1) assert(ARG1, #ARG1) @@ -277,29 +265,29 @@ class Instruction_aarch64 { } void rf(Register r, int lsb) { - f(r->encoding_nocheck(), lsb + 4, lsb); + f(r->raw_encoding(), lsb + 4, lsb); } // reg|ZR void zrf(Register r, int lsb) { - f(r->encoding_nocheck() - (r == zr), lsb + 4, lsb); + f(r->raw_encoding() - (r == zr), lsb + 4, lsb); } // reg|SP void srf(Register r, int lsb) { - f(r == sp ? 31 : r->encoding_nocheck(), lsb + 4, lsb); + f(r == sp ? 31 : r->raw_encoding(), lsb + 4, lsb); } void rf(FloatRegister r, int lsb) { - f(r->encoding_nocheck(), lsb + 4, lsb); + f(r->raw_encoding(), lsb + 4, lsb); } void prf(PRegister r, int lsb) { - f(r->encoding_nocheck(), lsb + 3, lsb); + f(r->raw_encoding(), lsb + 3, lsb); } void pgrf(PRegister r, int lsb) { - f(r->encoding_nocheck(), lsb + 2, lsb); + f(r->raw_encoding(), lsb + 2, lsb); } unsigned get(int msb = 31, int lsb = 0) { @@ -329,7 +317,7 @@ class Post : public PrePost { Register _idx; bool _is_postreg; public: - Post(Register reg, int o) : PrePost(reg, o) { _idx = NULL; _is_postreg = false; } + Post(Register reg, int o) : PrePost(reg, o) { _idx = noreg; _is_postreg = false; } Post(Register reg, Register idx) : PrePost(reg, 0) { _idx = idx; _is_postreg = true; } Register idx_reg() { return _idx; } bool is_postreg() {return _is_postreg; } @@ -627,8 +615,7 @@ class InternalAddress: public Address { InternalAddress(address target) : Address(target, relocInfo::internal_word_type) {} }; -const int FPUStateSizeInWords = FloatRegisterImpl::number_of_registers * - FloatRegisterImpl::save_slots_per_register; +const int FPUStateSizeInWords = FloatRegister::number_of_registers * FloatRegister::save_slots_per_register; typedef enum { PLDL1KEEP = 0b00000, PLDL1STRM, PLDL2KEEP, PLDL2STRM, PLDL3KEEP, PLDL3STRM, diff --git a/src/hotspot/cpu/aarch64/c1_Defs_aarch64.hpp b/src/hotspot/cpu/aarch64/c1_Defs_aarch64.hpp index 4df27e7fde468..9072d09f18ca8 100644 --- a/src/hotspot/cpu/aarch64/c1_Defs_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/c1_Defs_aarch64.hpp @@ -41,8 +41,8 @@ enum { // registers enum { - pd_nof_cpu_regs_frame_map = RegisterImpl::number_of_registers, // number of registers used during code emission - pd_nof_fpu_regs_frame_map = FloatRegisterImpl::number_of_registers, // number of registers used during code emission + pd_nof_cpu_regs_frame_map = Register::number_of_registers, // number of GP registers used during code emission + pd_nof_fpu_regs_frame_map = 
FloatRegister::number_of_registers, // number of FP registers used during code emission pd_nof_caller_save_cpu_regs_frame_map = 19 - 2 /* rscratch1 and rscratch2 */ R18_RESERVED_ONLY(- 1), // number of registers killed by calls pd_nof_caller_save_fpu_regs_frame_map = 32, // number of registers killed by calls diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp index 2869baa7a3973..be91a35a7fad4 100644 --- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp @@ -331,7 +331,7 @@ void LIR_Assembler::jobject2reg(jobject o, Register reg) { if (o == NULL) { __ mov(reg, zr); } else { - __ movoop(reg, o, /*immediate*/true); + __ movoop(reg, o); } } diff --git a/src/hotspot/cpu/aarch64/frame_aarch64.cpp b/src/hotspot/cpu/aarch64/frame_aarch64.cpp index 6c7936a973488..c076f4312b175 100644 --- a/src/hotspot/cpu/aarch64/frame_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/frame_aarch64.cpp @@ -188,16 +188,11 @@ bool frame::safe_for_sender(JavaThread *thread) { } // We must always be able to find a recognizable pc - CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc); + CodeBlob* sender_blob = CodeCache::find_blob(sender_pc); if (sender_pc == NULL || sender_blob == NULL) { return false; } - // Could be a zombie method - if (sender_blob->is_zombie() || sender_blob->is_unloaded()) { - return false; - } - // Could just be some random pointer within the codeBlob if (!sender_blob->code_contains(sender_pc)) { return false; diff --git a/src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp b/src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp index d41383443d485..e64d2e73b5930 100644 --- a/src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp +++ b/src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp @@ -165,10 +165,8 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) { DEBUG_ONLY(_frame_index = -1;) // Here's a sticky one. This constructor can be called via AsyncGetCallTrace - // when last_Java_sp is non-null but the pc fetched is junk. If we are truly - // unlucky the junk value could be to a zombied method and we'll die on the - // find_blob call. This is also why we can have no asserts on the validity - // of the pc we find here. AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler + // when last_Java_sp is non-null but the pc fetched is junk. + // AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler // -> pd_last_frame should use a specialized version of pd_last_frame which could // call a specilaized frame constructor instead of this one. // Then we could use the assert below. 
However this assert is of somewhat dubious diff --git a/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp index e828ee7109458..19b7d1839154f 100644 --- a/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp @@ -281,18 +281,18 @@ void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorator } void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, - Address dst, Register val, Register tmp1, Register tmp2) { + Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) { // flatten object address if needed if (dst.index() == noreg && dst.offset() == 0) { - if (dst.base() != r3) { - __ mov(r3, dst.base()); + if (dst.base() != tmp3) { + __ mov(tmp3, dst.base()); } } else { - __ lea(r3, dst); + __ lea(tmp3, dst); } g1_write_barrier_pre(masm, - r3 /* obj */, + tmp3 /* obj */, tmp2 /* pre_val */, rthread /* thread */, tmp1 /* tmp */, @@ -300,7 +300,7 @@ void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet deco false /* expand_call */); if (val == noreg) { - BarrierSetAssembler::store_at(masm, decorators, type, Address(r3, 0), noreg, noreg, noreg); + BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), noreg, noreg, noreg, noreg); } else { // G1 barrier needs uncompressed oop for region cross check. Register new_val = val; @@ -308,9 +308,9 @@ void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet deco new_val = rscratch2; __ mov(new_val, val); } - BarrierSetAssembler::store_at(masm, decorators, type, Address(r3, 0), val, noreg, noreg); + BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg); g1_write_barrier_post(masm, - r3 /* store_adr */, + tmp3 /* store_adr */, new_val /* new_val */, rthread /* thread */, tmp1 /* tmp */, diff --git a/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.hpp index e38746f4f9edf..9856fc56711ff 100644 --- a/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.hpp @@ -57,7 +57,7 @@ class G1BarrierSetAssembler: public ModRefBarrierSetAssembler { Register tmp2); virtual void oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, - Address dst, Register val, Register tmp1, Register tmp2); + Address dst, Register val, Register tmp1, Register tmp2, Register tmp3); public: #ifdef COMPILER1 diff --git a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp index c8bae57f0ea7d..db3913b1d2273 100644 --- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp @@ -80,7 +80,7 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, } void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, - Address dst, Register val, Register tmp1, Register tmp2) { + Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) { bool in_heap = (decorators & IN_HEAP) != 0; bool in_native = (decorators & IN_NATIVE) != 0; switch (type) { diff --git a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp 
b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp index 390da48327e14..74e68eb49dabd 100644 --- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp @@ -51,7 +51,7 @@ class BarrierSetAssembler: public CHeapObj { virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Register dst, Address src, Register tmp1, Register tmp_thread); virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, - Address dst, Register val, Register tmp1, Register tmp2); + Address dst, Register val, Register tmp1, Register tmp2, Register tmp3); virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env, Register obj, Register tmp, Label& slowpath); @@ -72,6 +72,12 @@ class BarrierSetAssembler: public CHeapObj { virtual void nmethod_entry_barrier(MacroAssembler* masm, Label* slow_path, Label* continuation, Label* guard); virtual void c2i_entry_barrier(MacroAssembler* masm); + virtual bool supports_instruction_patching() { + NMethodPatchingType patching_type = nmethod_patching_type(); + return patching_type == NMethodPatchingType::conc_instruction_and_data_patch || + patching_type == NMethodPatchingType::stw_instruction_and_data_patch; + } + static address patching_epoch_addr(); static void clear_patching_epoch(); static void increment_patching_epoch(); diff --git a/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp index f94f2b9c902a0..6afdb285914fb 100644 --- a/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp @@ -78,21 +78,21 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl } void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, - Address dst, Register val, Register tmp1, Register tmp2) { + Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) { bool in_heap = (decorators & IN_HEAP) != 0; bool is_array = (decorators & IS_ARRAY) != 0; bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0; bool precise = is_array || on_anonymous; bool needs_post_barrier = val != noreg && in_heap; - BarrierSetAssembler::store_at(masm, decorators, type, dst, val, noreg, noreg); + BarrierSetAssembler::store_at(masm, decorators, type, dst, val, noreg, noreg, noreg); if (needs_post_barrier) { // flatten object address if needed if (!precise || (dst.index() == noreg && dst.offset() == 0)) { store_check(masm, dst.base(), dst); } else { - __ lea(r3, dst); - store_check(masm, r3, dst); + __ lea(tmp3, dst); + store_check(masm, tmp3, dst); } } } diff --git a/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.hpp index 844a959579774..d0d5e4c3d4c29 100644 --- a/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.hpp @@ -35,7 +35,7 @@ class CardTableBarrierSetAssembler: public ModRefBarrierSetAssembler { virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register start, Register count, Register tmp, RegSet saved_regs); virtual void oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, - Address 
dst, Register val, Register tmp1, Register tmp2); + Address dst, Register val, Register tmp1, Register tmp2, Register tmp3); }; diff --git a/src/hotspot/cpu/aarch64/gc/shared/modRefBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shared/modRefBarrierSetAssembler_aarch64.cpp index 3c18abc7a1218..a50152d244fba 100644 --- a/src/hotspot/cpu/aarch64/gc/shared/modRefBarrierSetAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/gc/shared/modRefBarrierSetAssembler_aarch64.cpp @@ -45,10 +45,10 @@ void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Decorat } void ModRefBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, - Address dst, Register val, Register tmp1, Register tmp2) { + Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) { if (is_reference_type(type)) { - oop_store_at(masm, decorators, type, dst, val, tmp1, tmp2); + oop_store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3); } else { - BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2); + BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3); } } diff --git a/src/hotspot/cpu/aarch64/gc/shared/modRefBarrierSetAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/shared/modRefBarrierSetAssembler_aarch64.hpp index 00e36b919256e..22f98441f4ea6 100644 --- a/src/hotspot/cpu/aarch64/gc/shared/modRefBarrierSetAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/gc/shared/modRefBarrierSetAssembler_aarch64.hpp @@ -40,7 +40,7 @@ class ModRefBarrierSetAssembler: public BarrierSetAssembler { Register start, Register count, Register tmp, RegSet saved_regs) {} virtual void oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, - Address dst, Register val, Register tmp1, Register tmp2) = 0; + Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) = 0; public: virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop, @@ -48,7 +48,7 @@ class ModRefBarrierSetAssembler: public BarrierSetAssembler { virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop, Register start, Register count, Register tmp, RegSet saved_regs); virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, - Address dst, Register val, Register tmp1, Register tmp2); + Address dst, Register val, Register tmp1, Register tmp2, Register tmp3); }; #endif // CPU_AARCH64_GC_SHARED_MODREFBARRIERSETASSEMBLER_AARCH64_HPP diff --git a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp index 86640896d20fd..de3023ae99efa 100644 --- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp @@ -374,24 +374,24 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet d } void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, - Address dst, Register val, Register tmp1, Register tmp2) { + Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) { bool on_oop = is_reference_type(type); if (!on_oop) { - BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2); + BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3); return; } // flatten object address if needed if (dst.index() 
== noreg && dst.offset() == 0) { - if (dst.base() != r3) { - __ mov(r3, dst.base()); + if (dst.base() != tmp3) { + __ mov(tmp3, dst.base()); } } else { - __ lea(r3, dst); + __ lea(tmp3, dst); } shenandoah_write_barrier_pre(masm, - r3 /* obj */, + tmp3 /* obj */, tmp2 /* pre_val */, rthread /* thread */, tmp1 /* tmp */, @@ -399,7 +399,7 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet false /* expand_call */); if (val == noreg) { - BarrierSetAssembler::store_at(masm, decorators, type, Address(r3, 0), noreg, noreg, noreg); + BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), noreg, noreg, noreg, noreg); } else { iu_barrier(masm, val, tmp1); // G1 barrier needs uncompressed oop for region cross check. @@ -408,7 +408,7 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet new_val = rscratch2; __ mov(new_val, val); } - BarrierSetAssembler::store_at(masm, decorators, type, Address(r3, 0), val, noreg, noreg); + BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg); } } diff --git a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp index b02601540ca95..8fb152bc74309 100644 --- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp @@ -76,7 +76,7 @@ class ShenandoahBarrierSetAssembler: public BarrierSetAssembler { virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Register dst, Address src, Register tmp1, Register tmp_thread); virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, - Address dst, Register val, Register tmp1, Register tmp2); + Address dst, Register val, Register tmp1, Register tmp2, Register tmp3); virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env, Register obj, Register tmp, Label& slowpath); void cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val, diff --git a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp index 1270396c68348..1139358ce85c7 100644 --- a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp @@ -108,7 +108,8 @@ void ZBarrierSetAssembler::store_at(MacroAssembler* masm, Address dst, Register val, Register tmp1, - Register tmp2) { + Register tmp2, + Register tmp3) { // Verify value if (is_reference_type(type)) { // Note that src could be noreg, which means we @@ -116,7 +117,7 @@ void ZBarrierSetAssembler::store_at(MacroAssembler* masm, if (val != noreg) { Label done; - // tmp1 and tmp2 are often set to noreg. + // tmp1, tmp2 and tmp3 are often set to noreg. 
RegSet savedRegs = RegSet::of(rscratch1); __ push(savedRegs, sp); @@ -131,7 +132,7 @@ void ZBarrierSetAssembler::store_at(MacroAssembler* masm, } // Store value - BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2); + BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, noreg); } #endif // ASSERT diff --git a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp index cd2c25469488d..9567758675fac 100644 --- a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp @@ -59,7 +59,8 @@ class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase { Address dst, Register val, Register tmp1, - Register tmp2); + Register tmp2, + Register tmp3); #endif // ASSERT virtual void arraycopy_prologue(MacroAssembler* masm, diff --git a/src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp b/src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp index 42118f259dc8e..e0d9d6c0fb2d4 100644 --- a/src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp @@ -175,11 +175,11 @@ void CodeInstaller::pd_relocate_poll(address pc, jint mark, JVMCI_TRAPS) { // convert JVMCI register indices (as used in oop maps) to HotSpot registers VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg, JVMCI_TRAPS) { - if (jvmci_reg < RegisterImpl::number_of_registers) { + if (jvmci_reg < Register::number_of_registers) { return as_Register(jvmci_reg)->as_VMReg(); } else { - jint floatRegisterNumber = jvmci_reg - RegisterImpl::number_of_declared_registers; - if (floatRegisterNumber >= 0 && floatRegisterNumber < FloatRegisterImpl::number_of_registers) { + jint floatRegisterNumber = jvmci_reg - Register::number_of_declared_registers; + if (floatRegisterNumber >= 0 && floatRegisterNumber < FloatRegister::number_of_registers) { return as_FloatRegister(floatRegisterNumber)->as_VMReg(); } JVMCI_ERROR_NULL("invalid register number: %d", jvmci_reg); diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp index 8d1f99dddc11b..48c83773a7e5a 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp @@ -2198,7 +2198,7 @@ int MacroAssembler::push(unsigned int bitset, Register stack) { regs[count++] = reg; bitset >>= 1; } - regs[count++] = zr->encoding_nocheck(); + regs[count++] = zr->raw_encoding(); count &= ~1; // Only push an even number of regs if (count) { @@ -2228,7 +2228,7 @@ int MacroAssembler::pop(unsigned int bitset, Register stack) { regs[count++] = reg; bitset >>= 1; } - regs[count++] = zr->encoding_nocheck(); + regs[count++] = zr->raw_encoding(); count &= ~1; for (int i = 2; i < count; i += 2) { @@ -2383,9 +2383,9 @@ int MacroAssembler::push_p(unsigned int bitset, Register stack) { return 0; } - unsigned char regs[PRegisterImpl::number_of_saved_registers]; + unsigned char regs[PRegister::number_of_saved_registers]; int count = 0; - for (int reg = 0; reg < PRegisterImpl::number_of_saved_registers; reg++) { + for (int reg = 0; reg < PRegister::number_of_saved_registers; reg++) { if (1 & bitset) regs[count++] = reg; bitset >>= 1; @@ -2420,9 +2420,9 @@ int MacroAssembler::pop_p(unsigned int bitset, Register stack) { return 0; } - unsigned char regs[PRegisterImpl::number_of_saved_registers]; + unsigned char regs[PRegister::number_of_saved_registers]; int count = 0; - for (int 
reg = 0; reg < PRegisterImpl::number_of_saved_registers; reg++) { + for (int reg = 0; reg < PRegister::number_of_saved_registers; reg++) { if (1 & bitset) regs[count++] = reg; bitset >>= 1; @@ -2453,7 +2453,7 @@ void MacroAssembler::verify_heapbase(const char* msg) { if (CheckCompressedOops) { Label ok; push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1 - cmpptr(rheapbase, ExternalAddress((address)CompressedOops::ptrs_base_addr())); + cmpptr(rheapbase, ExternalAddress(CompressedOops::ptrs_base_addr())); br(Assembler::EQ, ok); stop(msg); bind(ok); @@ -2587,7 +2587,7 @@ void MacroAssembler::reinit_heapbase() if (Universe::is_fully_initialized()) { mov(rheapbase, CompressedOops::ptrs_base()); } else { - lea(rheapbase, ExternalAddress((address)CompressedOops::ptrs_base_addr())); + lea(rheapbase, ExternalAddress(CompressedOops::ptrs_base_addr())); ldr(rheapbase, Address(rheapbase)); } } @@ -2910,8 +2910,8 @@ void MacroAssembler::push_CPU_state(bool save_vectors, bool use_sve, int sve_vector_size_in_bytes, int total_predicate_in_bytes) { push(RegSet::range(r0, r29), sp); // integer registers except lr & sp if (save_vectors && use_sve && sve_vector_size_in_bytes > 16) { - sub(sp, sp, sve_vector_size_in_bytes * FloatRegisterImpl::number_of_registers); - for (int i = 0; i < FloatRegisterImpl::number_of_registers; i++) { + sub(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers); + for (int i = 0; i < FloatRegister::number_of_registers; i++) { sve_str(as_FloatRegister(i), Address(sp, i)); } } else { @@ -2926,7 +2926,7 @@ void MacroAssembler::push_CPU_state(bool save_vectors, bool use_sve, } if (save_vectors && use_sve && total_predicate_in_bytes > 0) { sub(sp, sp, total_predicate_in_bytes); - for (int i = 0; i < PRegisterImpl::number_of_saved_registers; i++) { + for (int i = 0; i < PRegister::number_of_saved_registers; i++) { sve_str(as_PRegister(i), Address(sp, i)); } } @@ -2935,16 +2935,16 @@ void MacroAssembler::push_CPU_state(bool save_vectors, bool use_sve, void MacroAssembler::pop_CPU_state(bool restore_vectors, bool use_sve, int sve_vector_size_in_bytes, int total_predicate_in_bytes) { if (restore_vectors && use_sve && total_predicate_in_bytes > 0) { - for (int i = PRegisterImpl::number_of_saved_registers - 1; i >= 0; i--) { + for (int i = PRegister::number_of_saved_registers - 1; i >= 0; i--) { sve_ldr(as_PRegister(i), Address(sp, i)); } add(sp, sp, total_predicate_in_bytes); } if (restore_vectors && use_sve && sve_vector_size_in_bytes > 16) { - for (int i = FloatRegisterImpl::number_of_registers - 1; i >= 0; i--) { + for (int i = FloatRegister::number_of_registers - 1; i >= 0; i--) { sve_ldr(as_FloatRegister(i), Address(sp, i)); } - add(sp, sp, sve_vector_size_in_bytes * FloatRegisterImpl::number_of_registers); + add(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers); } else { int step = (restore_vectors ? 
8 : 4) * wordSize; for (int i = 0; i <= 28; i += 4) @@ -4402,14 +4402,14 @@ void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src, - Register tmp1, Register thread_tmp) { + Register tmp1, Register tmp2, Register tmp3) { BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); decorators = AccessInternal::decorator_fixup(decorators); bool as_raw = (decorators & AS_RAW) != 0; if (as_raw) { - bs->BarrierSetAssembler::store_at(this, decorators, type, dst, src, tmp1, thread_tmp); + bs->BarrierSetAssembler::store_at(this, decorators, type, dst, src, tmp1, tmp2, tmp3); } else { - bs->store_at(this, decorators, type, dst, src, tmp1, thread_tmp); + bs->store_at(this, decorators, type, dst, src, tmp1, tmp2, tmp3); } } @@ -4424,13 +4424,13 @@ void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register } void MacroAssembler::store_heap_oop(Address dst, Register src, Register tmp1, - Register thread_tmp, DecoratorSet decorators) { - access_store_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp); + Register tmp2, Register tmp3, DecoratorSet decorators) { + access_store_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, tmp2, tmp3); } // Used for storing NULLs. void MacroAssembler::store_heap_oop_null(Address dst) { - access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg); + access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg); } Address MacroAssembler::allocate_metadata_address(Metadata* obj) { @@ -4440,11 +4440,8 @@ Address MacroAssembler::allocate_metadata_address(Metadata* obj) { return Address((address)obj, rspec); } -// Move an oop into a register. immediate is true if we want -// immediate instructions and nmethod entry barriers are not enabled. -// i.e. we are not going to patch this instruction while the code is being -// executed by another thread. -void MacroAssembler::movoop(Register dst, jobject obj, bool immediate) { +// Move an oop into a register. +void MacroAssembler::movoop(Register dst, jobject obj) { int oop_index; if (obj == NULL) { oop_index = oop_recorder()->allocate_oop_index(obj); @@ -4459,15 +4456,12 @@ void MacroAssembler::movoop(Register dst, jobject obj, bool immediate) { } RelocationHolder rspec = oop_Relocation::spec(oop_index); - // nmethod entry barrier necessitate using the constant pool. They have to be - // ordered with respected to oop accesses. - // Using immediate literals would necessitate ISBs. 
- BarrierSet* bs = BarrierSet::barrier_set(); - if ((bs->barrier_set_nmethod() != NULL && bs->barrier_set_assembler()->nmethod_patching_type() == NMethodPatchingType::conc_data_patch) || !immediate) { + if (BarrierSet::barrier_set()->barrier_set_assembler()->supports_instruction_patching()) { + mov(dst, Address((address)obj, rspec)); + } else { address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address ldr_constant(dst, Address(dummy, rspec)); - } else - mov(dst, Address((address)obj, rspec)); + } } diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp index 46e0166842c6d..377f046e618e7 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp @@ -851,7 +851,7 @@ class MacroAssembler: public Assembler { Register tmp1, Register tmp_thread); void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src, - Register tmp1, Register tmp_thread); + Register tmp1, Register tmp2, Register tmp3); void load_heap_oop(Register dst, Address src, Register tmp1 = noreg, Register thread_tmp = noreg, DecoratorSet decorators = 0); @@ -859,7 +859,7 @@ class MacroAssembler: public Assembler { void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg, Register thread_tmp = noreg, DecoratorSet decorators = 0); void store_heap_oop(Address dst, Register src, Register tmp1 = noreg, - Register tmp_thread = noreg, DecoratorSet decorators = 0); + Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0); // currently unimplemented // Used for storing NULL. All other oop constants should be @@ -1223,7 +1223,7 @@ class MacroAssembler: public Assembler { Address allocate_metadata_address(Metadata* obj); Address constant_oop_address(jobject obj); - void movoop(Register dst, jobject obj, bool immediate = false); + void movoop(Register dst, jobject obj); // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic. 
void kernel_crc32(Register crc, Register buf, Register len, diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64_aes.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64_aes.cpp index a4f34c229d4c2..03853a7ca46be 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64_aes.cpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64_aes.cpp @@ -231,27 +231,27 @@ class AESKernelGenerator: public KernelGenerator { br(Assembler::EQ, _rounds_52); } break; - case 2: aes_round(_data, _subkeys + 0); break; - case 3: aes_round(_data, _subkeys + 1); break; + case 2: aes_round(_data, as_FloatRegister(_subkeys->encoding() + 0)); break; + case 3: aes_round(_data, as_FloatRegister(_subkeys->encoding() + 1)); break; case 4: if (_once) bind(_rounds_52); break; - case 5: aes_round(_data, _subkeys + 2); break; - case 6: aes_round(_data, _subkeys + 3); break; + case 5: aes_round(_data, as_FloatRegister(_subkeys->encoding() + 2)); break; + case 6: aes_round(_data, as_FloatRegister(_subkeys->encoding() + 3)); break; case 7: if (_once) bind(_rounds_44); break; - case 8: aes_round(_data, _subkeys + 4); break; - case 9: aes_round(_data, _subkeys + 5); break; - case 10: aes_round(_data, _subkeys + 6); break; - case 11: aes_round(_data, _subkeys + 7); break; - case 12: aes_round(_data, _subkeys + 8); break; - case 13: aes_round(_data, _subkeys + 9); break; - case 14: aes_round(_data, _subkeys + 10); break; - case 15: aes_round(_data, _subkeys + 11); break; - case 16: aes_round(_data, _subkeys + 12); break; - case 17: aese(_data, _subkeys + 13); break; - case 18: eor(_data, T16B, _data, _subkeys + 14); break; + case 8: aes_round(_data, as_FloatRegister(_subkeys->encoding() + 4)); break; + case 9: aes_round(_data, as_FloatRegister(_subkeys->encoding() + 5)); break; + case 10: aes_round(_data, as_FloatRegister(_subkeys->encoding() + 6)); break; + case 11: aes_round(_data, as_FloatRegister(_subkeys->encoding() + 7)); break; + case 12: aes_round(_data, as_FloatRegister(_subkeys->encoding() + 8)); break; + case 13: aes_round(_data, as_FloatRegister(_subkeys->encoding() + 9)); break; + case 14: aes_round(_data, as_FloatRegister(_subkeys->encoding() + 10)); break; + case 15: aes_round(_data, as_FloatRegister(_subkeys->encoding() + 11)); break; + case 16: aes_round(_data, as_FloatRegister(_subkeys->encoding() + 12)); break; + case 17: aese(_data, as_FloatRegister(_subkeys->encoding() + 13)); break; + case 18: eor(_data, T16B, _data, as_FloatRegister(_subkeys->encoding() + 14)); break; case 19: if (_to != noreg) { st1(_data, T16B, _to); @@ -264,7 +264,7 @@ class AESKernelGenerator: public KernelGenerator { virtual KernelGenerator *next() { return new AESKernelGenerator(this, _unrolls, _from, _to, _keylen, - _data + 1, _subkeys, /*once*/false); + _data->successor(), _subkeys, /*once*/false); } virtual int length() { return 20; } @@ -409,14 +409,14 @@ class GHASHMultiplyGenerator: public KernelGenerator { } } - virtual KernelGenerator *next() { - GHASHMultiplyGenerator *result = new GHASHMultiplyGenerator(*this); - result->_result_lo += register_stride; - result->_result_hi += register_stride; - result->_b += register_stride; - result->_tmp1 += register_stride; - result->_tmp2 += register_stride; - result->_tmp3 += register_stride; + virtual KernelGenerator* next() { + GHASHMultiplyGenerator* result = new GHASHMultiplyGenerator(*this); + result->_result_lo = as_FloatRegister(result->_result_lo->encoding() + register_stride); + result->_result_hi = as_FloatRegister(result->_result_hi->encoding() + register_stride); + 
result->_b = as_FloatRegister(result->_b ->encoding() + register_stride); + result->_tmp1 = as_FloatRegister(result->_tmp1 ->encoding() + register_stride); + result->_tmp2 = as_FloatRegister(result->_tmp2 ->encoding() + register_stride); + result->_tmp3 = as_FloatRegister(result->_tmp3 ->encoding() + register_stride); return result; } @@ -477,17 +477,17 @@ class GHASHReduceGenerator: public KernelGenerator { if (_data->is_valid() && _once) { assert(length() >= unrolls(), "not enough room for inteleaved loads"); if (index < unrolls()) { - ld1((_data + index*register_stride), T16B, post(r2, 0x10)); + ld1(as_FloatRegister(_data->encoding() + index*register_stride), T16B, post(r2, 0x10)); } } } virtual KernelGenerator *next() { GHASHReduceGenerator *result = new GHASHReduceGenerator(*this); - result->_result += register_stride; - result->_hi += register_stride; - result->_lo += register_stride; - result->_t1 += register_stride; + result->_result = as_FloatRegister(result->_result->encoding() + register_stride); + result->_hi = as_FloatRegister(result->_hi ->encoding() + register_stride); + result->_lo = as_FloatRegister(result->_lo ->encoding() + register_stride); + result->_t1 = as_FloatRegister(result->_t1 ->encoding() + register_stride); result->_once = false; return result; } @@ -582,7 +582,8 @@ void MacroAssembler::ghash_processBlocks_wide(address field_polynomial, Register // v0 contains the initial state. Clear the others. for (int i = 1; i < unrolls; i++) { int ofs = register_stride * i; - eor(ofs+v0, T16B, ofs+v0, ofs+v0); // zero each state register + FloatRegister v0_ofs = as_FloatRegister(v0->encoding() + ofs); + eor(v0_ofs, T16B, v0_ofs, v0_ofs); // zero each state register } ext(a1_xor_a0, T16B, Hprime, Hprime, 0x08); // long-swap subkeyH into a1_xor_a0 @@ -590,7 +591,8 @@ void MacroAssembler::ghash_processBlocks_wide(address field_polynomial, Register // Load #unrolls blocks of data for (int ofs = 0; ofs < unrolls * register_stride; ofs += register_stride) { - ld1(v2+ofs, T16B, post(data, 0x10)); + FloatRegister v2_ofs = as_FloatRegister(v2->encoding() + ofs); + ld1(v2_ofs, T16B, post(data, 0x10)); } // Register assignments, replicated across 4 clones, v0 ... v23 @@ -623,8 +625,10 @@ void MacroAssembler::ghash_processBlocks_wide(address field_polynomial, Register // Xor data into current state for (int ofs = 0; ofs < unrolls * register_stride; ofs += register_stride) { - rbit((v2+ofs), T16B, (v2+ofs)); - eor((v2+ofs), T16B, v0+ofs, (v2+ofs)); // bit-swapped data ^ bit-swapped state + FloatRegister v0_ofs = as_FloatRegister(v0->encoding() + ofs); + FloatRegister v2_ofs = as_FloatRegister(v2->encoding() + ofs); + rbit(v2_ofs, T16B, v2_ofs); + eor(v2_ofs, T16B, v0_ofs, v2_ofs); // bit-swapped data ^ bit-swapped state } // Generate fully-unrolled multiply-reduce in two stages. @@ -651,24 +655,31 @@ void MacroAssembler::ghash_processBlocks_wide(address field_polynomial, Register // First, we multiply/reduce each clone by the appropriate power of H. 
for (int i = 0; i < unrolls; i++) { int ofs = register_stride * i; + FloatRegister v0_ofs = as_FloatRegister(v0->encoding() + ofs); + FloatRegister v1_ofs = as_FloatRegister(v1->encoding() + ofs); + FloatRegister v2_ofs = as_FloatRegister(v2->encoding() + ofs); + FloatRegister v3_ofs = as_FloatRegister(v3->encoding() + ofs); + FloatRegister v4_ofs = as_FloatRegister(v4->encoding() + ofs); + FloatRegister v5_ofs = as_FloatRegister(v5->encoding() + ofs); + ldrq(Hprime, Address(subkeyH, 16 * (unrolls - i - 1))); - rbit(v2+ofs, T16B, v2+ofs); - eor(v2+ofs, T16B, ofs+v0, v2+ofs); // bit-swapped data ^ bit-swapped state + rbit(v2_ofs, T16B, v2_ofs); + eor(v2_ofs, T16B, v0_ofs, v2_ofs); // bit-swapped data ^ bit-swapped state rev64(Hprime, T16B, Hprime); rbit(Hprime, T16B, Hprime); ext(a1_xor_a0, T16B, Hprime, Hprime, 0x08); // long-swap subkeyH into a1_xor_a0 eor(a1_xor_a0, T16B, a1_xor_a0, Hprime); // xor subkeyH into subkeyL (Karatsuba: (A1+A0)) - ghash_modmul(/*result*/v0+ofs, /*result_lo*/v5+ofs, /*result_hi*/v4+ofs, /*b*/v2+ofs, + ghash_modmul(/*result*/v0_ofs, /*result_lo*/v5_ofs, /*result_hi*/v4_ofs, /*b*/v2_ofs, Hprime, vzr, a1_xor_a0, p, - /*temps*/v1+ofs, v3+ofs, /* reuse b*/v2+ofs); + /*temps*/v1_ofs, v3_ofs, /* reuse b*/v2_ofs); } // Then we sum the results. - for (int i = 0; i < unrolls - 1; i++) { - int ofs = register_stride * i; - eor(v0, T16B, v0, v0 + register_stride + ofs); + for (int i = 1; i < unrolls; i++) { + FloatRegister v0_ofs = as_FloatRegister(v0->encoding() + register_stride * i); + eor(v0, T16B, v0, v0_ofs); } sub(blocks, blocks, (unsigned char)unrolls); diff --git a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp index 976ef786402df..b154c434069c1 100644 --- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp @@ -160,7 +160,7 @@ address NativeCall::destination() const { address destination = instruction_address() + displacement(); // Do we use a trampoline stub for this call? - CodeBlob* cb = CodeCache::find_blob_unsafe(addr); // Else we get assertion if nmethod is zombie. + CodeBlob* cb = CodeCache::find_blob(addr); assert(cb && cb->is_nmethod(), "sanity"); nmethod *nm = (nmethod *)cb; if (nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) { @@ -456,7 +456,7 @@ bool NativeInstruction::is_movk() { return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b11100101; } -bool NativeInstruction::is_sigill_zombie_not_entrant() { +bool NativeInstruction::is_sigill_not_entrant() { return uint_at(0) == 0xd4bbd5a1; // dcps1 #0xdead } @@ -471,13 +471,13 @@ bool NativeInstruction::is_stop() { //------------------------------------------------------------------- // MT-safe inserting of a jump over a jump or a nop (used by -// nmethod::make_not_entrant_or_zombie) +// nmethod::make_not_entrant) void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) { assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch"); assert(nativeInstruction_at(verified_entry)->is_jump_or_nop() - || nativeInstruction_at(verified_entry)->is_sigill_zombie_not_entrant(), + || nativeInstruction_at(verified_entry)->is_sigill_not_entrant(), "Aarch64 cannot replace non-jump with jump"); // Patch this nmethod atomically. 
@@ -488,8 +488,7 @@ void NativeJump::patch_verified_entry(address entry, address verified_entry, add unsigned int insn = (0b000101 << 26) | ((disp >> 2) & 0x3ffffff); *(unsigned int*)verified_entry = insn; } else { - // We use an illegal instruction for marking a method as - // not_entrant or zombie. + // We use an illegal instruction for marking a method as not_entrant. NativeIllegalInstruction::insert(verified_entry); } diff --git a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp index 70b9c07f7ad06..8c220ada58475 100644 --- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp @@ -79,7 +79,7 @@ class NativeInstruction { bool is_safepoint_poll(); bool is_movz(); bool is_movk(); - bool is_sigill_zombie_not_entrant(); + bool is_sigill_not_entrant(); bool is_stop(); protected: diff --git a/src/hotspot/cpu/aarch64/registerMap_aarch64.cpp b/src/hotspot/cpu/aarch64/registerMap_aarch64.cpp index dc5f0a096cbe0..6e36e877fdcbe 100644 --- a/src/hotspot/cpu/aarch64/registerMap_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/registerMap_aarch64.cpp @@ -34,7 +34,7 @@ address RegisterMap::pd_location(VMReg base_reg, int slot_idx) const { // the upper slots by offsetting from the base address. assert(base_reg->is_concrete(), "must pass base reg"); int base_reg_enc = (base_reg->value() - ConcreteRegisterImpl::max_gpr) / - FloatRegisterImpl::max_slots_per_register; + FloatRegister::max_slots_per_register; intptr_t offset_in_bytes = slot_idx * VMRegImpl::stack_slot_size; address base_location = location(base_reg, nullptr); if (base_location != NULL) { diff --git a/src/hotspot/cpu/aarch64/register_aarch64.cpp b/src/hotspot/cpu/aarch64/register_aarch64.cpp index f95455106aec5..3a46e38a72a76 100644 --- a/src/hotspot/cpu/aarch64/register_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/register_aarch64.cpp @@ -26,45 +26,36 @@ #include "precompiled.hpp" #include "register_aarch64.hpp" -REGISTER_IMPL_DEFINITION(Register, RegisterImpl, RegisterImpl::number_of_declared_registers); -REGISTER_IMPL_DEFINITION(FloatRegister, FloatRegisterImpl, FloatRegisterImpl::number_of_registers); -REGISTER_IMPL_DEFINITION(PRegister, PRegisterImpl, PRegisterImpl::number_of_registers); +Register::RegisterImpl all_RegisterImpls [Register::number_of_declared_registers + 1]; +FloatRegister::FloatRegisterImpl all_FloatRegisterImpls[FloatRegister::number_of_registers + 1]; +PRegister::PRegisterImpl all_PRegisterImpls [PRegister::number_of_registers + 1]; -const int ConcreteRegisterImpl::max_gpr = RegisterImpl::number_of_registers * - RegisterImpl::max_slots_per_register; - -const int ConcreteRegisterImpl::max_fpr - = ConcreteRegisterImpl::max_gpr + - FloatRegisterImpl::number_of_registers * FloatRegisterImpl::max_slots_per_register; - -const int ConcreteRegisterImpl::max_pr - = ConcreteRegisterImpl::max_fpr + - PRegisterImpl::number_of_registers * PRegisterImpl::max_slots_per_register; - -const char* RegisterImpl::name() const { - static const char *const names[number_of_registers] = { +const char* Register::RegisterImpl::name() const { + static const char *const names[number_of_declared_registers + 1] = { + "noreg", "c_rarg0", "c_rarg1", "c_rarg2", "c_rarg3", "c_rarg4", "c_rarg5", "c_rarg6", "c_rarg7", "rscratch1", "rscratch2", "r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18_tls", "r19", "resp", "rdispatch", "rbcp", "r23", - "rlocals", "r25", "rcpool", "rheapbase", "rthread", "rfp", "lr", "sp" + "rlocals", "r25", "rcpool", "rheapbase", 
"rthread", "rfp", "lr", "r31_sp", + "zp", "sp" }; - return is_valid() ? names[encoding()] : "noreg"; + return names[raw_encoding() + 1]; } -const char* FloatRegisterImpl::name() const { +const char* FloatRegister::FloatRegisterImpl::name() const { static const char *const names[number_of_registers] = { "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" }; - return is_valid() ? names[encoding()] : "noreg"; + return is_valid() ? names[encoding()] : "fnoreg"; } -const char* PRegisterImpl::name() const { +const char* PRegister::PRegisterImpl::name() const { static const char *const names[number_of_registers] = { "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15" }; - return is_valid() ? names[encoding()] : "noreg"; + return is_valid() ? names[encoding()] : "pnoreg"; } diff --git a/src/hotspot/cpu/aarch64/register_aarch64.hpp b/src/hotspot/cpu/aarch64/register_aarch64.hpp index b51f319842204..b3ba8468b4f22 100644 --- a/src/hotspot/cpu/aarch64/register_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/register_aarch64.hpp @@ -32,62 +32,87 @@ class VMRegImpl; typedef VMRegImpl* VMReg; -// Use Register as shortcut -class RegisterImpl; -typedef const RegisterImpl* Register; +class Register { + private: + int _encoding; -inline constexpr Register as_Register(int encoding); + constexpr explicit Register(int encoding) : _encoding(encoding) {} -class RegisterImpl: public AbstractRegisterImpl { - static constexpr Register first(); - -public: + public: enum { - number_of_registers = 32, - number_of_declared_registers = 34, // Including SP and ZR. - max_slots_per_register = 2 + number_of_registers = 32, + number_of_declared_registers = 34, // Including SP and ZR. 
+ max_slots_per_register = 2 }; - // derived registers, offsets, and addresses - const Register successor() const { return this + 1; } + class RegisterImpl: public AbstractRegisterImpl { + friend class Register; + + static constexpr const RegisterImpl* first(); + + public: + // accessors + int raw_encoding() const { return this - first(); } + int encoding() const { assert(is_valid(), "invalid register"); return raw_encoding(); } + bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; } + + // derived registers, offsets, and addresses + inline Register successor() const; + + VMReg as_VMReg() const; + + const char* name() const; + }; - // construction inline friend constexpr Register as_Register(int encoding); - VMReg as_VMReg() const; + constexpr Register() : _encoding(-1) {} // noreg + + int operator==(const Register r) const { return _encoding == r._encoding; } + int operator!=(const Register r) const { return _encoding != r._encoding; } - // accessors - int encoding() const { assert(is_valid(), "invalid register"); return encoding_nocheck(); } - bool is_valid() const { return (unsigned)encoding_nocheck() < number_of_registers; } - const char* name() const; - int encoding_nocheck() const { return this - first(); } + const RegisterImpl* operator->() const { return RegisterImpl::first() + _encoding; } }; +extern Register::RegisterImpl all_RegisterImpls[Register::number_of_declared_registers + 1] INTERNAL_VISIBILITY; -REGISTER_IMPL_DECLARATION(Register, RegisterImpl, RegisterImpl::number_of_declared_registers); +inline constexpr const Register::RegisterImpl* Register::RegisterImpl::first() { + return all_RegisterImpls + 1; +} -// The integer registers of the aarch64 architecture +constexpr Register noreg = Register(); -CONSTANT_REGISTER_DECLARATION(Register, noreg, (-1)); +inline constexpr Register as_Register(int encoding) { + if (0 <= encoding && encoding < Register::number_of_declared_registers) { + return Register(encoding); + } + return noreg; +} + +inline Register Register::RegisterImpl::successor() const { + assert(is_valid(), "sanity"); + return as_Register(encoding() + 1); +} -CONSTANT_REGISTER_DECLARATION(Register, r0, (0)); -CONSTANT_REGISTER_DECLARATION(Register, r1, (1)); -CONSTANT_REGISTER_DECLARATION(Register, r2, (2)); -CONSTANT_REGISTER_DECLARATION(Register, r3, (3)); -CONSTANT_REGISTER_DECLARATION(Register, r4, (4)); -CONSTANT_REGISTER_DECLARATION(Register, r5, (5)); -CONSTANT_REGISTER_DECLARATION(Register, r6, (6)); -CONSTANT_REGISTER_DECLARATION(Register, r7, (7)); -CONSTANT_REGISTER_DECLARATION(Register, r8, (8)); -CONSTANT_REGISTER_DECLARATION(Register, r9, (9)); -CONSTANT_REGISTER_DECLARATION(Register, r10, (10)); -CONSTANT_REGISTER_DECLARATION(Register, r11, (11)); -CONSTANT_REGISTER_DECLARATION(Register, r12, (12)); -CONSTANT_REGISTER_DECLARATION(Register, r13, (13)); -CONSTANT_REGISTER_DECLARATION(Register, r14, (14)); -CONSTANT_REGISTER_DECLARATION(Register, r15, (15)); -CONSTANT_REGISTER_DECLARATION(Register, r16, (16)); -CONSTANT_REGISTER_DECLARATION(Register, r17, (17)); +// The integer registers of the AArch64 architecture +constexpr Register r0 = as_Register( 0); +constexpr Register r1 = as_Register( 1); +constexpr Register r2 = as_Register( 2); +constexpr Register r3 = as_Register( 3); +constexpr Register r4 = as_Register( 4); +constexpr Register r5 = as_Register( 5); +constexpr Register r6 = as_Register( 6); +constexpr Register r7 = as_Register( 7); +constexpr Register r8 = as_Register( 8); +constexpr Register r9 = 
as_Register( 9); +constexpr Register r10 = as_Register(10); +constexpr Register r11 = as_Register(11); +constexpr Register r12 = as_Register(12); +constexpr Register r13 = as_Register(13); +constexpr Register r14 = as_Register(14); +constexpr Register r15 = as_Register(15); +constexpr Register r16 = as_Register(16); +constexpr Register r17 = as_Register(17); // In the ABI for Windows+AArch64 the register r18 is used to store the pointer // to the current thread's TEB (where TLS variables are stored). We could @@ -99,151 +124,175 @@ CONSTANT_REGISTER_DECLARATION(Register, r17, (17)); // It's easier to avoid allocating r18 altogether. // // See https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions?view=vs-2019#integer-registers -CONSTANT_REGISTER_DECLARATION(Register, r18_tls, (18)); -CONSTANT_REGISTER_DECLARATION(Register, r19, (19)); -CONSTANT_REGISTER_DECLARATION(Register, r20, (20)); -CONSTANT_REGISTER_DECLARATION(Register, r21, (21)); -CONSTANT_REGISTER_DECLARATION(Register, r22, (22)); -CONSTANT_REGISTER_DECLARATION(Register, r23, (23)); -CONSTANT_REGISTER_DECLARATION(Register, r24, (24)); -CONSTANT_REGISTER_DECLARATION(Register, r25, (25)); -CONSTANT_REGISTER_DECLARATION(Register, r26, (26)); -CONSTANT_REGISTER_DECLARATION(Register, r27, (27)); -CONSTANT_REGISTER_DECLARATION(Register, r28, (28)); -CONSTANT_REGISTER_DECLARATION(Register, r29, (29)); -CONSTANT_REGISTER_DECLARATION(Register, r30, (30)); +constexpr Register r18_tls = as_Register(18); +constexpr Register r19 = as_Register(19); +constexpr Register r20 = as_Register(20); +constexpr Register r21 = as_Register(21); +constexpr Register r22 = as_Register(22); +constexpr Register r23 = as_Register(23); +constexpr Register r24 = as_Register(24); +constexpr Register r25 = as_Register(25); +constexpr Register r26 = as_Register(26); +constexpr Register r27 = as_Register(27); +constexpr Register r28 = as_Register(28); +constexpr Register r29 = as_Register(29); +constexpr Register r30 = as_Register(30); // r31 is not a general purpose register, but represents either the // stack pointer or the zero/discard register depending on the // instruction. -CONSTANT_REGISTER_DECLARATION(Register, r31_sp, (31)); -CONSTANT_REGISTER_DECLARATION(Register, zr, (32)); -CONSTANT_REGISTER_DECLARATION(Register, sp, (33)); +constexpr Register r31_sp = as_Register(31); +constexpr Register zr = as_Register(32); +constexpr Register sp = as_Register(33); // Used as a filler in instructions where a register field is unused. 
-const Register dummy_reg = r31_sp; +constexpr Register dummy_reg = r31_sp; -// Use FloatRegister as shortcut -class FloatRegisterImpl; -typedef const FloatRegisterImpl* FloatRegister; - -inline constexpr FloatRegister as_FloatRegister(int encoding); // The implementation of floating point registers for the architecture -class FloatRegisterImpl: public AbstractRegisterImpl { - static constexpr FloatRegister first(); +class FloatRegister { + private: + int _encoding; + + constexpr explicit FloatRegister(int encoding) : _encoding(encoding) {} + + public: + inline friend constexpr FloatRegister as_FloatRegister(int encoding); -public: enum { - number_of_registers = 32, - max_slots_per_register = 4, - save_slots_per_register = 2, - slots_per_neon_register = 4, + number_of_registers = 32, + max_slots_per_register = 4, + save_slots_per_register = 2, + slots_per_neon_register = 4, extra_save_slots_per_neon_register = slots_per_neon_register - save_slots_per_register }; - // construction - inline friend constexpr FloatRegister as_FloatRegister(int encoding); + class FloatRegisterImpl: public AbstractRegisterImpl { + friend class FloatRegister; - VMReg as_VMReg() const; + static constexpr const FloatRegisterImpl* first(); - // derived registers, offsets, and addresses - FloatRegister successor() const { - return as_FloatRegister((encoding() + 1) % (unsigned)number_of_registers); - } + public: + // accessors + int raw_encoding() const { return this - first(); } + int encoding() const { assert(is_valid(), "invalid register"); return raw_encoding(); } + bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; } - // accessors - int encoding() const { assert(is_valid(), "invalid register"); return encoding_nocheck(); } - bool is_valid() const { return (unsigned)encoding_nocheck() < number_of_registers; } - const char* name() const; - int encoding_nocheck() const { return this - first(); } + // derived registers, offsets, and addresses + inline FloatRegister successor() const; + + VMReg as_VMReg() const; + + const char* name() const; + }; + + constexpr FloatRegister() : _encoding(-1) {} // fnoreg + + int operator==(const FloatRegister r) const { return _encoding == r._encoding; } + int operator!=(const FloatRegister r) const { return _encoding != r._encoding; } + + const FloatRegisterImpl* operator->() const { return FloatRegisterImpl::first() + _encoding; } }; -REGISTER_IMPL_DECLARATION(FloatRegister, FloatRegisterImpl, FloatRegisterImpl::number_of_registers); - - -// The float registers of the AARCH64 architecture - -CONSTANT_REGISTER_DECLARATION(FloatRegister, fnoreg , (-1)); - -CONSTANT_REGISTER_DECLARATION(FloatRegister, v0 , ( 0)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v1 , ( 1)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v2 , ( 2)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v3 , ( 3)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v4 , ( 4)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v5 , ( 5)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v6 , ( 6)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v7 , ( 7)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v8 , ( 8)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v9 , ( 9)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v10 , (10)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v11 , (11)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v12 , (12)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v13 , (13)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v14 , (14)); 
-CONSTANT_REGISTER_DECLARATION(FloatRegister, v15 , (15)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v16 , (16)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v17 , (17)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v18 , (18)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v19 , (19)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v20 , (20)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v21 , (21)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v22 , (22)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v23 , (23)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v24 , (24)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v25 , (25)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v26 , (26)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v27 , (27)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v28 , (28)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v29 , (29)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v30 , (30)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, v31 , (31)); +extern FloatRegister::FloatRegisterImpl all_FloatRegisterImpls[FloatRegister::number_of_registers + 1] INTERNAL_VISIBILITY; + +inline constexpr const FloatRegister::FloatRegisterImpl* FloatRegister::FloatRegisterImpl::first() { + return all_FloatRegisterImpls + 1; +} + +constexpr FloatRegister fnoreg = FloatRegister(); + +inline constexpr FloatRegister as_FloatRegister(int encoding) { + if (0 <= encoding && encoding < FloatRegister::number_of_registers) { + return FloatRegister(encoding); + } + return fnoreg; +} + +inline FloatRegister FloatRegister::FloatRegisterImpl::successor() const { + assert(is_valid(), "sanity"); + return as_FloatRegister((encoding() + 1) % number_of_registers); +} + +// The float registers of the AArch64 architecture +constexpr FloatRegister v0 = as_FloatRegister( 0); +constexpr FloatRegister v1 = as_FloatRegister( 1); +constexpr FloatRegister v2 = as_FloatRegister( 2); +constexpr FloatRegister v3 = as_FloatRegister( 3); +constexpr FloatRegister v4 = as_FloatRegister( 4); +constexpr FloatRegister v5 = as_FloatRegister( 5); +constexpr FloatRegister v6 = as_FloatRegister( 6); +constexpr FloatRegister v7 = as_FloatRegister( 7); +constexpr FloatRegister v8 = as_FloatRegister( 8); +constexpr FloatRegister v9 = as_FloatRegister( 9); +constexpr FloatRegister v10 = as_FloatRegister(10); +constexpr FloatRegister v11 = as_FloatRegister(11); +constexpr FloatRegister v12 = as_FloatRegister(12); +constexpr FloatRegister v13 = as_FloatRegister(13); +constexpr FloatRegister v14 = as_FloatRegister(14); +constexpr FloatRegister v15 = as_FloatRegister(15); +constexpr FloatRegister v16 = as_FloatRegister(16); +constexpr FloatRegister v17 = as_FloatRegister(17); +constexpr FloatRegister v18 = as_FloatRegister(18); +constexpr FloatRegister v19 = as_FloatRegister(19); +constexpr FloatRegister v20 = as_FloatRegister(20); +constexpr FloatRegister v21 = as_FloatRegister(21); +constexpr FloatRegister v22 = as_FloatRegister(22); +constexpr FloatRegister v23 = as_FloatRegister(23); +constexpr FloatRegister v24 = as_FloatRegister(24); +constexpr FloatRegister v25 = as_FloatRegister(25); +constexpr FloatRegister v26 = as_FloatRegister(26); +constexpr FloatRegister v27 = as_FloatRegister(27); +constexpr FloatRegister v28 = as_FloatRegister(28); +constexpr FloatRegister v29 = as_FloatRegister(29); +constexpr FloatRegister v30 = as_FloatRegister(30); +constexpr FloatRegister v31 = as_FloatRegister(31); // SVE vector registers, shared with the SIMD&FP v0-v31. Vn maps to Zn[127:0]. 
-CONSTANT_REGISTER_DECLARATION(FloatRegister, z0 , ( 0)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z1 , ( 1)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z2 , ( 2)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z3 , ( 3)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z4 , ( 4)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z5 , ( 5)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z6 , ( 6)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z7 , ( 7)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z8 , ( 8)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z9 , ( 9)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z10 , (10)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z11 , (11)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z12 , (12)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z13 , (13)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z14 , (14)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z15 , (15)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z16 , (16)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z17 , (17)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z18 , (18)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z19 , (19)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z20 , (20)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z21 , (21)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z22 , (22)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z23 , (23)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z24 , (24)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z25 , (25)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z26 , (26)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z27 , (27)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z28 , (28)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z29 , (29)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z30 , (30)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, z31 , (31)); - - -class PRegisterImpl; -typedef const PRegisterImpl* PRegister; -inline constexpr PRegister as_PRegister(int encoding); +constexpr FloatRegister z0 = v0; +constexpr FloatRegister z1 = v1; +constexpr FloatRegister z2 = v2; +constexpr FloatRegister z3 = v3; +constexpr FloatRegister z4 = v4; +constexpr FloatRegister z5 = v5; +constexpr FloatRegister z6 = v6; +constexpr FloatRegister z7 = v7; +constexpr FloatRegister z8 = v8; +constexpr FloatRegister z9 = v9; +constexpr FloatRegister z10 = v10; +constexpr FloatRegister z11 = v11; +constexpr FloatRegister z12 = v12; +constexpr FloatRegister z13 = v13; +constexpr FloatRegister z14 = v14; +constexpr FloatRegister z15 = v15; +constexpr FloatRegister z16 = v16; +constexpr FloatRegister z17 = v17; +constexpr FloatRegister z18 = v18; +constexpr FloatRegister z19 = v19; +constexpr FloatRegister z20 = v20; +constexpr FloatRegister z21 = v21; +constexpr FloatRegister z22 = v22; +constexpr FloatRegister z23 = v23; +constexpr FloatRegister z24 = v24; +constexpr FloatRegister z25 = v25; +constexpr FloatRegister z26 = v26; +constexpr FloatRegister z27 = v27; +constexpr FloatRegister z28 = v28; +constexpr FloatRegister z29 = v29; +constexpr FloatRegister z30 = v30; +constexpr FloatRegister z31 = v31; + // The implementation of predicate registers for the architecture -class PRegisterImpl: public AbstractRegisterImpl { - static constexpr PRegister first(); +class PRegister { + int _encoding; + + constexpr explicit PRegister(int encoding) : _encoding(encoding) {} + +public: + inline friend constexpr PRegister as_PRegister(int encoding); - public: enum { number_of_registers = 16, number_of_governing_registers = 8, @@ 
-255,66 +304,87 @@ class PRegisterImpl: public AbstractRegisterImpl { max_slots_per_register = 1 }; - // construction - inline friend constexpr PRegister as_PRegister(int encoding); + constexpr PRegister() : _encoding(-1) {} // pnoreg - VMReg as_VMReg() const; + class PRegisterImpl: public AbstractRegisterImpl { + friend class PRegister; - // derived registers, offsets, and addresses - PRegister successor() const { return this + 1; } + static constexpr const PRegisterImpl* first(); - // accessors - int encoding() const { assert(is_valid(), "invalid register"); return encoding_nocheck(); } - int encoding_nocheck() const { return this - first(); } - bool is_valid() const { return (unsigned)encoding_nocheck() < number_of_registers; } - bool is_governing() const { return first() <= this && this - first() < number_of_governing_registers; } - const char* name() const; + public: + // accessors + int raw_encoding() const { return this - first(); } + int encoding() const { assert(is_valid(), "invalid register"); return raw_encoding(); } + bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; } + bool is_governing() const { return 0 <= raw_encoding() && raw_encoding() < number_of_governing_registers; } + + // derived registers, offsets, and addresses + inline PRegister successor() const; + + VMReg as_VMReg() const; + + const char* name() const; + }; + + int operator==(const PRegister r) const { return _encoding == r._encoding; } + int operator!=(const PRegister r) const { return _encoding != r._encoding; } + + const PRegisterImpl* operator->() const { return PRegisterImpl::first() + _encoding; } }; +extern PRegister::PRegisterImpl all_PRegisterImpls[PRegister::number_of_registers + 1] INTERNAL_VISIBILITY; + +inline constexpr const PRegister::PRegisterImpl* PRegister::PRegisterImpl::first() { + return all_PRegisterImpls + 1; +} -REGISTER_IMPL_DECLARATION(PRegister, PRegisterImpl, PRegisterImpl::number_of_registers); +constexpr PRegister pnoreg = PRegister(); + +inline constexpr PRegister as_PRegister(int encoding) { + if (0 <= encoding && encoding < PRegister::number_of_registers) { + return PRegister(encoding); + } + return pnoreg; +} + +inline PRegister PRegister::PRegisterImpl::successor() const { + assert(is_valid(), "sanity"); + return as_PRegister(encoding() + 1); +} // The predicate registers of SVE. 
-// -CONSTANT_REGISTER_DECLARATION(PRegister, pnoreg, (-1)); - -CONSTANT_REGISTER_DECLARATION(PRegister, p0, ( 0)); -CONSTANT_REGISTER_DECLARATION(PRegister, p1, ( 1)); -CONSTANT_REGISTER_DECLARATION(PRegister, p2, ( 2)); -CONSTANT_REGISTER_DECLARATION(PRegister, p3, ( 3)); -CONSTANT_REGISTER_DECLARATION(PRegister, p4, ( 4)); -CONSTANT_REGISTER_DECLARATION(PRegister, p5, ( 5)); -CONSTANT_REGISTER_DECLARATION(PRegister, p6, ( 6)); -CONSTANT_REGISTER_DECLARATION(PRegister, p7, ( 7)); -CONSTANT_REGISTER_DECLARATION(PRegister, p8, ( 8)); -CONSTANT_REGISTER_DECLARATION(PRegister, p9, ( 9)); -CONSTANT_REGISTER_DECLARATION(PRegister, p10, (10)); -CONSTANT_REGISTER_DECLARATION(PRegister, p11, (11)); -CONSTANT_REGISTER_DECLARATION(PRegister, p12, (12)); -CONSTANT_REGISTER_DECLARATION(PRegister, p13, (13)); -CONSTANT_REGISTER_DECLARATION(PRegister, p14, (14)); -CONSTANT_REGISTER_DECLARATION(PRegister, p15, (15)); +constexpr PRegister p0 = as_PRegister( 0); +constexpr PRegister p1 = as_PRegister( 1); +constexpr PRegister p2 = as_PRegister( 2); +constexpr PRegister p3 = as_PRegister( 3); +constexpr PRegister p4 = as_PRegister( 4); +constexpr PRegister p5 = as_PRegister( 5); +constexpr PRegister p6 = as_PRegister( 6); +constexpr PRegister p7 = as_PRegister( 7); +constexpr PRegister p8 = as_PRegister( 8); +constexpr PRegister p9 = as_PRegister( 9); +constexpr PRegister p10 = as_PRegister(10); +constexpr PRegister p11 = as_PRegister(11); +constexpr PRegister p12 = as_PRegister(12); +constexpr PRegister p13 = as_PRegister(13); +constexpr PRegister p14 = as_PRegister(14); +constexpr PRegister p15 = as_PRegister(15); // Need to know the total number of registers of all sorts for SharedInfo. // Define a class that exports it. class ConcreteRegisterImpl : public AbstractRegisterImpl { public: enum { - // A big enough number for C2: all the registers plus flags - // This number must be large enough to cover REG_COUNT (defined by c2) registers. - // There is no requirement that any ordering here matches any ordering c2 gives - // it's optoregs. - - number_of_registers = (RegisterImpl::max_slots_per_register * RegisterImpl::number_of_registers + - FloatRegisterImpl::max_slots_per_register * FloatRegisterImpl::number_of_registers + - PRegisterImpl::max_slots_per_register * PRegisterImpl::number_of_registers + - 1) // flags + max_gpr = Register::number_of_registers * Register::max_slots_per_register, + max_fpr = max_gpr + FloatRegister::number_of_registers * FloatRegister::max_slots_per_register, + max_pr = max_fpr + PRegister::number_of_registers * PRegister::max_slots_per_register, + + // A big enough number for C2: all the registers plus flags + // This number must be large enough to cover REG_COUNT (defined by c2) registers. + // There is no requirement that any ordering here matches any ordering c2 gives + // its optoregs. + number_of_registers = max_pr + 1 // gpr/fpr/pr + flags }; - - // added to make it compile - static const int max_gpr; - static const int max_fpr; - static const int max_pr; }; typedef AbstractRegSet<Register> RegSet; diff --git a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp index 2640157a818a6..3f484b4c52ba9 100644 --- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp @@ -120,9 +120,9 @@ class RegisterSaver { // setting for it. We must therefore force the layout // so that it agrees with the frame sender code. 
r0_off = fpu_state_off + FPUStateSizeInWords, - rfp_off = r0_off + (RegisterImpl::number_of_registers - 2) * RegisterImpl::max_slots_per_register, - return_off = rfp_off + RegisterImpl::max_slots_per_register, // slot for return address - reg_save_size = return_off + RegisterImpl::max_slots_per_register}; + rfp_off = r0_off + (Register::number_of_registers - 2) * Register::max_slots_per_register, + return_off = rfp_off + Register::max_slots_per_register, // slot for return address + reg_save_size = return_off + Register::max_slots_per_register}; }; @@ -132,11 +132,11 @@ int RegisterSaver::reg_offset_in_bytes(Register r) { // offset depends on whether we are saving full vectors, and whether // those vectors are NEON or SVE. - int slots_per_vect = FloatRegisterImpl::save_slots_per_register; + int slots_per_vect = FloatRegister::save_slots_per_register; #if COMPILER2_OR_JVMCI if (_save_vectors) { - slots_per_vect = FloatRegisterImpl::slots_per_neon_register; + slots_per_vect = FloatRegister::slots_per_neon_register; #ifdef COMPILER2 if (Matcher::supports_scalable_vector()) { @@ -146,7 +146,7 @@ int RegisterSaver::reg_offset_in_bytes(Register r) { } #endif - int r0_offset = v0_offset_in_bytes() + (slots_per_vect * FloatRegisterImpl::number_of_registers) * BytesPerInt; + int r0_offset = v0_offset_in_bytes() + (slots_per_vect * FloatRegister::number_of_registers) * BytesPerInt; return r0_offset + r->encoding() * wordSize; } @@ -164,7 +164,7 @@ int RegisterSaver::total_sve_predicate_in_bytes() { // of 16 bytes so we manually align it up. return align_up(Matcher::scalable_predicate_reg_slots() * VMRegImpl::stack_slot_size * - PRegisterImpl::number_of_saved_registers, 16); + PRegister::number_of_saved_registers, 16); } #endif return 0; @@ -192,13 +192,13 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_ int extra_save_slots_per_register = 0; // Save upper half of vector registers if (use_sve) { - extra_save_slots_per_register = sve_vector_size_in_slots - FloatRegisterImpl::save_slots_per_register; + extra_save_slots_per_register = sve_vector_size_in_slots - FloatRegister::save_slots_per_register; } else { - extra_save_slots_per_register = FloatRegisterImpl::extra_save_slots_per_neon_register; + extra_save_slots_per_register = FloatRegister::extra_save_slots_per_neon_register; } int extra_vector_bytes = extra_save_slots_per_register * VMRegImpl::stack_slot_size * - FloatRegisterImpl::number_of_registers; + FloatRegister::number_of_registers; additional_frame_words += ((extra_vector_bytes + total_predicate_in_bytes) / wordSize); } #else @@ -227,31 +227,31 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_ OopMapSet *oop_maps = new OopMapSet(); OopMap* oop_map = new OopMap(frame_size_in_slots, 0); - for (int i = 0; i < RegisterImpl::number_of_registers; i++) { + for (int i = 0; i < Register::number_of_registers; i++) { Register r = as_Register(i); - if (r <= rfp && r != rscratch1 && r != rscratch2) { + if (i <= rfp->encoding() && r != rscratch1 && r != rscratch2) { // SP offsets are in 4-byte words. // Register slots are 8 bytes wide, 32 floating-point registers. 
- int sp_offset = RegisterImpl::max_slots_per_register * i + - FloatRegisterImpl::save_slots_per_register * FloatRegisterImpl::number_of_registers; + int sp_offset = Register::max_slots_per_register * i + + FloatRegister::save_slots_per_register * FloatRegister::number_of_registers; oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset + additional_frame_slots), r->as_VMReg()); } } - for (int i = 0; i < FloatRegisterImpl::number_of_registers; i++) { + for (int i = 0; i < FloatRegister::number_of_registers; i++) { FloatRegister r = as_FloatRegister(i); int sp_offset = 0; if (_save_vectors) { sp_offset = use_sve ? (total_predicate_in_slots + sve_vector_size_in_slots * i) : - (FloatRegisterImpl::slots_per_neon_register * i); + (FloatRegister::slots_per_neon_register * i); } else { - sp_offset = FloatRegisterImpl::save_slots_per_register * i; + sp_offset = FloatRegister::save_slots_per_register * i; } oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset), r->as_VMReg()); } if (_save_vectors && use_sve) { - for (int i = 0; i < PRegisterImpl::number_of_saved_registers; i++) { + for (int i = 0; i < PRegister::number_of_saved_registers; i++) { PRegister r = as_PRegister(i); int sp_offset = sve_predicate_size_in_slots * i; oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset), r->as_VMReg()); @@ -290,8 +290,8 @@ bool SharedRuntime::is_wide_vector(int size) { // refer to 4-byte stack slots. All stack slots are based off of the stack pointer // as framesizes are fixed. // VMRegImpl::stack0 refers to the first slot 0(sp). -// and VMRegImpl::stack0+1 refers to the memory word 4-byes higher. Register -up to RegisterImpl::number_of_registers) are the 64-bit +// and VMRegImpl::stack0+1 refers to the memory word 4-bytes higher. +// Register up to Register::number_of_registers are the 64-bit // integer registers. // Note: the INPUTS in sig_bt are in units of Java argument words, @@ -1469,12 +1469,12 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, int int_args = 0; #ifdef ASSERT - bool reg_destroyed[RegisterImpl::number_of_registers]; - bool freg_destroyed[FloatRegisterImpl::number_of_registers]; - for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) { + bool reg_destroyed[Register::number_of_registers]; + bool freg_destroyed[FloatRegister::number_of_registers]; + for ( int r = 0 ; r < Register::number_of_registers ; r++ ) { reg_destroyed[r] = false; } - for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) { + for ( int f = 0 ; f < FloatRegister::number_of_registers ; f++ ) { freg_destroyed[f] = false; } @@ -1554,8 +1554,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, // load oop into a register __ movoop(c_rarg1, - JNIHandles::make_local(method->method_holder()->java_mirror()), - /*immediate*/true); + JNIHandles::make_local(method->method_holder()->java_mirror())); // Now handlize the static class mirror it's known not-null. __ str(c_rarg1, Address(sp, klass_offset)); diff --git a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp index 94eb2d608a805..6d98d9c07e1c8 100644 --- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp @@ -1853,7 +1853,7 @@ class StubGenerator: public StubCodeGenerator { __ align(OptoLoopAlignment); __ BIND(L_store_element); - __ store_heap_oop(__ post(to, UseCompressedOops ? 4 : 8), copied_oop, noreg, noreg, AS_RAW); // store the oop + __ store_heap_oop(__ post(to, UseCompressedOops ? 
4 : 8), copied_oop, noreg, noreg, noreg, AS_RAW); // store the oop __ sub(count, count, 1); __ cbz(count, L_do_card_marks); @@ -3045,8 +3045,9 @@ class StubGenerator: public StubCodeGenerator { __ movi(v9, __ T4S, 1); __ ins(v8, __ S, v9, 3, 3); // v8 contains { 0, 0, 0, 1 } - for (FloatRegister f = v0; f < v0 + bulk_width; f++) { - __ rev32(f, __ T16B, v16); + for (int i = 0; i < bulk_width; i++) { + FloatRegister v0_ofs = as_FloatRegister(v0->encoding() + i); + __ rev32(v0_ofs, __ T16B, v16); __ addv(v16, __ T4S, v16, v8); } @@ -3061,7 +3062,9 @@ class StubGenerator: public StubCodeGenerator { // XOR the encrypted counters with the inputs for (int i = 0; i < bulk_width; i++) { - __ eor(v0 + i, __ T16B, v0 + i, v8 + i); + FloatRegister v0_ofs = as_FloatRegister(v0->encoding() + i); + FloatRegister v8_ofs = as_FloatRegister(v8->encoding() + i); + __ eor(v0_ofs, __ T16B, v0_ofs, v8_ofs); } // Write the encrypted data @@ -3162,7 +3165,10 @@ class StubGenerator: public StubCodeGenerator { __ movi(v8, __ T4S, 0); __ movi(v9, __ T4S, 1); __ ins(v8, __ S, v9, 3, 3); // v8 contains { 0, 0, 0, 1 } - for (FloatRegister f = v0; f < v8; f++) { + + assert(v0->encoding() < v8->encoding(), ""); + for (int i = v0->encoding(); i < v8->encoding(); i++) { + FloatRegister f = as_FloatRegister(i); __ rev32(f, __ T16B, v16); __ addv(v16, __ T4S, v16, v8); } @@ -3176,7 +3182,9 @@ class StubGenerator: public StubCodeGenerator { // XOR the encrypted counters with the inputs for (int i = 0; i < 8; i++) { - __ eor(v0 + i, __ T16B, v0 + i, v8 + i); + FloatRegister v0_ofs = as_FloatRegister(v0->encoding() + i); + FloatRegister v8_ofs = as_FloatRegister(v8->encoding() + i); + __ eor(v0_ofs, __ T16B, v0_ofs, v8_ofs); } __ st1(v0, v1, v2, v3, __ T16B, __ post(out, 4 * 16)); __ st1(v4, v5, v6, v7, __ T16B, __ post(out, 4 * 16)); @@ -7246,7 +7254,8 @@ class StubGenerator: public StubCodeGenerator { // Preserves len // Leaves s pointing to the address which was in d at start void reverse(Register d, Register s, Register len, Register tmp1, Register tmp2) { - assert(tmp1 < r19 && tmp2 < r19, "register corruption"); + assert(tmp1->encoding() < r19->encoding(), "register corruption"); + assert(tmp2->encoding() < r19->encoding(), "register corruption"); lea(s, Address(s, len, Address::uxtw(LogBytesPerWord))); mov(tmp1, len); diff --git a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp index 37d07e84321eb..5ccecb4fc89e5 100644 --- a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp @@ -1112,7 +1112,7 @@ void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) { // native method than the typical interpreter frame setup. 
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { // determine code generation flags - bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods; + bool inc_counter = UseCompiler || CountCompiledCalls; // r1: Method* // rscratch1: sender sp @@ -1536,7 +1536,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { // address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) { // determine code generation flags - bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods; + bool inc_counter = UseCompiler || CountCompiledCalls; // rscratch1: sender sp address entry_point = __ pc(); diff --git a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp index a31e51c60701f..981c63bef38af 100644 --- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp @@ -146,7 +146,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm, Register val, DecoratorSet decorators) { assert(val == noreg || val == r0, "parameter is just for looks"); - __ store_heap_oop(dst, val, r10, r1, decorators); + __ store_heap_oop(dst, val, r10, r1, r3, decorators); } static void do_oop_load(InterpreterMacroAssembler* _masm, @@ -1059,7 +1059,7 @@ void TemplateTable::iastore() { // r3: array index_check(r3, r1); // prefer index in r1 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2); - __ access_store_at(T_INT, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(2)), r0, noreg, noreg); + __ access_store_at(T_INT, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(2)), r0, noreg, noreg, noreg); } void TemplateTable::lastore() { @@ -1071,7 +1071,7 @@ void TemplateTable::lastore() { // r3: array index_check(r3, r1); // prefer index in r1 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3); - __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), r0, noreg, noreg); + __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), r0, noreg, noreg, noreg); } void TemplateTable::fastore() { @@ -1083,7 +1083,7 @@ void TemplateTable::fastore() { // r3: array index_check(r3, r1); // prefer index in r1 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2); - __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(2)), noreg /* ftos */, noreg, noreg); + __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(2)), noreg /* ftos */, noreg, noreg, noreg); } void TemplateTable::dastore() { @@ -1095,7 +1095,7 @@ void TemplateTable::dastore() { // r3: array index_check(r3, r1); // prefer index in r1 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3); - __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), noreg /* dtos */, noreg, noreg); + __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), noreg /* dtos */, noreg, noreg, noreg); } void TemplateTable::aastore() { @@ -1172,7 +1172,7 @@ void TemplateTable::bastore() __ bind(L_skip); __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0); - __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(0)), r0, noreg, noreg); + __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(0)), r0, noreg, noreg, noreg); } void TemplateTable::castore() @@ -1185,7 +1185,7 @@ void TemplateTable::castore() // r3: array index_check(r3, r1); // prefer index in 
r1 __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1); - __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(1)), r0, noreg, noreg); + __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(1)), r0, noreg, noreg, noreg); } void TemplateTable::sastore() @@ -2687,7 +2687,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr { __ pop(btos); if (!is_static) pop_and_check_object(obj); - __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg); + __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg, noreg); if (rc == may_rewrite) { patch_bytecode(Bytecodes::_fast_bputfield, bc, r1, true, byte_no); } @@ -2702,7 +2702,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr { __ pop(ztos); if (!is_static) pop_and_check_object(obj); - __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg); + __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg, noreg); if (rc == may_rewrite) { patch_bytecode(Bytecodes::_fast_zputfield, bc, r1, true, byte_no); } @@ -2733,7 +2733,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr { __ pop(itos); if (!is_static) pop_and_check_object(obj); - __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg); + __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg, noreg); if (rc == may_rewrite) { patch_bytecode(Bytecodes::_fast_iputfield, bc, r1, true, byte_no); } @@ -2748,7 +2748,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr { __ pop(ctos); if (!is_static) pop_and_check_object(obj); - __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg); + __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg, noreg); if (rc == may_rewrite) { patch_bytecode(Bytecodes::_fast_cputfield, bc, r1, true, byte_no); } @@ -2763,7 +2763,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr { __ pop(stos); if (!is_static) pop_and_check_object(obj); - __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg); + __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg, noreg); if (rc == may_rewrite) { patch_bytecode(Bytecodes::_fast_sputfield, bc, r1, true, byte_no); } @@ -2778,7 +2778,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr { __ pop(ltos); if (!is_static) pop_and_check_object(obj); - __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg); + __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg, noreg); if (rc == may_rewrite) { patch_bytecode(Bytecodes::_fast_lputfield, bc, r1, true, byte_no); } @@ -2793,7 +2793,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr { __ pop(ftos); if (!is_static) pop_and_check_object(obj); - __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg); + __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg, noreg); if (rc == may_rewrite) { patch_bytecode(Bytecodes::_fast_fputfield, bc, r1, true, byte_no); } @@ -2810,7 +2810,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr { __ pop(dtos); if (!is_static) pop_and_check_object(obj); - __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg); + __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg, noreg); if (rc == may_rewrite) { patch_bytecode(Bytecodes::_fast_dputfield, bc, r1, true, byte_no); } @@ -2945,28 +2945,28 @@ void 
TemplateTable::fast_storefield(TosState state) do_oop_store(_masm, field, r0, IN_HEAP); break; case Bytecodes::_fast_lputfield: - __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg); + __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg, noreg); break; case Bytecodes::_fast_iputfield: - __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg); + __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg, noreg); break; case Bytecodes::_fast_zputfield: - __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg); + __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg, noreg); break; case Bytecodes::_fast_bputfield: - __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg); + __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg, noreg); break; case Bytecodes::_fast_sputfield: - __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg); + __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg, noreg); break; case Bytecodes::_fast_cputfield: - __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg); + __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg, noreg); break; case Bytecodes::_fast_fputfield: - __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg); + __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg, noreg); break; case Bytecodes::_fast_dputfield: - __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg); + __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg, noreg); break; default: ShouldNotReachHere(); diff --git a/src/hotspot/cpu/aarch64/upcallLinker_aarch64.cpp b/src/hotspot/cpu/aarch64/upcallLinker_aarch64.cpp index 9d6563acb712b..f275f34f4ce5e 100644 --- a/src/hotspot/cpu/aarch64/upcallLinker_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/upcallLinker_aarch64.cpp @@ -39,7 +39,7 @@ // for callee saved regs, according to the caller's ABI static int compute_reg_save_area_size(const ABIDescriptor& abi) { int size = 0; - for (int i = 0; i < RegisterImpl::number_of_registers; i++) { + for (int i = 0; i < Register::number_of_registers; i++) { Register reg = as_Register(i); if (reg == rfp || reg == sp) continue; // saved/restored by prologue/epilogue if (!abi.is_volatile_reg(reg)) { @@ -47,7 +47,7 @@ static int compute_reg_save_area_size(const ABIDescriptor& abi) { } } - for (int i = 0; i < FloatRegisterImpl::number_of_registers; i++) { + for (int i = 0; i < FloatRegister::number_of_registers; i++) { FloatRegister reg = as_FloatRegister(i); if (!abi.is_volatile_reg(reg)) { // Only the lower 64 bits of vector registers need to be preserved. 
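Note: the upcallLinker hunks above and below only re-scope the enum constants (RegisterImpl:: becomes Register::); the save-area logic itself is unchanged. A self-contained sketch of what compute_reg_save_area_size() computes, where the register count, slot size and volatility predicate are assumptions for illustration, not the real ABIDescriptor API:

#include <cstdio>

const int kNumGprs   = 32;  // Register::number_of_registers on aarch64
const int kSlotBytes = 8;   // one 64-bit save slot per callee-saved register

// Assumed ABI: x0-x17 are caller-saved (volatile); rfp (x29) and sp are
// handled by the prologue/epilogue and never need a slot here.
static bool is_volatile_gpr(int enc)   { return enc <= 17; }
static bool is_prologue_saved(int enc) { return enc == 29 || enc == 31; }

static int compute_reg_save_area_size() {
  int size = 0;
  for (int i = 0; i < kNumGprs; i++) {
    if (is_prologue_saved(i)) continue;
    if (!is_volatile_gpr(i)) size += kSlotBytes;  // reserve a slot per callee-saved GPR
  }
  return size;
}

int main() { printf("reg save area: %d bytes\n", compute_reg_save_area_size()); return 0; }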
@@ -66,7 +66,7 @@ static void preserve_callee_saved_registers(MacroAssembler* _masm, const ABIDesc int offset = reg_save_area_offset; __ block_comment("{ preserve_callee_saved_regs "); - for (int i = 0; i < RegisterImpl::number_of_registers; i++) { + for (int i = 0; i < Register::number_of_registers; i++) { Register reg = as_Register(i); if (reg == rfp || reg == sp) continue; // saved/restored by prologue/epilogue if (!abi.is_volatile_reg(reg)) { @@ -75,7 +75,7 @@ static void preserve_callee_saved_registers(MacroAssembler* _masm, const ABIDesc } } - for (int i = 0; i < FloatRegisterImpl::number_of_registers; i++) { + for (int i = 0; i < FloatRegister::number_of_registers; i++) { FloatRegister reg = as_FloatRegister(i); if (!abi.is_volatile_reg(reg)) { __ strd(reg, Address(sp, offset)); @@ -94,7 +94,7 @@ static void restore_callee_saved_registers(MacroAssembler* _masm, const ABIDescr int offset = reg_save_area_offset; __ block_comment("{ restore_callee_saved_regs "); - for (int i = 0; i < RegisterImpl::number_of_registers; i++) { + for (int i = 0; i < Register::number_of_registers; i++) { Register reg = as_Register(i); if (reg == rfp || reg == sp) continue; // saved/restored by prologue/epilogue if (!abi.is_volatile_reg(reg)) { @@ -103,7 +103,7 @@ static void restore_callee_saved_registers(MacroAssembler* _masm, const ABIDescr } } - for (int i = 0; i < FloatRegisterImpl::number_of_registers; i++) { + for (int i = 0; i < FloatRegister::number_of_registers; i++) { FloatRegister reg = as_FloatRegister(i); if (!abi.is_volatile_reg(reg)) { __ ldrd(reg, Address(sp, offset)); diff --git a/src/hotspot/cpu/aarch64/vmreg_aarch64.cpp b/src/hotspot/cpu/aarch64/vmreg_aarch64.cpp index 3633f149c3daa..49adb39834005 100644 --- a/src/hotspot/cpu/aarch64/vmreg_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/vmreg_aarch64.cpp @@ -33,7 +33,7 @@ void VMRegImpl::set_regName() { Register reg = ::as_Register(0); int i; for (i = 0; i < ConcreteRegisterImpl::max_gpr ; ) { - for (int j = 0 ; j < RegisterImpl::max_slots_per_register ; j++) { + for (int j = 0 ; j < Register::max_slots_per_register ; j++) { regName[i++] = reg->name(); } reg = reg->successor(); @@ -41,7 +41,7 @@ void VMRegImpl::set_regName() { FloatRegister freg = ::as_FloatRegister(0); for ( ; i < ConcreteRegisterImpl::max_fpr ; ) { - for (int j = 0 ; j < FloatRegisterImpl::max_slots_per_register ; j++) { + for (int j = 0 ; j < FloatRegister::max_slots_per_register ; j++) { regName[i++] = freg->name(); } freg = freg->successor(); diff --git a/src/hotspot/cpu/aarch64/vmreg_aarch64.hpp b/src/hotspot/cpu/aarch64/vmreg_aarch64.hpp index b16202795be01..bf93372d9cd2f 100644 --- a/src/hotspot/cpu/aarch64/vmreg_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/vmreg_aarch64.hpp @@ -41,27 +41,27 @@ inline bool is_PRegister() { inline Register as_Register() { assert( is_Register(), "must be"); // Yuk - return ::as_Register(value() / RegisterImpl::max_slots_per_register); + return ::as_Register(value() / Register::max_slots_per_register); } inline FloatRegister as_FloatRegister() { assert( is_FloatRegister() && is_even(value()), "must be" ); // Yuk return ::as_FloatRegister((value() - ConcreteRegisterImpl::max_gpr) / - FloatRegisterImpl::max_slots_per_register); + FloatRegister::max_slots_per_register); } inline PRegister as_PRegister() { assert( is_PRegister(), "must be" ); return ::as_PRegister((value() - ConcreteRegisterImpl::max_fpr) / - PRegisterImpl::max_slots_per_register); + PRegister::max_slots_per_register); } inline bool is_concrete() { assert(is_reg(), "must be"); 
if (is_FloatRegister()) { int base = value() - ConcreteRegisterImpl::max_gpr; - return base % FloatRegisterImpl::max_slots_per_register == 0; + return (base % FloatRegister::max_slots_per_register) == 0; } else if (is_PRegister()) { return true; // Single slot } else { diff --git a/src/hotspot/cpu/aarch64/vmreg_aarch64.inline.hpp b/src/hotspot/cpu/aarch64/vmreg_aarch64.inline.hpp index aa750104896f1..85cfb01ee85d0 100644 --- a/src/hotspot/cpu/aarch64/vmreg_aarch64.inline.hpp +++ b/src/hotspot/cpu/aarch64/vmreg_aarch64.inline.hpp @@ -26,18 +26,18 @@ #ifndef CPU_AARCH64_VMREG_AARCH64_INLINE_HPP #define CPU_AARCH64_VMREG_AARCH64_INLINE_HPP -inline VMReg RegisterImpl::as_VMReg() const { - if( this==noreg ) return VMRegImpl::Bad(); - return VMRegImpl::as_VMReg(encoding() * RegisterImpl::max_slots_per_register); +inline VMReg Register::RegisterImpl::as_VMReg() const { + return VMRegImpl::as_VMReg(encoding() * Register::max_slots_per_register); } -inline VMReg FloatRegisterImpl::as_VMReg() const { - return VMRegImpl::as_VMReg((encoding() * FloatRegisterImpl::max_slots_per_register) + +inline VMReg FloatRegister::FloatRegisterImpl::as_VMReg() const { + return VMRegImpl::as_VMReg((encoding() * FloatRegister::max_slots_per_register) + ConcreteRegisterImpl::max_gpr); } -inline VMReg PRegisterImpl::as_VMReg() const { - return VMRegImpl::as_VMReg(encoding() + ConcreteRegisterImpl::max_fpr); +inline VMReg PRegister::PRegisterImpl::as_VMReg() const { + return VMRegImpl::as_VMReg((encoding() * PRegister::max_slots_per_register) + + ConcreteRegisterImpl::max_fpr); } #endif // CPU_AARCH64_VMREG_AARCH64_INLINE_HPP diff --git a/src/hotspot/cpu/arm/frame_arm.cpp b/src/hotspot/cpu/arm/frame_arm.cpp index 9d7de578a8199..01d54f62f763d 100644 --- a/src/hotspot/cpu/arm/frame_arm.cpp +++ b/src/hotspot/cpu/arm/frame_arm.cpp @@ -123,7 +123,7 @@ bool frame::safe_for_sender(JavaThread *thread) { } // We must always be able to find a recognizable pc - CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc); + CodeBlob* sender_blob = CodeCache::find_blob(sender_pc); if (sender_pc == NULL || sender_blob == NULL) { return false; } @@ -148,10 +148,6 @@ bool frame::safe_for_sender(JavaThread *thread) { return sender.is_interpreted_frame_valid(thread); } - if (sender_blob->is_zombie() || sender_blob->is_unloaded()) { - return false; - } - // Could just be some random pointer within the codeBlob if (!sender_blob->code_contains(sender_pc)) { return false; diff --git a/src/hotspot/cpu/arm/nativeInst_arm_32.cpp b/src/hotspot/cpu/arm/nativeInst_arm_32.cpp index bf91c4221093f..95120aad768f0 100644 --- a/src/hotspot/cpu/arm/nativeInst_arm_32.cpp +++ b/src/hotspot/cpu/arm/nativeInst_arm_32.cpp @@ -290,7 +290,7 @@ void RawNativeJump::check_verified_entry_alignment(address entry, address verifi void RawNativeJump::patch_verified_entry(address entry, address verified_entry, address dest) { assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "should be"); int *a = (int *)verified_entry; - a[0] = zombie_illegal_instruction; // always illegal + a[0] = not_entrant_illegal_instruction; // always illegal ICache::invalidate_range((address)&a[0], sizeof a[0]); } diff --git a/src/hotspot/cpu/arm/nativeInst_arm_32.hpp b/src/hotspot/cpu/arm/nativeInst_arm_32.hpp index 572cb0f0cc481..0c9e157c5535d 100644 --- a/src/hotspot/cpu/arm/nativeInst_arm_32.hpp +++ b/src/hotspot/cpu/arm/nativeInst_arm_32.hpp @@ -63,7 +63,7 @@ class RawNativeInstruction { // illegal instruction used by NativeJump::patch_verified_entry // permanently undefined 
(UDF): 0xe << 28 | 0b1111111 << 20 | 0b1111 << 4 - static const int zombie_illegal_instruction = 0xe7f000f0; + static const int not_entrant_illegal_instruction = 0xe7f000f0; static int decode_rotated_imm12(int encoding) { int base = encoding & 0xff; diff --git a/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp b/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp index 4e918a1496866..c2c30a9766acc 100644 --- a/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp +++ b/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp @@ -796,7 +796,7 @@ address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(Abstract address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { // determine code generation flags - bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods; + bool inc_counter = UseCompiler || CountCompiledCalls; // Incoming registers: // @@ -1128,7 +1128,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { // address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) { // determine code generation flags - bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods; + bool inc_counter = UseCompiler || CountCompiledCalls; // Rmethod: Method* // Rthread: thread diff --git a/src/hotspot/cpu/ppc/frame_ppc.cpp b/src/hotspot/cpu/ppc/frame_ppc.cpp index 4112b651bb6ad..0c049c2140155 100644 --- a/src/hotspot/cpu/ppc/frame_ppc.cpp +++ b/src/hotspot/cpu/ppc/frame_ppc.cpp @@ -119,16 +119,11 @@ bool frame::safe_for_sender(JavaThread *thread) { address sender_pc = (address) sender_abi->lr;; // We must always be able to find a recognizable pc. - CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc); + CodeBlob* sender_blob = CodeCache::find_blob(sender_pc); if (sender_blob == NULL) { return false; } - // Could be a zombie method - if (sender_blob->is_zombie() || sender_blob->is_unloaded()) { - return false; - } - // It should be safe to construct the sender though it might not be valid. 
frame sender(sender_sp, sender_pc); diff --git a/src/hotspot/cpu/ppc/gc/shared/barrierSetNMethod_ppc.cpp b/src/hotspot/cpu/ppc/gc/shared/barrierSetNMethod_ppc.cpp index ae655bb2b2b86..2b9a322637496 100644 --- a/src/hotspot/cpu/ppc/gc/shared/barrierSetNMethod_ppc.cpp +++ b/src/hotspot/cpu/ppc/gc/shared/barrierSetNMethod_ppc.cpp @@ -122,7 +122,12 @@ void BarrierSetNMethod::disarm(nmethod* nm) { } void BarrierSetNMethod::arm(nmethod* nm, int arm_value) { - Unimplemented(); + if (!supports_entry_barrier(nm)) { + return; + } + + NativeNMethodBarrier* barrier = get_nmethod_barrier(nm); + barrier->release_set_guard_value(arm_value); } bool BarrierSetNMethod::is_armed(nmethod* nm) { diff --git a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp index 46476dbb79c9b..5b0a8026528f6 100644 --- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp +++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp @@ -2210,7 +2210,7 @@ void InterpreterMacroAssembler::get_method_counters(Register method, void InterpreterMacroAssembler::increment_invocation_counter(Register Rcounters, Register iv_be_count, Register Rtmp_r0) { - assert(UseCompiler || LogTouchedMethods, "incrementing must be useful"); + assert(UseCompiler, "incrementing must be useful"); Register invocation_count = iv_be_count; Register backedge_count = Rtmp_r0; int delta = InvocationCounter::count_increment; diff --git a/src/hotspot/cpu/ppc/nativeInst_ppc.cpp b/src/hotspot/cpu/ppc/nativeInst_ppc.cpp index eece5739d5b0f..63684b2db43eb 100644 --- a/src/hotspot/cpu/ppc/nativeInst_ppc.cpp +++ b/src/hotspot/cpu/ppc/nativeInst_ppc.cpp @@ -40,14 +40,14 @@ #include "c1/c1_Runtime1.hpp" #endif -// We use an illtrap for marking a method as not_entrant or zombie +// We use an illtrap for marking a method as not_entrant // Work around a C++ compiler bug which changes 'this' -bool NativeInstruction::is_sigill_zombie_not_entrant_at(address addr) { +bool NativeInstruction::is_sigill_not_entrant_at(address addr) { if (*(int*)addr != 0 /*illtrap*/) return false; - CodeBlob* cb = CodeCache::find_blob_unsafe(addr); + CodeBlob* cb = CodeCache::find_blob(addr); if (cb == NULL || !cb->is_nmethod()) return false; nmethod *nm = (nmethod *)cb; - // This method is not_entrant or zombie iff the illtrap instruction is + // This method is not_entrant iff the illtrap instruction is // located at the verified entry point. return nm->verified_entry_point() == addr; } @@ -71,7 +71,7 @@ address NativeCall::destination() const { // Trampoline stubs are located behind the main code. if (destination > addr) { // Filter out recursive method invocation (call to verified/unverified entry point). - CodeBlob* cb = CodeCache::find_blob_unsafe(addr); // Else we get assertion if nmethod is zombie. + CodeBlob* cb = CodeCache::find_blob(addr); assert(cb && cb->is_nmethod(), "sanity"); nmethod *nm = (nmethod *)cb; if (nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) { @@ -196,7 +196,7 @@ intptr_t NativeMovConstReg::data() const { return MacroAssembler::get_const(addr); } - CodeBlob* cb = CodeCache::find_blob_unsafe(addr); + CodeBlob* cb = CodeCache::find_blob(addr); assert(cb != NULL, "Could not find code blob"); if (MacroAssembler::is_set_narrow_oop(addr, cb->content_begin())) { narrowOop no = MacroAssembler::get_narrow_oop(addr, cb->content_begin()); @@ -318,7 +318,7 @@ void NativeMovConstReg::verify() { address addr = addr_at(0); if (! MacroAssembler::is_load_const_at(addr) && ! 
MacroAssembler::is_load_const_from_method_toc_at(addr)) { - CodeBlob* cb = CodeCache::find_blob_unsafe(addr); // find_nmethod() asserts if nmethod is zombie. + CodeBlob* cb = CodeCache::find_blob(addr); if (! (cb != NULL && MacroAssembler::is_calculate_address_from_global_toc_at(addr, cb->content_begin())) && ! (cb != NULL && MacroAssembler::is_set_narrow_oop(addr, cb->content_begin())) && ! MacroAssembler::is_bl(*((int*) addr))) { @@ -343,7 +343,7 @@ void NativeJump::patch_verified_entry(address entry, address verified_entry, add a->b(dest); } else { // The signal handler will continue at dest=OptoRuntime::handle_wrong_method_stub(). - // We use an illtrap for marking a method as not_entrant or zombie. + // We use an illtrap for marking a method as not_entrant. a->illtrap(); } ICache::ppc64_flush_icache_bytes(verified_entry, code_size); @@ -406,7 +406,7 @@ address NativeCallTrampolineStub::encoded_destination_addr() const { } address NativeCallTrampolineStub::destination(nmethod *nm) const { - CodeBlob* cb = nm ? nm : CodeCache::find_blob_unsafe(addr_at(0)); + CodeBlob* cb = nm ? nm : CodeCache::find_blob(addr_at(0)); assert(cb != NULL, "Could not find code blob"); address ctable = cb->content_begin(); diff --git a/src/hotspot/cpu/ppc/nativeInst_ppc.hpp b/src/hotspot/cpu/ppc/nativeInst_ppc.hpp index 94ce07a7f5f63..7d6e6cff5a5c8 100644 --- a/src/hotspot/cpu/ppc/nativeInst_ppc.hpp +++ b/src/hotspot/cpu/ppc/nativeInst_ppc.hpp @@ -67,12 +67,12 @@ class NativeInstruction { return MacroAssembler::tdi_get_si16(long_at(0), Assembler::traptoUnconditional, 0); } - // We use an illtrap for marking a method as not_entrant or zombie. - bool is_sigill_zombie_not_entrant() { + // We use an illtrap for marking a method as not_entrant. + bool is_sigill_not_entrant() { // Work around a C++ compiler bug which changes 'this'. - return NativeInstruction::is_sigill_zombie_not_entrant_at(addr_at(0)); + return NativeInstruction::is_sigill_not_entrant_at(addr_at(0)); } - static bool is_sigill_zombie_not_entrant_at(address addr); + static bool is_sigill_not_entrant_at(address addr); #ifdef COMPILER2 // SIGTRAP-based implicit range checks diff --git a/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp b/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp index 4d1349673f634..034e476131c00 100644 --- a/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp +++ b/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp @@ -1190,7 +1190,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { address entry = __ pc(); - const bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods; + const bool inc_counter = UseCompiler || CountCompiledCalls; // ----------------------------------------------------------------------------- // Allocate a new frame that represents the native callee (i2n frame). @@ -1614,7 +1614,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { // Generic interpreted method entry to (asm) interpreter. // address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) { - bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods; + bool inc_counter = UseCompiler || CountCompiledCalls; address entry = __ pc(); // Generate the code to allocate the interpreter stack frame. Register Rsize_of_parameters = R4_ARG2, // Written by generate_fixed_frame. 
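For context on how the illtrap marking above is consumed: once a method is made not_entrant, the illtrap at its verified entry point raises SIGILL, and the platform signal handler redirects execution to the wrong-method stub. A hedged sketch of that dispatch, modeled on the handler in os_linux_ppc.cpp (simplified and abbreviated, not the exact code):

// Sketch of the SIGILL handling path that pairs with is_sigill_not_entrant_at().
else if (sig == SIGILL && ((NativeInstruction*)pc)->is_sigill_not_entrant()) {
  if (TraceTraps) {
    tty->print_cr("trap: not_entrant");
  }
  // Resume in the shared runtime stub that re-resolves the call.
  stub = SharedRuntime::get_handle_wrong_method_stub();
}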
diff --git a/src/hotspot/cpu/riscv/assembler_riscv.hpp b/src/hotspot/cpu/riscv/assembler_riscv.hpp index 35144fc955265..8839fa001aa69 100644 --- a/src/hotspot/cpu/riscv/assembler_riscv.hpp +++ b/src/hotspot/cpu/riscv/assembler_riscv.hpp @@ -50,23 +50,23 @@ class Argument { }; // function argument(caller-save registers) -REGISTER_DECLARATION(Register, c_rarg0, x10); -REGISTER_DECLARATION(Register, c_rarg1, x11); -REGISTER_DECLARATION(Register, c_rarg2, x12); -REGISTER_DECLARATION(Register, c_rarg3, x13); -REGISTER_DECLARATION(Register, c_rarg4, x14); -REGISTER_DECLARATION(Register, c_rarg5, x15); -REGISTER_DECLARATION(Register, c_rarg6, x16); -REGISTER_DECLARATION(Register, c_rarg7, x17); - -REGISTER_DECLARATION(FloatRegister, c_farg0, f10); -REGISTER_DECLARATION(FloatRegister, c_farg1, f11); -REGISTER_DECLARATION(FloatRegister, c_farg2, f12); -REGISTER_DECLARATION(FloatRegister, c_farg3, f13); -REGISTER_DECLARATION(FloatRegister, c_farg4, f14); -REGISTER_DECLARATION(FloatRegister, c_farg5, f15); -REGISTER_DECLARATION(FloatRegister, c_farg6, f16); -REGISTER_DECLARATION(FloatRegister, c_farg7, f17); +constexpr Register c_rarg0 = x10; +constexpr Register c_rarg1 = x11; +constexpr Register c_rarg2 = x12; +constexpr Register c_rarg3 = x13; +constexpr Register c_rarg4 = x14; +constexpr Register c_rarg5 = x15; +constexpr Register c_rarg6 = x16; +constexpr Register c_rarg7 = x17; + +constexpr FloatRegister c_farg0 = f10; +constexpr FloatRegister c_farg1 = f11; +constexpr FloatRegister c_farg2 = f12; +constexpr FloatRegister c_farg3 = f13; +constexpr FloatRegister c_farg4 = f14; +constexpr FloatRegister c_farg5 = f15; +constexpr FloatRegister c_farg6 = f16; +constexpr FloatRegister c_farg7 = f17; // Symbolically name the register arguments used by the Java calling convention. // We have control over the convention for java so we can do what we please. 
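The switch from REGISTER_DECLARATION to constexpr definitions makes each register name an ordinary compile-time constant instead of a pointer into a dummy object. One practical consequence, shown as an illustrative sketch rather than part of the patch: register tables can be defined at namespace scope with no static-initialization-order concerns, just as this file already does for g_INTArgReg below.

// Illustrative only: constexpr register values can seed namespace-scope tables directly.
const Register int_arg_regs_sketch[] = {
  c_rarg0, c_rarg1, c_rarg2, c_rarg3,
  c_rarg4, c_rarg5, c_rarg6, c_rarg7
};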
@@ -83,32 +83,32 @@ REGISTER_DECLARATION(FloatRegister, c_farg7, f17); // | j_rarg7 j_rarg0 j_rarg1 j_rarg2 j_rarg3 j_rarg4 j_rarg5 j_rarg6 | // |------------------------------------------------------------------------| -REGISTER_DECLARATION(Register, j_rarg0, c_rarg1); -REGISTER_DECLARATION(Register, j_rarg1, c_rarg2); -REGISTER_DECLARATION(Register, j_rarg2, c_rarg3); -REGISTER_DECLARATION(Register, j_rarg3, c_rarg4); -REGISTER_DECLARATION(Register, j_rarg4, c_rarg5); -REGISTER_DECLARATION(Register, j_rarg5, c_rarg6); -REGISTER_DECLARATION(Register, j_rarg6, c_rarg7); -REGISTER_DECLARATION(Register, j_rarg7, c_rarg0); +constexpr Register j_rarg0 = c_rarg1; +constexpr Register j_rarg1 = c_rarg2; +constexpr Register j_rarg2 = c_rarg3; +constexpr Register j_rarg3 = c_rarg4; +constexpr Register j_rarg4 = c_rarg5; +constexpr Register j_rarg5 = c_rarg6; +constexpr Register j_rarg6 = c_rarg7; +constexpr Register j_rarg7 = c_rarg0; // Java floating args are passed as per C -REGISTER_DECLARATION(FloatRegister, j_farg0, f10); -REGISTER_DECLARATION(FloatRegister, j_farg1, f11); -REGISTER_DECLARATION(FloatRegister, j_farg2, f12); -REGISTER_DECLARATION(FloatRegister, j_farg3, f13); -REGISTER_DECLARATION(FloatRegister, j_farg4, f14); -REGISTER_DECLARATION(FloatRegister, j_farg5, f15); -REGISTER_DECLARATION(FloatRegister, j_farg6, f16); -REGISTER_DECLARATION(FloatRegister, j_farg7, f17); +constexpr FloatRegister j_farg0 = f10; +constexpr FloatRegister j_farg1 = f11; +constexpr FloatRegister j_farg2 = f12; +constexpr FloatRegister j_farg3 = f13; +constexpr FloatRegister j_farg4 = f14; +constexpr FloatRegister j_farg5 = f15; +constexpr FloatRegister j_farg6 = f16; +constexpr FloatRegister j_farg7 = f17; // zero register -REGISTER_DECLARATION(Register, zr, x0); +constexpr Register zr = x0; // global pointer -REGISTER_DECLARATION(Register, gp, x3); +constexpr Register gp = x3; // thread pointer -REGISTER_DECLARATION(Register, tp, x4); +constexpr Register tp = x4; // registers used to hold VM data either temporarily within a method // or across method calls @@ -116,40 +116,28 @@ REGISTER_DECLARATION(Register, tp, x4); // volatile (caller-save) registers // current method -- must be in a call-clobbered register -REGISTER_DECLARATION(Register, xmethod, x31); +constexpr Register xmethod = x31; // return address -REGISTER_DECLARATION(Register, ra, x1); +constexpr Register ra = x1; // non-volatile (callee-save) registers -// stack pointer -REGISTER_DECLARATION(Register, sp, x2); -// frame pointer -REGISTER_DECLARATION(Register, fp, x8); -// base of heap -REGISTER_DECLARATION(Register, xheapbase, x27); -// constant pool cache -REGISTER_DECLARATION(Register, xcpool, x26); -// monitors allocated on stack -REGISTER_DECLARATION(Register, xmonitors, x25); -// locals on stack -REGISTER_DECLARATION(Register, xlocals, x24); - -// java thread pointer -REGISTER_DECLARATION(Register, xthread, x23); -// bytecode pointer -REGISTER_DECLARATION(Register, xbcp, x22); -// Dispatch table base -REGISTER_DECLARATION(Register, xdispatch, x21); -// Java expression stack pointer -REGISTER_DECLARATION(Register, esp, x20); -// Sender's SP while in interpreter -REGISTER_DECLARATION(Register, x19_sender_sp, x19); +constexpr Register sp = x2; // stack pointer +constexpr Register fp = x8; // frame pointer +constexpr Register xheapbase = x27; // base of heap +constexpr Register xcpool = x26; // constant pool cache +constexpr Register xmonitors = x25; // monitors allocated on stack +constexpr Register xlocals = x24; // locals on stack
+constexpr Register xthread = x23; // java thread pointer +constexpr Register xbcp = x22; // bytecode pointer +constexpr Register xdispatch = x21; // Dispatch table base +constexpr Register esp = x20; // Java expression stack pointer +constexpr Register x19_sender_sp = x19; // Sender's SP while in interpreter // temporary register(caller-save registers) -REGISTER_DECLARATION(Register, t0, x5); -REGISTER_DECLARATION(Register, t1, x6); -REGISTER_DECLARATION(Register, t2, x7); +constexpr Register t0 = x5; +constexpr Register t1 = x6; +constexpr Register t2 = x7; const Register g_INTArgReg[Argument::n_int_register_parameters_c] = { c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5, c_rarg6, c_rarg7 @@ -218,10 +206,6 @@ class Address { bool uses(Register reg) const { return _base == reg; } const address target() const { return _target; } const RelocationHolder& rspec() const { return _rspec; } - ~Address() { - _target = NULL; - _base = NULL; - } }; // Convenience classes @@ -394,15 +378,15 @@ class Assembler : public AbstractAssembler { } static void patch_reg(address a, unsigned lsb, Register reg) { - patch(a, lsb + 4, lsb, reg->encoding_nocheck()); + patch(a, lsb + 4, lsb, reg->raw_encoding()); } static void patch_reg(address a, unsigned lsb, FloatRegister reg) { - patch(a, lsb + 4, lsb, reg->encoding_nocheck()); + patch(a, lsb + 4, lsb, reg->raw_encoding()); } static void patch_reg(address a, unsigned lsb, VectorRegister reg) { - patch(a, lsb + 4, lsb, reg->encoding_nocheck()); + patch(a, lsb + 4, lsb, reg->raw_encoding()); } void emit(unsigned insn) { @@ -1550,7 +1534,7 @@ enum VectorMask { #define INSN(NAME, op, funct3, funct6) \ void NAME(VectorRegister Vd, VectorRegister Vs1, VectorRegister Vs2, VectorMask vm = unmasked) { \ - patch_VArith(op, Vd, funct3, Vs1->encoding_nocheck(), Vs2, vm, funct6); \ + patch_VArith(op, Vd, funct3, Vs1->raw_encoding(), Vs2, vm, funct6); \ } // Vector Single-Width Floating-Point Fused Multiply-Add Instructions @@ -1573,7 +1557,7 @@ enum VectorMask { #define INSN(NAME, op, funct3, funct6) \ void NAME(VectorRegister Vd, Register Rs1, VectorRegister Vs2, VectorMask vm = unmasked) { \ - patch_VArith(op, Vd, funct3, Rs1->encoding_nocheck(), Vs2, vm, funct6); \ + patch_VArith(op, Vd, funct3, Rs1->raw_encoding(), Vs2, vm, funct6); \ } // Vector Single-Width Integer Multiply-Add Instructions @@ -1588,7 +1572,7 @@ enum VectorMask { #define INSN(NAME, op, funct3, funct6) \ void NAME(VectorRegister Vd, FloatRegister Rs1, VectorRegister Vs2, VectorMask vm = unmasked) { \ - patch_VArith(op, Vd, funct3, Rs1->encoding_nocheck(), Vs2, vm, funct6); \ + patch_VArith(op, Vd, funct3, Rs1->raw_encoding(), Vs2, vm, funct6); \ } // Vector Single-Width Floating-Point Fused Multiply-Add Instructions @@ -1605,7 +1589,7 @@ enum VectorMask { #define INSN(NAME, op, funct3, funct6) \ void NAME(VectorRegister Vd, VectorRegister Vs2, VectorRegister Vs1, VectorMask vm = unmasked) { \ - patch_VArith(op, Vd, funct3, Vs1->encoding_nocheck(), Vs2, vm, funct6); \ + patch_VArith(op, Vd, funct3, Vs1->raw_encoding(), Vs2, vm, funct6); \ } // Vector Single-Width Floating-Point Reduction Instructions @@ -1695,7 +1679,7 @@ enum VectorMask { #define INSN(NAME, op, funct3, funct6) \ void NAME(VectorRegister Vd, VectorRegister Vs2, Register Rs1, VectorMask vm = unmasked) { \ - patch_VArith(op, Vd, funct3, Rs1->encoding_nocheck(), Vs2, vm, funct6); \ + patch_VArith(op, Vd, funct3, Rs1->raw_encoding(), Vs2, vm, funct6); \ } // Vector Integer Divide Instructions @@ -1748,7 +1732,7 @@ enum VectorMask { 
#define INSN(NAME, op, funct3, funct6) \ void NAME(VectorRegister Vd, VectorRegister Vs2, FloatRegister Rs1, VectorMask vm = unmasked) { \ - patch_VArith(op, Vd, funct3, Rs1->encoding_nocheck(), Vs2, vm, funct6); \ + patch_VArith(op, Vd, funct3, Rs1->raw_encoding(), Vs2, vm, funct6); \ } // Vector Floating-Point Compare Instructions @@ -1811,7 +1795,7 @@ enum VectorMask { #define INSN(NAME, op, funct3, vm, funct6) \ void NAME(VectorRegister Vd, VectorRegister Vs2, VectorRegister Vs1) { \ - patch_VArith(op, Vd, funct3, Vs1->encoding_nocheck(), Vs2, vm, funct6); \ + patch_VArith(op, Vd, funct3, Vs1->raw_encoding(), Vs2, vm, funct6); \ } // Vector Compress Instruction @@ -1842,7 +1826,7 @@ enum VectorMask { #define INSN(NAME, op, funct3, Vs2, vm, funct6) \ void NAME(VectorRegister Vd, FloatRegister Rs1) { \ - patch_VArith(op, Vd, funct3, Rs1->encoding_nocheck(), Vs2, vm, funct6); \ + patch_VArith(op, Vd, funct3, Rs1->raw_encoding(), Vs2, vm, funct6); \ } // Floating-Point Scalar Move Instructions @@ -1854,7 +1838,7 @@ enum VectorMask { #define INSN(NAME, op, funct3, Vs2, vm, funct6) \ void NAME(VectorRegister Vd, VectorRegister Vs1) { \ - patch_VArith(op, Vd, funct3, Vs1->encoding_nocheck(), Vs2, vm, funct6); \ + patch_VArith(op, Vd, funct3, Vs1->raw_encoding(), Vs2, vm, funct6); \ } // Vector Integer Move Instructions @@ -1864,7 +1848,7 @@ enum VectorMask { #define INSN(NAME, op, funct3, Vs2, vm, funct6) \ void NAME(VectorRegister Vd, Register Rs1) { \ - patch_VArith(op, Vd, funct3, Rs1->encoding_nocheck(), Vs2, vm, funct6); \ + patch_VArith(op, Vd, funct3, Rs1->raw_encoding(), Vs2, vm, funct6); \ } // Integer Scalar Move Instructions @@ -1975,7 +1959,7 @@ enum Nf { #define INSN(NAME, op, width, mop, mew) \ void NAME(VectorRegister Vd, Register Rs1, VectorRegister Vs2, VectorMask vm = unmasked, Nf nf = g1) { \ - patch_VLdSt(op, Vd, width, Rs1, Vs2->encoding_nocheck(), vm, mop, mew, nf); \ + patch_VLdSt(op, Vd, width, Rs1, Vs2->raw_encoding(), vm, mop, mew, nf); \ } // Vector unordered indexed load instructions @@ -1993,7 +1977,7 @@ enum Nf { #define INSN(NAME, op, width, mop, mew) \ void NAME(VectorRegister Vd, Register Rs1, Register Rs2, VectorMask vm = unmasked, Nf nf = g1) { \ - patch_VLdSt(op, Vd, width, Rs1, Rs2->encoding_nocheck(), vm, mop, mew, nf); \ + patch_VLdSt(op, Vd, width, Rs1, Rs2->raw_encoding(), vm, mop, mew, nf); \ } // Vector Strided Instructions @@ -2165,22 +2149,22 @@ enum Nf { // patch a 16-bit instruction with a general purpose register ranging [0, 31] (5 bits) static void c_patch_reg(address a, unsigned lsb, Register reg) { - c_patch(a, lsb + 4, lsb, reg->encoding_nocheck()); + c_patch(a, lsb + 4, lsb, reg->raw_encoding()); } // patch a 16-bit instruction with a general purpose register ranging [8, 15] (3 bits) static void c_patch_compressed_reg(address a, unsigned lsb, Register reg) { - c_patch(a, lsb + 2, lsb, reg->compressed_encoding_nocheck()); + c_patch(a, lsb + 2, lsb, reg->compressed_raw_encoding()); } // patch a 16-bit instruction with a float register ranging [0, 31] (5 bits) static void c_patch_reg(address a, unsigned lsb, FloatRegister reg) { - c_patch(a, lsb + 4, lsb, reg->encoding_nocheck()); + c_patch(a, lsb + 4, lsb, reg->raw_encoding()); } // patch a 16-bit instruction with a float register ranging [8, 15] (3 bits) static void c_patch_compressed_reg(address a, unsigned lsb, FloatRegister reg) { - c_patch(a, lsb + 2, lsb, reg->compressed_encoding_nocheck()); + c_patch(a, lsb + 2, lsb, reg->compressed_raw_encoding()); } // -------------- RVC Instruction 
Definitions -------------- diff --git a/src/hotspot/cpu/riscv/c1_Defs_riscv.hpp b/src/hotspot/cpu/riscv/c1_Defs_riscv.hpp index 4417ad6309124..9e132a52699e2 100644 --- a/src/hotspot/cpu/riscv/c1_Defs_riscv.hpp +++ b/src/hotspot/cpu/riscv/c1_Defs_riscv.hpp @@ -39,8 +39,8 @@ enum { // registers enum { - pd_nof_cpu_regs_frame_map = RegisterImpl::number_of_registers, // number of registers used during code emission - pd_nof_fpu_regs_frame_map = FloatRegisterImpl::number_of_registers, // number of float registers used during code emission + pd_nof_cpu_regs_frame_map = Register::number_of_registers, // number of registers used during code emission + pd_nof_fpu_regs_frame_map = FloatRegister::number_of_registers, // number of float registers used during code emission // caller saved pd_nof_caller_save_cpu_regs_frame_map = 13, // number of registers killed by calls diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp index 18ca94191503f..4a19d430cc6fd 100644 --- a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp @@ -291,7 +291,7 @@ void LIR_Assembler::jobject2reg(jobject o, Register reg) { if (o == NULL) { __ mv(reg, zr); } else { - __ movoop(reg, o, /* immediate */ true); + __ movoop(reg, o); } } diff --git a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp index 735ed88a796f2..56dd336a25c56 100644 --- a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp @@ -310,7 +310,7 @@ void C1_MacroAssembler::build_frame(int framesize, int bang_size_in_bytes) { // Insert nmethod entry barrier into frame. BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); - bs->nmethod_entry_barrier(this); + bs->nmethod_entry_barrier(this, NULL /* slow_path */, NULL /* continuation */, NULL /* guard */); } void C1_MacroAssembler::remove_frame(int framesize) { diff --git a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp index 24ee64ad41d45..3061be8b4c104 100644 --- a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp @@ -28,6 +28,7 @@ #include "asm/assembler.inline.hpp" #include "opto/c2_MacroAssembler.hpp" #include "opto/intrinsicnode.hpp" +#include "opto/output.hpp" #include "opto/subnode.hpp" #include "runtime/stubRoutines.hpp" @@ -241,6 +242,35 @@ void C2_MacroAssembler::string_indexof_char(Register str1, Register cnt1, typedef void (MacroAssembler::* load_chr_insn)(Register rd, const Address &adr, Register temp); +void C2_MacroAssembler::emit_entry_barrier_stub(C2EntryBarrierStub* stub) { + // make guard value 4-byte aligned so that it can be accessed by atomic instructions on riscv + int alignment_bytes = align(4); + + bind(stub->slow_path()); + + int32_t _offset = 0; + movptr_with_offset(t0, StubRoutines::riscv::method_entry_barrier(), _offset); + jalr(ra, t0, _offset); + j(stub->continuation()); + + bind(stub->guard()); + relocate(entry_guard_Relocation::spec()); + assert(offset() % 4 == 0, "bad alignment"); + emit_int32(0); // nmethod guard value + // make sure the stub has a fixed code size + if (alignment_bytes == 2) { + assert(UseRVC, "bad alignment"); + c_nop(); + } else { + assert(alignment_bytes == 0, "bad alignment"); + nop(); + } +} + +int C2_MacroAssembler::entry_barrier_stub_size() { + return 8 * 4 + 4; // 4 bytes for alignment margin +} + // Search for
needle in haystack and return index or -1 // x10: result // x11: haystack diff --git a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp index 8c43690733bed..10f7bf6846ce5 100644 --- a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp +++ b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp @@ -36,8 +36,8 @@ VectorRegister vrs, bool is_latin, Label& DONE); public: - void emit_entry_barrier_stub(C2EntryBarrierStub* stub) {} - static int entry_barrier_stub_size() { return 0; } + void emit_entry_barrier_stub(C2EntryBarrierStub* stub); + static int entry_barrier_stub_size(); void string_compare(Register str1, Register str2, Register cnt1, Register cnt2, Register result, diff --git a/src/hotspot/cpu/riscv/frame_riscv.cpp b/src/hotspot/cpu/riscv/frame_riscv.cpp index c346497c01d2c..a70ebea6eb14d 100644 --- a/src/hotspot/cpu/riscv/frame_riscv.cpp +++ b/src/hotspot/cpu/riscv/frame_riscv.cpp @@ -175,16 +175,11 @@ bool frame::safe_for_sender(JavaThread *thread) { } // We must always be able to find a recognizable pc - CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc); + CodeBlob* sender_blob = CodeCache::find_blob(sender_pc); if (sender_pc == NULL || sender_blob == NULL) { return false; } - // Could be a zombie method - if (sender_blob->is_zombie() || sender_blob->is_unloaded()) { - return false; - } - // Could just be some random pointer within the codeBlob if (!sender_blob->code_contains(sender_pc)) { return false; } diff --git a/src/hotspot/cpu/riscv/frame_riscv.inline.hpp b/src/hotspot/cpu/riscv/frame_riscv.inline.hpp index 934f0e5910e12..e62338403bf45 100644 --- a/src/hotspot/cpu/riscv/frame_riscv.inline.hpp +++ b/src/hotspot/cpu/riscv/frame_riscv.inline.hpp @@ -115,10 +115,8 @@ inline frame::frame(intptr_t* ptr_sp, intptr_t* ptr_fp) { _pc = (address)(ptr_sp[-1]); // Here's a sticky one. This constructor can be called via AsyncGetCallTrace - // when last_Java_sp is non-null but the pc fetched is junk. If we are truly - // unlucky the junk value could be to a zombied method and we'll die on the - // find_blob call. This is also why we can have no asserts on the validity - // of the pc we find here. AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler + // when last_Java_sp is non-null but the pc fetched is junk. + // AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler // -> pd_last_frame should use a specialized version of pd_last_frame which could // call a specialized frame constructor instead of this one. // Then we could use the assert below. However this assert is of somewhat dubious diff --git a/src/hotspot/cpu/riscv/gc/g1/g1BarrierSetAssembler_riscv.cpp b/src/hotspot/cpu/riscv/gc/g1/g1BarrierSetAssembler_riscv.cpp index 836d512138011..d29e092d71e1e 100644 --- a/src/hotspot/cpu/riscv/gc/g1/g1BarrierSetAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/gc/g1/g1BarrierSetAssembler_riscv.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. + * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -45,7 +45,6 @@ void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count, RegSet saved_regs) { - assert_cond(masm != NULL); bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0; if (!dest_uninitialized) { Label done; @@ -88,7 +87,6 @@ void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register start, Register count, Register tmp, RegSet saved_regs) { - assert_cond(masm != NULL); __ push_reg(saved_regs, sp); assert_different_registers(start, count, tmp); assert_different_registers(c_rarg0, count); @@ -109,7 +107,6 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, // directly to skip generating the check by // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp. - assert_cond(masm != NULL); assert(thread == xthread, "must be"); Label done; @@ -157,12 +154,8 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, __ j(done); __ bind(runtime); - // save the live input values - RegSet saved = RegSet::of(pre_val); - if (tosca_live) { saved += RegSet::of(x10); } - if (obj != noreg) { saved += RegSet::of(obj); } - __ push_reg(saved, sp); + __ push_call_clobbered_registers(); if (expand_call) { assert(pre_val != c_rarg1, "smashed arg"); @@ -171,7 +164,7 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread); } - __ pop_reg(saved, sp); + __ pop_call_clobbered_registers(); __ bind(done); @@ -183,7 +176,6 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Register thread, Register tmp, Register tmp2) { - assert_cond(masm != NULL); assert(thread == xthread, "must be"); assert_different_registers(store_addr, new_val, thread, tmp, tmp2, t0); @@ -258,7 +250,6 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Register dst, Address src, Register tmp1, Register tmp_thread) { - assert_cond(masm != NULL); bool on_oop = is_reference_type(type); bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0; bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0; @@ -282,7 +273,6 @@ void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorator void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Address dst, Register val, Register tmp1, Register tmp2) { - assert_cond(masm != NULL); // flatten object address if needed if (dst.offset() == 0) { if (dst.base() != x13) { diff --git a/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.cpp b/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.cpp index ab38cf0d11dbf..782019902c660 100644 --- a/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.cpp @@ -40,8 +40,6 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Register dst, Address src, Register tmp1, Register tmp_thread) { - assert_cond(masm != NULL); - // RA is live. It must be saved around calls. 
bool in_heap = (decorators & IN_HEAP) != 0; @@ -82,7 +80,6 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Address dst, Register val, Register tmp1, Register tmp2) { - assert_cond(masm != NULL); bool in_heap = (decorators & IN_HEAP) != 0; bool in_native = (decorators & IN_NATIVE) != 0; switch (type) { @@ -124,7 +121,6 @@ void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env, Register obj, Register tmp, Label& slowpath) { - assert_cond(masm != NULL); // If mask changes we need to ensure that the inverse is still encodable as an immediate STATIC_ASSERT(JNIHandles::weak_tag_mask == 1); __ andi(obj, obj, ~JNIHandles::weak_tag_mask); @@ -139,7 +135,6 @@ void BarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj, Register tmp2, Label& slow_case, bool is_far) { - assert_cond(masm != NULL); assert_different_registers(obj, tmp2); assert_different_registers(obj, var_size_in_bytes); Register end = tmp2; @@ -166,7 +161,6 @@ void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm, Register var_size_in_bytes, int con_size_in_bytes, Register tmp1) { - assert_cond(masm != NULL); assert(tmp1->is_valid(), "need temp reg"); __ ld(tmp1, Address(xthread, in_bytes(JavaThread::allocated_bytes_offset()))); @@ -178,38 +172,104 @@ void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm, __ sd(tmp1, Address(xthread, in_bytes(JavaThread::allocated_bytes_offset()))); } -void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) { +static volatile uint32_t _patching_epoch = 0; + +address BarrierSetAssembler::patching_epoch_addr() { + return (address)&_patching_epoch; +} + +void BarrierSetAssembler::increment_patching_epoch() { + Atomic::inc(&_patching_epoch); +} + +void BarrierSetAssembler::clear_patching_epoch() { + _patching_epoch = 0; +} + +void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slow_path, Label* continuation, Label* guard) { BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); if (bs_nm == NULL) { return; } - // RISCV atomic operations require that the memory address be naturally aligned. - __ align(4); + Label local_guard; + NMethodPatchingType patching_type = nmethod_patching_type(); - Label skip, guard; - Address thread_disarmed_addr(xthread, in_bytes(bs_nm->thread_disarmed_offset())); + if (slow_path == NULL) { + guard = &local_guard; - __ lwu(t0, guard); + // RISCV atomic operations require that the memory address be naturally aligned. + __ align(4); + } - // Subsequent loads of oops must occur after load of guard value. - // BarrierSetNMethod::disarm sets guard with release semantics. - __ membar(MacroAssembler::LoadLoad); - __ lwu(t1, thread_disarmed_addr); - __ beq(t0, t1, skip); + __ lwu(t0, *guard); + + switch (patching_type) { + case NMethodPatchingType::conc_data_patch: + // Subsequent loads of oops must occur after load of guard value. + // BarrierSetNMethod::disarm sets guard with release semantics. 
+ __ membar(MacroAssembler::LoadLoad); // fall through to stw_instruction_and_data_patch + case NMethodPatchingType::stw_instruction_and_data_patch: + { + // With STW patching, no data or instructions are updated concurrently, + // which means there isn't really any need for any fencing for either + // data or instruction modification happening concurrently. The + // instruction patching is synchronized with global icache_flush() by + // the writing hart on riscv. So here we can do a plain conditional + // branch with no fencing. + Address thread_disarmed_addr(xthread, in_bytes(bs_nm->thread_disarmed_offset())); + __ lwu(t1, thread_disarmed_addr); + break; + } + case NMethodPatchingType::conc_instruction_and_data_patch: + { + // If we patch code we need both a code patching and a loadload + // fence. It's not super cheap, so we use a global epoch mechanism + // to hide them in a slow path. + // The high level idea of the global epoch mechanism is to detect + // when any thread has performed the required fencing, after the + // last nmethod was disarmed. This implies that the required + // fencing has been performed for all preceding nmethod disarms + // as well. Therefore, we do not need any further fencing. + __ la(t1, ExternalAddress((address)&_patching_epoch)); + // Embed an artificial data dependency to order the guard load + // before the epoch load. + __ srli(ra, t0, 32); + __ orr(t1, t1, ra); + // Read the global epoch value. + __ lwu(t1, t1); + // Combine the guard value (low order) with the epoch value (high order). + __ slli(t1, t1, 32); + __ orr(t0, t0, t1); + // Compare the global values with the thread-local values + Address thread_disarmed_and_epoch_addr(xthread, in_bytes(bs_nm->thread_disarmed_offset())); + __ ld(t1, thread_disarmed_and_epoch_addr); + break; + } + default: + ShouldNotReachHere(); + } - int32_t offset = 0; - __ movptr_with_offset(t0, StubRoutines::riscv::method_entry_barrier(), offset); - __ jalr(ra, t0, offset); - __ j(skip); + if (slow_path == NULL) { + Label skip_barrier; + __ beq(t0, t1, skip_barrier); - __ bind(guard); + int32_t offset = 0; + __ movptr_with_offset(t0, StubRoutines::riscv::method_entry_barrier(), offset); + __ jalr(ra, t0, offset); + __ j(skip_barrier); - assert(__ offset() % 4 == 0, "bad alignment"); - __ emit_int32(0); // nmethod guard value. Skipped over in common case. + __ bind(local_guard); - __ bind(skip); + assert(__ offset() % 4 == 0, "bad alignment"); + __ emit_int32(0); // nmethod guard value. Skipped over in common case.
+ __ bind(skip_barrier); + } else { + __ beq(t0, t1, *continuation); + __ j(*slow_path); + __ bind(*continuation); + } } void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) { diff --git a/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.hpp b/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.hpp index 508146447e7ad..f4768460ea805 100644 --- a/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.hpp +++ b/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.hpp @@ -32,6 +32,12 @@ #include "memory/allocation.hpp" #include "oops/access.hpp" +enum class NMethodPatchingType { + stw_instruction_and_data_patch, + conc_instruction_and_data_patch, + conc_data_patch +}; + class BarrierSetAssembler: public CHeapObj { private: void incr_allocated_bytes(MacroAssembler* masm, @@ -63,9 +69,20 @@ class BarrierSetAssembler: public CHeapObj { virtual void barrier_stubs_init() {} - virtual void nmethod_entry_barrier(MacroAssembler* masm); + virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::stw_instruction_and_data_patch; } + + virtual void nmethod_entry_barrier(MacroAssembler* masm, Label* slow_path, Label* continuation, Label* guard); virtual void c2i_entry_barrier(MacroAssembler* masm); - virtual ~BarrierSetAssembler() {} + + virtual bool supports_instruction_patching() { + NMethodPatchingType patching_type = nmethod_patching_type(); + return patching_type == NMethodPatchingType::conc_instruction_and_data_patch || + patching_type == NMethodPatchingType::stw_instruction_and_data_patch; + } + + static address patching_epoch_addr(); + static void clear_patching_epoch(); + static void increment_patching_epoch(); }; #endif // CPU_RISCV_GC_SHARED_BARRIERSETASSEMBLER_RISCV_HPP diff --git a/src/hotspot/cpu/riscv/gc/shared/barrierSetNMethod_riscv.cpp b/src/hotspot/cpu/riscv/gc/shared/barrierSetNMethod_riscv.cpp index 66682b8f14855..33126326a974c 100644 --- a/src/hotspot/cpu/riscv/gc/shared/barrierSetNMethod_riscv.cpp +++ b/src/hotspot/cpu/riscv/gc/shared/barrierSetNMethod_riscv.cpp @@ -26,6 +26,7 @@ #include "precompiled.hpp" #include "code/codeCache.hpp" #include "code/nativeInst.hpp" +#include "gc/shared/barrierSetAssembler.hpp" #include "gc/shared/barrierSetNMethod.hpp" #include "logging/log.hpp" #include "memory/resourceArea.hpp" @@ -36,21 +37,57 @@ #include "utilities/align.hpp" #include "utilities/debug.hpp" +static int slow_path_size(nmethod* nm) { + // The slow path code is out of line with C2. + // Leave a jal to the stub in the fast path. + return nm->is_compiled_by_c2() ? 
1 : 8; +} + +static int entry_barrier_offset(nmethod* nm) { + BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler(); + switch (bs_asm->nmethod_patching_type()) { + case NMethodPatchingType::stw_instruction_and_data_patch: + return -4 * (4 + slow_path_size(nm)); + case NMethodPatchingType::conc_data_patch: + return -4 * (5 + slow_path_size(nm)); + case NMethodPatchingType::conc_instruction_and_data_patch: + return -4 * (15 + slow_path_size(nm)); + } + ShouldNotReachHere(); + return 0; +} + class NativeNMethodBarrier: public NativeInstruction { address instruction_address() const { return addr_at(0); } - int *guard_addr() { - /* auipc + lwu + fence + lwu + beq + lui + addi + slli + addi + slli + jalr + j */ - return reinterpret_cast(instruction_address() + 12 * 4); + int local_guard_offset(nmethod* nm) { + // It's the last instruction + return (-entry_barrier_offset(nm)) - 4; + } + + int *guard_addr(nmethod* nm) { + if (nm->is_compiled_by_c2()) { + // With c2 compiled code, the guard is out-of-line in a stub + // We find it using the RelocIterator. + RelocIterator iter(nm); + while (iter.next()) { + if (iter.type() == relocInfo::entry_guard_type) { + entry_guard_Relocation* const reloc = iter.entry_guard_reloc(); + return reinterpret_cast(reloc->addr()); + } + } + ShouldNotReachHere(); + } + return reinterpret_cast(instruction_address() + local_guard_offset(nm)); } public: - int get_value() { - return Atomic::load_acquire(guard_addr()); + int get_value(nmethod* nm) { + return Atomic::load_acquire(guard_addr(nm)); } - void set_value(int value) { - Atomic::release_store(guard_addr(), value); + void set_value(nmethod* nm, int value) { + Atomic::release_store(guard_addr(nm), value); } void verify() const; @@ -64,21 +101,12 @@ struct CheckInsn { }; static const struct CheckInsn barrierInsn[] = { - { 0x00000fff, 0x00000297, "auipc t0, 0 "}, - { 0x000fffff, 0x0002e283, "lwu t0, 48(t0) "}, - { 0xffffffff, 0x0aa0000f, "fence ir, ir "}, - { 0x000fffff, 0x000be303, "lwu t1, 112(xthread)"}, - { 0x01fff07f, 0x00628063, "beq t0, t1, skip "}, - { 0x00000fff, 0x000002b7, "lui t0, imm0 "}, - { 0x000fffff, 0x00028293, "addi t0, t0, imm1 "}, - { 0xffffffff, 0x00b29293, "slli t0, t0, 11 "}, - { 0x000fffff, 0x00028293, "addi t0, t0, imm2 "}, - { 0xffffffff, 0x00629293, "slli t0, t0, 6 "}, - { 0x000fffff, 0x000280e7, "jalr ra, imm3(t0) "}, - { 0x00000fff, 0x0000006f, "j skip "} + { 0x00000fff, 0x00000297, "auipc t0, 0 "}, + { 0x000fffff, 0x0002e283, "lwu t0, guard_offset(t0) "}, + /* ...... */ + /* ...... */ /* guard: */ /* 32bit nmethod guard value */ - /* skip: */ }; // The encodings must match the instructions emitted by @@ -136,19 +164,8 @@ void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) { new_frame->pc = SharedRuntime::get_handle_wrong_method_stub(); } -// This is the offset of the entry barrier from where the frame is completed. -// If any code changes between the end of the verified entry where the entry -// barrier resides, and the completion of the frame, then -// NativeNMethodCmpBarrier::verify() will immediately complain when it does -// not find the expected native instruction at this offset, which needs updating. -// Note that this offset is invariant of PreserveFramePointer. 
- -// see BarrierSetAssembler::nmethod_entry_barrier -// auipc + lwu + fence + lwu + beq + movptr_with_offset(5 instructions) + jalr + j + int32 -static const int entry_barrier_offset = -4 * 13; - static NativeNMethodBarrier* native_nmethod_barrier(nmethod* nm) { - address barrier_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset; + address barrier_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm); NativeNMethodBarrier* barrier = reinterpret_cast(barrier_address); debug_only(barrier->verify()); return barrier; } @@ -159,14 +176,39 @@ void BarrierSetNMethod::disarm(nmethod* nm) { return; } + // The patching epoch is incremented before the nmethod is disarmed. Disarming + // is performed with a release store. In the nmethod entry barrier, the values + // are read in the opposite order, such that the load of the nmethod guard + // acquires the patching epoch. This way, the guard is guaranteed to block + // entries to the nmethod, until it has safely published the requirement for + // further fencing by mutators, before they are allowed to enter. + BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler(); + bs_asm->increment_patching_epoch(); + // Disarms the nmethod guard emitted by BarrierSetAssembler::nmethod_entry_barrier. + // Symmetric "LD; FENCE IR, IR" is in the nmethod barrier. NativeNMethodBarrier* barrier = native_nmethod_barrier(nm); - - barrier->set_value(disarmed_value()); + barrier->set_value(nm, disarmed_value()); } void BarrierSetNMethod::arm(nmethod* nm, int arm_value) { - Unimplemented(); + if (!supports_entry_barrier(nm)) { + return; + } + + if (arm_value == disarmed_value()) { + // The patching epoch is incremented before the nmethod is disarmed. Disarming + // is performed with a release store. In the nmethod entry barrier, the values + // are read in the opposite order, such that the load of the nmethod guard + // acquires the patching epoch. This way, the guard is guaranteed to block + // entries to the nmethod, until it has safely published the requirement for + // further fencing by mutators, before they are allowed to enter.
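The comments above describe a small ordering protocol: the writer bumps the global epoch and then release-stores the guard, while readers load the guard first and the epoch second, comparing the combined 64-bit value against a thread-local copy. A hedged sketch of that protocol in standalone C++ (std::atomic stands in for HotSpot's Atomic wrappers; all names are illustrative, not the VM's):

#include <atomic>
#include <cstdint>

std::atomic<uint32_t> patching_epoch{0}; // global, as in the patch
std::atomic<uint32_t> guard{0};          // stands in for the per-nmethod guard word

void disarm_sketch(uint32_t disarmed_value) {
  // Writer: publish the new epoch before the guard reads as disarmed.
  patching_epoch.fetch_add(1, std::memory_order_relaxed);
  guard.store(disarmed_value, std::memory_order_release);
}

bool may_enter_sketch(uint64_t thread_disarmed_and_epoch) {
  // Reader: guard first (acquire), epoch second; the emitted assembly
  // orders the two loads with an artificial data dependency instead.
  uint64_t g = guard.load(std::memory_order_acquire);
  uint64_t e = patching_epoch.load(std::memory_order_relaxed);
  return ((e << 32) | g) == thread_disarmed_and_epoch; // equal => take fast path
}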
+ BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler(); + bs_asm->increment_patching_epoch(); + } + + NativeNMethodBarrier* barrier = native_nmethod_barrier(nm); + barrier->set_value(nm, arm_value); } bool BarrierSetNMethod::is_armed(nmethod* nm) { @@ -175,5 +217,5 @@ bool BarrierSetNMethod::is_armed(nmethod* nm) { } NativeNMethodBarrier* barrier = native_nmethod_barrier(nm); - return barrier->get_value() != disarmed_value(); + return barrier->get_value(nm) != disarmed_value(); } diff --git a/src/hotspot/cpu/riscv/gc/shared/cardTableBarrierSetAssembler_riscv.cpp b/src/hotspot/cpu/riscv/gc/shared/cardTableBarrierSetAssembler_riscv.cpp index a419f92b5f6fc..1902490a860d9 100644 --- a/src/hotspot/cpu/riscv/gc/shared/cardTableBarrierSetAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/gc/shared/cardTableBarrierSetAssembler_riscv.cpp @@ -36,7 +36,6 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj, Register tmp) { - assert_cond(masm != NULL); assert_different_registers(obj, tmp); BarrierSet* bs = BarrierSet::barrier_set(); assert(bs->kind() == BarrierSet::CardTableBarrierSet, "Wrong barrier set kind"); @@ -62,7 +61,6 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register start, Register count, Register tmp, RegSet saved_regs) { - assert_cond(masm != NULL); assert_different_registers(start, tmp); assert_different_registers(count, tmp); @@ -103,7 +101,6 @@ void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorS if (!precise || dst.offset() == 0) { store_check(masm, dst.base(), x13); } else { - assert_cond(masm != NULL); __ la(x13, dst); store_check(masm, x13, t0); } diff --git a/src/hotspot/cpu/riscv/gc/shared/modRefBarrierSetAssembler_riscv.cpp b/src/hotspot/cpu/riscv/gc/shared/modRefBarrierSetAssembler_riscv.cpp index 7aa2015f9ec0e..4b7982eb213a7 100644 --- a/src/hotspot/cpu/riscv/gc/shared/modRefBarrierSetAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/gc/shared/modRefBarrierSetAssembler_riscv.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. + * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -31,7 +31,6 @@ void ModRefBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop, Register src, Register dst, Register count, RegSet saved_regs) { - if (is_oop) { gen_write_ref_array_pre_barrier(masm, decorators, dst, count, saved_regs); } diff --git a/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.hpp b/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.hpp index a705f49766777..522771fc8fb99 100644 --- a/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.hpp +++ b/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.hpp @@ -63,6 +63,8 @@ class ShenandoahBarrierSetAssembler: public BarrierSetAssembler { void iu_barrier(MacroAssembler* masm, Register dst, Register tmp); + virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_data_patch; } + #ifdef COMPILER1 void gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub); void gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub); diff --git a/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.cpp b/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.cpp index ce38ef00d3f18..5ff57453314d7 100644 --- a/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.cpp @@ -238,7 +238,7 @@ class ZSaveLiveRegisters { } else if (vm_reg->is_FloatRegister()) { _fp_regs += FloatRegSet::of(vm_reg->as_FloatRegister()); } else if (vm_reg->is_VectorRegister()) { - const VMReg vm_reg_base = OptoReg::as_VMReg(opto_reg & ~(VectorRegisterImpl::max_slots_per_register - 1)); + const VMReg vm_reg_base = OptoReg::as_VMReg(opto_reg & ~(VectorRegister::max_slots_per_register - 1)); _vp_regs += VectorRegSet::of(vm_reg_base->as_VectorRegister()); } else { fatal("Unknown register type"); diff --git a/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.hpp b/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.hpp index dc07ab635fed9..14424e4e21d9a 100644 --- a/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.hpp +++ b/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.hpp @@ -78,6 +78,8 @@ class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase { Register tmp, Label& slowpath); + virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_data_patch; } + #ifdef COMPILER1 void generate_c1_load_barrier_test(LIR_Assembler* ce, LIR_Opr ref) const; diff --git a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp index 972b259459788..39ebd18f5a6dc 100644 --- a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp +++ b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp @@ -548,11 +548,11 @@ void InterpreterMacroAssembler::dispatch_only(TosState state, bool generate_poll } void InterpreterMacroAssembler::dispatch_only_normal(TosState state, Register Rs) { - dispatch_base(state, Interpreter::normal_table(state), Rs); + dispatch_base(state, Interpreter::normal_table(state), true, false, Rs); } void InterpreterMacroAssembler::dispatch_only_noverify(TosState state, Register Rs) { - dispatch_base(state, Interpreter::normal_table(state), false, Rs); + dispatch_base(state, Interpreter::normal_table(state), false, false, Rs); } void InterpreterMacroAssembler::dispatch_next(TosState state, int step, bool generate_poll) { diff --git 
a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp index bbccb8b6c550c..399e87b294702 100644 --- a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp @@ -62,35 +62,33 @@ static void pass_arg0(MacroAssembler* masm, Register arg) { if (c_rarg0 != arg) { - assert_cond(masm != NULL); masm->mv(c_rarg0, arg); } } static void pass_arg1(MacroAssembler* masm, Register arg) { if (c_rarg1 != arg) { - assert_cond(masm != NULL); masm->mv(c_rarg1, arg); } } static void pass_arg2(MacroAssembler* masm, Register arg) { if (c_rarg2 != arg) { - assert_cond(masm != NULL); masm->mv(c_rarg2, arg); } } static void pass_arg3(MacroAssembler* masm, Register arg) { if (c_rarg3 != arg) { - assert_cond(masm != NULL); masm->mv(c_rarg3, arg); } } -void MacroAssembler::align(int modulus, int extra_offset) { +int MacroAssembler::align(int modulus, int extra_offset) { CompressibleRegion cr(this); + intptr_t before = offset(); while ((offset() + extra_offset) % modulus != 0) { nop(); } + return (int)(offset() - before); } void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) { @@ -1123,9 +1121,9 @@ void MacroAssembler::push_CPU_state(bool save_vectors, int vector_size_in_bytes) // vector registers if (save_vectors) { - sub(sp, sp, vector_size_in_bytes * VectorRegisterImpl::number_of_registers); + sub(sp, sp, vector_size_in_bytes * VectorRegister::number_of_registers); vsetvli(t0, x0, Assembler::e64, Assembler::m8); - for (int i = 0; i < VectorRegisterImpl::number_of_registers; i += 8) { + for (int i = 0; i < VectorRegister::number_of_registers; i += 8) { add(t0, sp, vector_size_in_bytes * i); vse64_v(as_VectorRegister(i), t0); } @@ -1137,7 +1135,7 @@ void MacroAssembler::pop_CPU_state(bool restore_vectors, int vector_size_in_byte // vector registers if (restore_vectors) { vsetvli(t0, x0, Assembler::e64, Assembler::m8); - for (int i = 0; i < VectorRegisterImpl::number_of_registers; i += 8) { + for (int i = 0; i < VectorRegister::number_of_registers; i += 8) { vle64_v(as_VectorRegister(i), sp); add(sp, sp, vector_size_in_bytes * 8); } @@ -1353,7 +1351,7 @@ void MacroAssembler::reinit_heapbase() { mv(xheapbase, CompressedOops::ptrs_base()); } else { int32_t offset = 0; - la_patchable(xheapbase, ExternalAddress((address)CompressedOops::ptrs_base_addr()), offset); + la_patchable(xheapbase, ExternalAddress(CompressedOops::ptrs_base_addr()), offset); ld(xheapbase, Address(xheapbase, offset)); } } @@ -1646,11 +1644,8 @@ void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp, beq(trial_klass, tmp, L); } -// Move an oop into a register. immediate is true if we want -// immediate instructions and nmethod entry barriers are not enabled. -// i.e. we are not going to patch this instruction while the code is being -// executed by another thread. -void MacroAssembler::movoop(Register dst, jobject obj, bool immediate) { +// Move an oop into a register. +void MacroAssembler::movoop(Register dst, jobject obj) { int oop_index; if (obj == NULL) { oop_index = oop_recorder()->allocate_oop_index(obj); @@ -1665,13 +1660,12 @@ void MacroAssembler::movoop(Register dst, jobject obj, bool immediate) { } RelocationHolder rspec = oop_Relocation::spec(oop_index); - // nmethod entry barrier necessitate using the constant pool. They have to be - // ordered with respected to oop access. 
- if (BarrierSet::barrier_set()->barrier_set_nmethod() != NULL || !immediate) { + if (BarrierSet::barrier_set()->barrier_set_assembler()->supports_instruction_patching()) { + mv(dst, Address((address)obj, rspec)); + } else { address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address ld_constant(dst, Address(dummy, rspec)); - } else - mv(dst, Address((address)obj, rspec)); + } } // Move a metadata address into a register. @@ -1716,7 +1710,6 @@ void MacroAssembler::bang_stack_size(Register size, Register tmp) { } SkipIfEqual::SkipIfEqual(MacroAssembler* masm, const bool* flag_addr, bool value) { - assert_cond(masm != NULL); int32_t offset = 0; _masm = masm; _masm->la_patchable(t0, ExternalAddress((address)flag_addr), offset); @@ -1725,7 +1718,6 @@ SkipIfEqual::SkipIfEqual(MacroAssembler* masm, const bool* flag_addr, bool value } SkipIfEqual::~SkipIfEqual() { - assert_cond(_masm != NULL); _masm->bind(_label); _masm = NULL; } @@ -2300,7 +2292,7 @@ void MacroAssembler::cmpxchg_narrow_value(Register addr, Register expected, bnez(tmp, retry); if (result_as_bool) { - addi(result, zr, 1); + li(result, 1); j(done); bind(fail); @@ -2335,7 +2327,7 @@ void MacroAssembler::weak_cmpxchg_narrow_value(Register addr, Register expected, assert_different_registers(addr, old, mask, not_mask, new_val, expected, shift, tmp); cmpxchg_narrow_value_helper(addr, expected, new_val, size, tmp1, tmp2, tmp3); - Label succ, fail, done; + Label fail, done; lr_w(old, aligned_addr, acquire); andr(tmp, old, mask); @@ -2344,13 +2336,14 @@ void MacroAssembler::weak_cmpxchg_narrow_value(Register addr, Register expected, andr(tmp, old, not_mask); orr(tmp, tmp, new_val); sc_w(tmp, tmp, aligned_addr, release); - beqz(tmp, succ); + bnez(tmp, fail); - bind(fail); - addi(result, zr, 1); + // Success + li(result, 1); j(done); - bind(succ); + // Fail + bind(fail); mv(result, zr); bind(done); @@ -2394,20 +2387,20 @@ void MacroAssembler::cmpxchg_weak(Register addr, Register expected, enum operand_size size, Assembler::Aqrl acquire, Assembler::Aqrl release, Register result) { - Label fail, done, sc_done; + Label fail, done; load_reserved(addr, size, acquire); bne(t0, expected, fail); store_conditional(addr, new_val, size, release); - beqz(t0, sc_done); + bnez(t0, fail); - // fail - bind(fail); + // Success li(result, 1); j(done); - // sc_done - bind(sc_done); - mv(result, 0); + // Fail + bind(fail); + mv(result, zr); + bind(done); } diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp index 8c315eff18edb..3b855297dd2a7 100644 --- a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp +++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp @@ -47,7 +47,7 @@ class MacroAssembler: public Assembler { void safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod); // Alignment - void align(int modulus, int extra_offset = 0); + int align(int modulus, int extra_offset = 0); // Stack frame creation/removal // Note that SP must be updated to the right place before saving/restoring RA and FP @@ -175,7 +175,7 @@ class MacroAssembler: public Assembler { void resolve_oop_handle(Register result, Register tmp = x15); void resolve_jobject(Register value, Register thread, Register tmp); - void movoop(Register dst, jobject obj, bool immediate = false); + void movoop(Register dst, jobject obj); void mov_metadata(Register dst, Metadata* obj); void bang_stack_size(Register size, Register tmp); void set_narrow_oop(Register dst, jobject obj); diff --git 
a/src/hotspot/cpu/riscv/methodHandles_riscv.cpp b/src/hotspot/cpu/riscv/methodHandles_riscv.cpp index 9ce6436668010..4c77ac890ef3c 100644 --- a/src/hotspot/cpu/riscv/methodHandles_riscv.cpp +++ b/src/hotspot/cpu/riscv/methodHandles_riscv.cpp @@ -48,7 +48,6 @@ #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg) { - assert_cond(_masm != NULL); if (VerifyMethodHandles) { verify_klass(_masm, klass_reg, VM_CLASS_ID(java_lang_Class), "MH argument is a Class"); @@ -70,7 +69,6 @@ static int check_nonzero(const char* xname, int x) { void MethodHandles::verify_klass(MacroAssembler* _masm, Register obj, vmClassID klass_id, const char* error_message) { - assert_cond(_masm != NULL); InstanceKlass** klass_addr = vmClasses::klass_addr_at(klass_id); Klass* klass = vmClasses::klass_at(klass_id); Register temp = t1; @@ -99,7 +97,6 @@ void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Registe void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp, bool for_compiler_entry) { - assert_cond(_masm != NULL); assert(method == xmethod, "interpreter calling convention"); Label L_no_such_method; __ beqz(xmethod, L_no_such_method); @@ -130,7 +127,6 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm, Register recv, Register method_temp, Register temp2, bool for_compiler_entry) { - assert_cond(_masm != NULL); BLOCK_COMMENT("jump_to_lambda_form {"); // This is the initial entry point of a lazy method handle. // After type checking, it picks up the invoker from the LambdaForm. @@ -169,7 +165,6 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm, // Code generation address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm, vmIntrinsics::ID iid) { - assert_cond(_masm != NULL); const bool not_for_compiler_entry = false; // this is the interpreter entry assert(is_signature_polymorphic(iid), "expected invoke iid"); if (iid == vmIntrinsics::_invokeGeneric || @@ -269,7 +264,6 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm, Register receiver_reg, Register member_reg, bool for_compiler_entry) { - assert_cond(_masm != NULL); assert(is_signature_polymorphic(iid), "expected invoke iid"); // temps used in this code are not used in *either* compiled or interpreted calling sequences Register temp1 = x7; diff --git a/src/hotspot/cpu/riscv/nativeInst_riscv.cpp b/src/hotspot/cpu/riscv/nativeInst_riscv.cpp index 1afe875cb4315..8969b158273e3 100644 --- a/src/hotspot/cpu/riscv/nativeInst_riscv.cpp +++ b/src/hotspot/cpu/riscv/nativeInst_riscv.cpp @@ -124,7 +124,7 @@ address NativeCall::destination() const { address destination = MacroAssembler::target_addr_for_insn(instruction_address()); // Do we use a trampoline stub for this call? - CodeBlob* cb = CodeCache::find_blob_unsafe(addr); // Else we get assertion if nmethod is zombie. + CodeBlob* cb = CodeCache::find_blob(addr); assert(cb && cb->is_nmethod(), "sanity"); nmethod *nm = (nmethod *)cb; if (nm != NULL && nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) { @@ -328,7 +328,7 @@ bool NativeInstruction::is_lwu_to_zr(address instr) { } // A 16-bit instruction with all bits ones is permanently reserved as an illegal instruction. 
-bool NativeInstruction::is_sigill_zombie_not_entrant() { +bool NativeInstruction::is_sigill_not_entrant() { // jvmci return uint_at(0) == 0xffffffff; } @@ -345,14 +345,14 @@ bool NativeInstruction::is_stop() { //------------------------------------------------------------------- // MT-safe inserting of a jump over a jump or a nop (used by -// nmethod::make_not_entrant_or_zombie) +// nmethod::make_not_entrant) void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) { assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch"); assert(nativeInstruction_at(verified_entry)->is_jump_or_nop() || - nativeInstruction_at(verified_entry)->is_sigill_zombie_not_entrant(), + nativeInstruction_at(verified_entry)->is_sigill_not_entrant(), "riscv cannot replace non-jump with jump"); // Patch this nmethod atomically. @@ -371,7 +371,7 @@ void NativeJump::patch_verified_entry(address entry, address verified_entry, add *(unsigned int*)verified_entry = insn; } else { // We use an illegal instruction for marking a method as - // not_entrant or zombie. + // not_entrant. NativeIllegalInstruction::insert(verified_entry); } diff --git a/src/hotspot/cpu/riscv/nativeInst_riscv.hpp b/src/hotspot/cpu/riscv/nativeInst_riscv.hpp index 3f0618fc257aa..96ddd872e2584 100644 --- a/src/hotspot/cpu/riscv/nativeInst_riscv.hpp +++ b/src/hotspot/cpu/riscv/nativeInst_riscv.hpp @@ -198,7 +198,7 @@ class NativeInstruction { inline bool is_nop() const; inline bool is_jump_or_nop(); bool is_safepoint_poll(); - bool is_sigill_zombie_not_entrant(); + bool is_sigill_not_entrant(); bool is_stop(); protected: diff --git a/src/hotspot/cpu/riscv/registerMap_riscv.cpp b/src/hotspot/cpu/riscv/registerMap_riscv.cpp index 41041eef4c0e1..2be1b11d736a7 100644 --- a/src/hotspot/cpu/riscv/registerMap_riscv.cpp +++ b/src/hotspot/cpu/riscv/registerMap_riscv.cpp @@ -31,7 +31,7 @@ address RegisterMap::pd_location(VMReg base_reg, int slot_idx) const { if (base_reg->is_VectorRegister()) { assert(base_reg->is_concrete(), "must pass base reg"); int base_reg_enc = (base_reg->value() - ConcreteRegisterImpl::max_fpr) / - VectorRegisterImpl::max_slots_per_register; + VectorRegister::max_slots_per_register; intptr_t offset_in_bytes = slot_idx * VMRegImpl::stack_slot_size; address base_location = location(base_reg, nullptr); if (base_location != NULL) { diff --git a/src/hotspot/cpu/riscv/register_riscv.cpp b/src/hotspot/cpu/riscv/register_riscv.cpp index f8116e9df8c7f..98aeafbfe9cbe 100644 --- a/src/hotspot/cpu/riscv/register_riscv.cpp +++ b/src/hotspot/cpu/riscv/register_riscv.cpp @@ -26,23 +26,11 @@ #include "precompiled.hpp" #include "register_riscv.hpp" -REGISTER_IMPL_DEFINITION(Register, RegisterImpl, RegisterImpl::number_of_registers); -REGISTER_IMPL_DEFINITION(FloatRegister, FloatRegisterImpl, FloatRegisterImpl::number_of_registers); -REGISTER_IMPL_DEFINITION(VectorRegister, VectorRegisterImpl, VectorRegisterImpl::number_of_registers); +Register::RegisterImpl all_RegisterImpls [Register::number_of_registers + 1]; +FloatRegister::FloatRegisterImpl all_FloatRegisterImpls [FloatRegister::number_of_registers + 1]; +VectorRegister::VectorRegisterImpl all_VectorRegisterImpls[VectorRegister::number_of_registers + 1]; -const int ConcreteRegisterImpl::max_gpr = RegisterImpl::number_of_registers * - RegisterImpl::max_slots_per_register; - -const int ConcreteRegisterImpl::max_fpr = - ConcreteRegisterImpl::max_gpr + - FloatRegisterImpl::number_of_registers * 
FloatRegisterImpl::max_slots_per_register; - -const int ConcreteRegisterImpl::max_vpr = - ConcreteRegisterImpl::max_fpr + - VectorRegisterImpl::number_of_registers * VectorRegisterImpl::max_slots_per_register; - - -const char* RegisterImpl::name() const { +const char* Register::RegisterImpl::name() const { static const char *const names[number_of_registers] = { "zr", "ra", "sp", "gp", "tp", "t0", "t1", "t2", "fp", "x9", "c_rarg0", "c_rarg1", "c_rarg2", "c_rarg3", "c_rarg4", "c_rarg5", "c_rarg6", "c_rarg7", @@ -52,7 +40,7 @@ const char* RegisterImpl::name() const { return is_valid() ? names[encoding()] : "noreg"; } -const char* FloatRegisterImpl::name() const { +const char* FloatRegister::FloatRegisterImpl::name() const { static const char *const names[number_of_registers] = { "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", @@ -62,7 +50,7 @@ const char* FloatRegisterImpl::name() const { return is_valid() ? names[encoding()] : "noreg"; } -const char* VectorRegisterImpl::name() const { +const char* VectorRegister::VectorRegisterImpl::name() const { static const char *const names[number_of_registers] = { "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", diff --git a/src/hotspot/cpu/riscv/register_riscv.hpp b/src/hotspot/cpu/riscv/register_riscv.hpp index a9200cac647b4..b5e9e2015c0b0 100644 --- a/src/hotspot/cpu/riscv/register_riscv.hpp +++ b/src/hotspot/cpu/riscv/register_riscv.hpp @@ -27,6 +27,7 @@ #define CPU_RISCV_REGISTER_RISCV_HPP #include "asm/register.hpp" +#include "utilities/powerOfTwo.hpp" #define CSR_FFLAGS 0x001 // Floating-Point Accrued Exceptions. #define CSR_FRM 0x002 // Floating-Point Dynamic Rounding Mode. @@ -45,14 +46,11 @@ class VMRegImpl; typedef VMRegImpl* VMReg; -// Use Register as shortcut -class RegisterImpl; -typedef const RegisterImpl* Register; +class Register { + private: + int _encoding; -inline constexpr Register as_Register(int encoding); - -class RegisterImpl: public AbstractRegisterImpl { - static constexpr Register first(); + constexpr explicit Register(int encoding) : _encoding(encoding) {} public: enum { @@ -65,86 +63,114 @@ class RegisterImpl: public AbstractRegisterImpl { compressed_register_top = 15, }; - // derived registers, offsets, and addresses - const Register successor() const { return this + 1; } + class RegisterImpl: public AbstractRegisterImpl { + friend class Register; - // construction - inline friend constexpr Register as_Register(int encoding); + static constexpr const RegisterImpl* first(); - VMReg as_VMReg() const; + public: + // accessors + int raw_encoding() const { return this - first(); } + int encoding() const { assert(is_valid(), "invalid register"); return raw_encoding(); } + bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; } - // accessors - int encoding() const { assert(is_valid(), "invalid register"); return encoding_nocheck(); } - int encoding_nocheck() const { return this - first(); } - bool is_valid() const { return (unsigned)encoding_nocheck() < number_of_registers; } - const char* name() const; + // for rvc + int compressed_raw_encoding() const { + return raw_encoding() - compressed_register_base; + } - // for rvc - int compressed_encoding() const { - assert(is_compressed_valid(), "invalid compressed register"); - return encoding() - compressed_register_base; - } + int compressed_encoding() const { + assert(is_compressed_valid(), "invalid compressed register"); + return 
encoding() - compressed_register_base; + } - int compressed_encoding_nocheck() const { - return encoding_nocheck() - compressed_register_base; - } + bool is_compressed_valid() const { + return raw_encoding() >= compressed_register_base && + raw_encoding() <= compressed_register_top; + } - bool is_compressed_valid() const { - return encoding_nocheck() >= compressed_register_base && - encoding_nocheck() <= compressed_register_top; - } + // derived registers, offsets, and addresses + inline Register successor() const; + + VMReg as_VMReg() const; + + const char* name() const; + }; + + inline friend constexpr Register as_Register(int encoding); + + constexpr Register() : _encoding(-1) {} // noreg + + int operator==(const Register r) const { return _encoding == r._encoding; } + int operator!=(const Register r) const { return _encoding != r._encoding; } + + const RegisterImpl* operator->() const { return RegisterImpl::first() + _encoding; } }; -REGISTER_IMPL_DECLARATION(Register, RegisterImpl, RegisterImpl::number_of_registers); - -// The integer registers of the RISCV architecture - -CONSTANT_REGISTER_DECLARATION(Register, noreg, (-1)); - -CONSTANT_REGISTER_DECLARATION(Register, x0, (0)); -CONSTANT_REGISTER_DECLARATION(Register, x1, (1)); -CONSTANT_REGISTER_DECLARATION(Register, x2, (2)); -CONSTANT_REGISTER_DECLARATION(Register, x3, (3)); -CONSTANT_REGISTER_DECLARATION(Register, x4, (4)); -CONSTANT_REGISTER_DECLARATION(Register, x5, (5)); -CONSTANT_REGISTER_DECLARATION(Register, x6, (6)); -CONSTANT_REGISTER_DECLARATION(Register, x7, (7)); -CONSTANT_REGISTER_DECLARATION(Register, x8, (8)); -CONSTANT_REGISTER_DECLARATION(Register, x9, (9)); -CONSTANT_REGISTER_DECLARATION(Register, x10, (10)); -CONSTANT_REGISTER_DECLARATION(Register, x11, (11)); -CONSTANT_REGISTER_DECLARATION(Register, x12, (12)); -CONSTANT_REGISTER_DECLARATION(Register, x13, (13)); -CONSTANT_REGISTER_DECLARATION(Register, x14, (14)); -CONSTANT_REGISTER_DECLARATION(Register, x15, (15)); -CONSTANT_REGISTER_DECLARATION(Register, x16, (16)); -CONSTANT_REGISTER_DECLARATION(Register, x17, (17)); -CONSTANT_REGISTER_DECLARATION(Register, x18, (18)); -CONSTANT_REGISTER_DECLARATION(Register, x19, (19)); -CONSTANT_REGISTER_DECLARATION(Register, x20, (20)); -CONSTANT_REGISTER_DECLARATION(Register, x21, (21)); -CONSTANT_REGISTER_DECLARATION(Register, x22, (22)); -CONSTANT_REGISTER_DECLARATION(Register, x23, (23)); -CONSTANT_REGISTER_DECLARATION(Register, x24, (24)); -CONSTANT_REGISTER_DECLARATION(Register, x25, (25)); -CONSTANT_REGISTER_DECLARATION(Register, x26, (26)); -CONSTANT_REGISTER_DECLARATION(Register, x27, (27)); -CONSTANT_REGISTER_DECLARATION(Register, x28, (28)); -CONSTANT_REGISTER_DECLARATION(Register, x29, (29)); -CONSTANT_REGISTER_DECLARATION(Register, x30, (30)); -CONSTANT_REGISTER_DECLARATION(Register, x31, (31)); - -// Use FloatRegister as shortcut -class FloatRegisterImpl; -typedef const FloatRegisterImpl* FloatRegister; - -inline constexpr FloatRegister as_FloatRegister(int encoding); +extern Register::RegisterImpl all_RegisterImpls[Register::number_of_registers + 1] INTERNAL_VISIBILITY; + +inline constexpr const Register::RegisterImpl* Register::RegisterImpl::first() { + return all_RegisterImpls + 1; +} + +constexpr Register noreg = Register(); + +inline constexpr Register as_Register(int encoding) { + if (0 <= encoding && encoding < Register::number_of_registers) { + return Register(encoding); + } + return noreg; +} + +inline Register Register::RegisterImpl::successor() const { + assert(is_valid(), "sanity"); + return 
as_Register(encoding() + 1); +} + +// The integer registers of the RISCV architecture +constexpr Register x0 = as_Register( 0); +constexpr Register x1 = as_Register( 1); +constexpr Register x2 = as_Register( 2); +constexpr Register x3 = as_Register( 3); +constexpr Register x4 = as_Register( 4); +constexpr Register x5 = as_Register( 5); +constexpr Register x6 = as_Register( 6); +constexpr Register x7 = as_Register( 7); +constexpr Register x8 = as_Register( 8); +constexpr Register x9 = as_Register( 9); +constexpr Register x10 = as_Register(10); +constexpr Register x11 = as_Register(11); +constexpr Register x12 = as_Register(12); +constexpr Register x13 = as_Register(13); +constexpr Register x14 = as_Register(14); +constexpr Register x15 = as_Register(15); +constexpr Register x16 = as_Register(16); +constexpr Register x17 = as_Register(17); +constexpr Register x18 = as_Register(18); +constexpr Register x19 = as_Register(19); +constexpr Register x20 = as_Register(20); +constexpr Register x21 = as_Register(21); +constexpr Register x22 = as_Register(22); +constexpr Register x23 = as_Register(23); +constexpr Register x24 = as_Register(24); +constexpr Register x25 = as_Register(25); +constexpr Register x26 = as_Register(26); +constexpr Register x27 = as_Register(27); +constexpr Register x28 = as_Register(28); +constexpr Register x29 = as_Register(29); +constexpr Register x30 = as_Register(30); +constexpr Register x31 = as_Register(31); // The implementation of floating point registers for the architecture -class FloatRegisterImpl: public AbstractRegisterImpl { - static constexpr FloatRegister first(); +class FloatRegister { + private: + int _encoding; + + constexpr explicit FloatRegister(int encoding) : _encoding(encoding) {} public: + inline friend constexpr FloatRegister as_FloatRegister(int encoding); + enum { number_of_registers = 32, max_slots_per_register = 2,
raw_encoding() >= compressed_register_base && + raw_encoding() <= compressed_register_top; + } - bool is_compressed_valid() const { - return encoding_nocheck() >= compressed_register_base && - encoding_nocheck() <= compressed_register_top; - } + // derived registers, offsets, and addresses + inline FloatRegister successor() const; + + VMReg as_VMReg() const; + + const char* name() const; + }; + + constexpr FloatRegister() : _encoding(-1) {} // fnoreg + + int operator==(const FloatRegister r) const { return _encoding == r._encoding; } + int operator!=(const FloatRegister r) const { return _encoding != r._encoding; } + + const FloatRegisterImpl* operator->() const { return FloatRegisterImpl::first() + _encoding; } }; -REGISTER_IMPL_DECLARATION(FloatRegister, FloatRegisterImpl, FloatRegisterImpl::number_of_registers); +extern FloatRegister::FloatRegisterImpl all_FloatRegisterImpls[FloatRegister::number_of_registers + 1] INTERNAL_VISIBILITY; -// The float registers of the RISCV architecture +inline constexpr const FloatRegister::FloatRegisterImpl* FloatRegister::FloatRegisterImpl::first() { + return all_FloatRegisterImpls + 1; +} + +constexpr FloatRegister fnoreg = FloatRegister(); + +inline constexpr FloatRegister as_FloatRegister(int encoding) { + if (0 <= encoding && encoding < FloatRegister::number_of_registers) { + return FloatRegister(encoding); + } + return fnoreg; +} + +inline FloatRegister FloatRegister::FloatRegisterImpl::successor() const { + assert(is_valid(), "sanity"); + return as_FloatRegister(encoding() + 1); +} -CONSTANT_REGISTER_DECLARATION(FloatRegister, fnoreg , (-1)); - -CONSTANT_REGISTER_DECLARATION(FloatRegister, f0 , ( 0)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f1 , ( 1)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f2 , ( 2)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f3 , ( 3)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f4 , ( 4)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f5 , ( 5)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f6 , ( 6)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f7 , ( 7)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f8 , ( 8)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f9 , ( 9)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f10 , (10)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f11 , (11)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f12 , (12)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f13 , (13)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f14 , (14)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f15 , (15)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f16 , (16)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f17 , (17)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f18 , (18)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f19 , (19)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f20 , (20)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f21 , (21)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f22 , (22)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f23 , (23)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f24 , (24)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f25 , (25)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f26 , (26)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f27 , (27)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f28 , (28)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f29 , (29)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f30 , (30)); -CONSTANT_REGISTER_DECLARATION(FloatRegister, f31 , (31)); - -// Use VectorRegister as shortcut -class 
VectorRegisterImpl; -typedef const VectorRegisterImpl* VectorRegister; - -inline constexpr VectorRegister as_VectorRegister(int encoding); +// The float registers of the RISCV architecture +constexpr FloatRegister f0 = as_FloatRegister( 0); +constexpr FloatRegister f1 = as_FloatRegister( 1); +constexpr FloatRegister f2 = as_FloatRegister( 2); +constexpr FloatRegister f3 = as_FloatRegister( 3); +constexpr FloatRegister f4 = as_FloatRegister( 4); +constexpr FloatRegister f5 = as_FloatRegister( 5); +constexpr FloatRegister f6 = as_FloatRegister( 6); +constexpr FloatRegister f7 = as_FloatRegister( 7); +constexpr FloatRegister f8 = as_FloatRegister( 8); +constexpr FloatRegister f9 = as_FloatRegister( 9); +constexpr FloatRegister f10 = as_FloatRegister(10); +constexpr FloatRegister f11 = as_FloatRegister(11); +constexpr FloatRegister f12 = as_FloatRegister(12); +constexpr FloatRegister f13 = as_FloatRegister(13); +constexpr FloatRegister f14 = as_FloatRegister(14); +constexpr FloatRegister f15 = as_FloatRegister(15); +constexpr FloatRegister f16 = as_FloatRegister(16); +constexpr FloatRegister f17 = as_FloatRegister(17); +constexpr FloatRegister f18 = as_FloatRegister(18); +constexpr FloatRegister f19 = as_FloatRegister(19); +constexpr FloatRegister f20 = as_FloatRegister(20); +constexpr FloatRegister f21 = as_FloatRegister(21); +constexpr FloatRegister f22 = as_FloatRegister(22); +constexpr FloatRegister f23 = as_FloatRegister(23); +constexpr FloatRegister f24 = as_FloatRegister(24); +constexpr FloatRegister f25 = as_FloatRegister(25); +constexpr FloatRegister f26 = as_FloatRegister(26); +constexpr FloatRegister f27 = as_FloatRegister(27); +constexpr FloatRegister f28 = as_FloatRegister(28); +constexpr FloatRegister f29 = as_FloatRegister(29); +constexpr FloatRegister f30 = as_FloatRegister(30); +constexpr FloatRegister f31 = as_FloatRegister(31); // The implementation of vector registers for RVV -class VectorRegisterImpl: public AbstractRegisterImpl { - static constexpr VectorRegister first(); +class VectorRegister { + int _encoding; + + constexpr explicit VectorRegister(int encoding) : _encoding(encoding) {} public: + inline friend constexpr VectorRegister as_VectorRegister(int encoding); + enum { number_of_registers = 32, max_slots_per_register = 4 }; - // construction - inline friend constexpr VectorRegister as_VectorRegister(int encoding); + class VectorRegisterImpl: public AbstractRegisterImpl { + friend class VectorRegister; + + static constexpr const VectorRegisterImpl* first(); + + public: + // accessors + int raw_encoding() const { return this - first(); } + int encoding() const { assert(is_valid(), "invalid register"); return raw_encoding(); } + bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; } - VMReg as_VMReg() const; + // derived registers, offsets, and addresses + inline VectorRegister successor() const; - // derived registers, offsets, and addresses - VectorRegister successor() const { return this + 1; } + VMReg as_VMReg() const; - // accessors - int encoding() const { assert(is_valid(), "invalid register"); return encoding_nocheck(); } - int encoding_nocheck() const { return this - first(); } - bool is_valid() const { return (unsigned)encoding_nocheck() < number_of_registers; } - const char* name() const; + const char* name() const; + }; + + constexpr VectorRegister() : _encoding(-1) {} // vnoreg + int operator==(const VectorRegister r) const { return _encoding == r._encoding; } + int operator!=(const VectorRegister r) const { return 
_encoding != r._encoding; } + + const VectorRegisterImpl* operator->() const { return VectorRegisterImpl::first() + _encoding; } }; -REGISTER_IMPL_DECLARATION(VectorRegister, VectorRegisterImpl, VectorRegisterImpl::number_of_registers); +extern VectorRegister::VectorRegisterImpl all_VectorRegisterImpls[VectorRegister::number_of_registers + 1] INTERNAL_VISIBILITY; -// The vector registers of RVV -CONSTANT_REGISTER_DECLARATION(VectorRegister, vnoreg , (-1)); - -CONSTANT_REGISTER_DECLARATION(VectorRegister, v0 , ( 0)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v1 , ( 1)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v2 , ( 2)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v3 , ( 3)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v4 , ( 4)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v5 , ( 5)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v6 , ( 6)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v7 , ( 7)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v8 , ( 8)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v9 , ( 9)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v10 , (10)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v11 , (11)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v12 , (12)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v13 , (13)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v14 , (14)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v15 , (15)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v16 , (16)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v17 , (17)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v18 , (18)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v19 , (19)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v20 , (20)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v21 , (21)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v22 , (22)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v23 , (23)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v24 , (24)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v25 , (25)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v26 , (26)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v27 , (27)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v28 , (28)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v29 , (29)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v30 , (30)); -CONSTANT_REGISTER_DECLARATION(VectorRegister, v31 , (31)); +inline constexpr const VectorRegister::VectorRegisterImpl* VectorRegister::VectorRegisterImpl::first() { + return all_VectorRegisterImpls + 1; +} + +constexpr VectorRegister vnoreg = VectorRegister(); + +inline constexpr VectorRegister as_VectorRegister(int encoding) { + if (0 <= encoding && encoding < VectorRegister::number_of_registers) { + return VectorRegister(encoding); + } + return vnoreg; +} +inline VectorRegister VectorRegister::VectorRegisterImpl::successor() const { + assert(is_valid(), "sanity"); + return as_VectorRegister(encoding() + 1); +} + +// The vector registers of RVV +constexpr VectorRegister v0 = as_VectorRegister( 0); +constexpr VectorRegister v1 = as_VectorRegister( 1); +constexpr VectorRegister v2 = as_VectorRegister( 2); +constexpr VectorRegister v3 = as_VectorRegister( 3); +constexpr VectorRegister v4 = as_VectorRegister( 4); +constexpr VectorRegister v5 = as_VectorRegister( 5); +constexpr VectorRegister v6 = as_VectorRegister( 6); +constexpr VectorRegister v7 = as_VectorRegister( 7); +constexpr VectorRegister v8 = as_VectorRegister( 8); +constexpr VectorRegister v9 = as_VectorRegister( 9); +constexpr VectorRegister v10 = 
as_VectorRegister(10); +constexpr VectorRegister v11 = as_VectorRegister(11); +constexpr VectorRegister v12 = as_VectorRegister(12); +constexpr VectorRegister v13 = as_VectorRegister(13); +constexpr VectorRegister v14 = as_VectorRegister(14); +constexpr VectorRegister v15 = as_VectorRegister(15); +constexpr VectorRegister v16 = as_VectorRegister(16); +constexpr VectorRegister v17 = as_VectorRegister(17); +constexpr VectorRegister v18 = as_VectorRegister(18); +constexpr VectorRegister v19 = as_VectorRegister(19); +constexpr VectorRegister v20 = as_VectorRegister(20); +constexpr VectorRegister v21 = as_VectorRegister(21); +constexpr VectorRegister v22 = as_VectorRegister(22); +constexpr VectorRegister v23 = as_VectorRegister(23); +constexpr VectorRegister v24 = as_VectorRegister(24); +constexpr VectorRegister v25 = as_VectorRegister(25); +constexpr VectorRegister v26 = as_VectorRegister(26); +constexpr VectorRegister v27 = as_VectorRegister(27); +constexpr VectorRegister v28 = as_VectorRegister(28); +constexpr VectorRegister v29 = as_VectorRegister(29); +constexpr VectorRegister v30 = as_VectorRegister(30); +constexpr VectorRegister v31 = as_VectorRegister(31); // Need to know the total number of registers of all sorts for SharedInfo. // Define a class that exports it. class ConcreteRegisterImpl : public AbstractRegisterImpl { public: enum { - // A big enough number for C2: all the registers plus flags - // This number must be large enough to cover REG_COUNT (defined by c2) registers. - // There is no requirement that any ordering here matches any ordering c2 gives - // it's optoregs. - - number_of_registers = (RegisterImpl::max_slots_per_register * RegisterImpl::number_of_registers + - FloatRegisterImpl::max_slots_per_register * FloatRegisterImpl::number_of_registers + - VectorRegisterImpl::max_slots_per_register * VectorRegisterImpl::number_of_registers) + max_gpr = Register::number_of_registers * Register::max_slots_per_register, + max_fpr = max_gpr + FloatRegister::number_of_registers * FloatRegister::max_slots_per_register, + max_vpr = max_fpr + VectorRegister::number_of_registers * VectorRegister::max_slots_per_register, + + // A big enough number for C2: all the registers plus flags + // This number must be large enough to cover REG_COUNT (defined by c2) registers. + // There is no requirement that any ordering here matches any ordering c2 gives + // its optoregs. + number_of_registers = max_vpr // gpr/fpr/vpr }; - - // added to make it compile - static const int max_gpr; - static const int max_fpr; - static const int max_vpr; }; typedef AbstractRegSet<Register> RegSet; typedef AbstractRegSet<FloatRegister> FloatRegSet; typedef AbstractRegSet<VectorRegister> VectorRegSet; + +template <> +inline Register AbstractRegSet<Register>::first() { + uint32_t first = _bitset & -_bitset; + return first ? as_Register(exact_log2(first)) : noreg; +} + +template <> +inline FloatRegister AbstractRegSet<FloatRegister>::first() { + uint32_t first = _bitset & -_bitset; + return first ? as_FloatRegister(exact_log2(first)) : fnoreg; +} + +template<> +inline VectorRegister AbstractRegSet<VectorRegister>::first() { + uint32_t first = _bitset & -_bitset; + return first ? 
as_VectorRegister(exact_log2(first)) : vnoreg; +} + #endif // CPU_RISCV_REGISTER_RISCV_HPP diff --git a/src/hotspot/cpu/riscv/relocInfo_riscv.cpp b/src/hotspot/cpu/riscv/relocInfo_riscv.cpp index 228a64eae2c64..716124b8f0416 100644 --- a/src/hotspot/cpu/riscv/relocInfo_riscv.cpp +++ b/src/hotspot/cpu/riscv/relocInfo_riscv.cpp @@ -41,7 +41,7 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) { switch (type()) { case relocInfo::oop_type: { oop_Relocation *reloc = (oop_Relocation *)this; - // in movoop when BarrierSet::barrier_set()->barrier_set_nmethod() != NULL || !immediate + // in movoop when BarrierSet::barrier_set()->barrier_set_nmethod() != NULL if (NativeInstruction::is_load_pc_relative_at(addr())) { address constptr = (address)code()->oop_addr_at(reloc->oop_index()); bytes = MacroAssembler::pd_patch_instruction_size(addr(), constptr); diff --git a/src/hotspot/cpu/riscv/riscv.ad b/src/hotspot/cpu/riscv/riscv.ad index dde2e1038976e..4393e84cd60b2 100644 --- a/src/hotspot/cpu/riscv/riscv.ad +++ b/src/hotspot/cpu/riscv/riscv.ad @@ -1342,7 +1342,24 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { if (C->stub_function() == NULL) { BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); - bs->nmethod_entry_barrier(&_masm); + if (BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) { + // Dummy labels for just measuring the code size + Label dummy_slow_path; + Label dummy_continuation; + Label dummy_guard; + Label* slow_path = &dummy_slow_path; + Label* continuation = &dummy_continuation; + Label* guard = &dummy_guard; + if (!Compile::current()->output()->in_scratch_emit_size()) { + // Use real labels from actual stub when not emitting code for purpose of measuring its size + C2EntryBarrierStub* stub = Compile::current()->output()->entry_barrier_table()->add_entry_barrier(); + slow_path = &stub->slow_path(); + continuation = &stub->continuation(); + guard = &stub->guard(); + } + // In the C2 code, we move the non-hot part of nmethod entry barriers out-of-line to a stub. 
+ bs->nmethod_entry_barrier(&_masm, slow_path, continuation, guard); + } } if (VerifyStackAtCalls) { @@ -1452,19 +1469,19 @@ static enum RC rc_class(OptoReg::Name reg) { // we have 30 int registers * 2 halves // (t0 and t1 are omitted) - int slots_of_int_registers = RegisterImpl::max_slots_per_register * (RegisterImpl::number_of_registers - 2); + int slots_of_int_registers = Register::max_slots_per_register * (Register::number_of_registers - 2); if (reg < slots_of_int_registers) { return rc_int; } // we have 32 float register * 2 halves - int slots_of_float_registers = FloatRegisterImpl::max_slots_per_register * FloatRegisterImpl::number_of_registers; + int slots_of_float_registers = FloatRegister::max_slots_per_register * FloatRegister::number_of_registers; if (reg < slots_of_int_registers + slots_of_float_registers) { return rc_float; } // we have 32 vector register * 4 halves - int slots_of_vector_registers = VectorRegisterImpl::max_slots_per_register * VectorRegisterImpl::number_of_registers; + int slots_of_vector_registers = VectorRegister::max_slots_per_register * VectorRegister::number_of_registers; if (reg < slots_of_int_registers + slots_of_float_registers + slots_of_vector_registers) { return rc_vector; } @@ -2097,7 +2114,7 @@ encode %{ } else { relocInfo::relocType rtype = $src->constant_reloc(); if (rtype == relocInfo::oop_type) { - __ movoop(dst_reg, (jobject)con, /*immediate*/true); + __ movoop(dst_reg, (jobject)con); } else if (rtype == relocInfo::metadata_type) { __ mov_metadata(dst_reg, (Metadata*)con); } else { @@ -5650,14 +5667,13 @@ instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI_R12 oldval, iReg format %{ "cmpxchg_weak $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval\n\t" - "xori $res, $res, 1\t# $res == 1 when success, #@weakCompareAndSwapB" + "# $res == 1 when success, #@weakCompareAndSwapB" %} ins_encode %{ __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8, /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register); - __ xori($res$$Register, $res$$Register, 1); %} ins_pipe(pipe_slow); @@ -5674,14 +5690,13 @@ instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI_R12 oldval, iReg format %{ "cmpxchg_weak $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval\n\t" - "xori $res, $res, 1\t# $res == 1 when success, #@weakCompareAndSwapS" + "# $res == 1 when success, #@weakCompareAndSwapS" %} ins_encode %{ __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16, /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register); - __ xori($res$$Register, $res$$Register, 1); %} ins_pipe(pipe_slow); @@ -5695,13 +5710,12 @@ instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI ne format %{ "cmpxchg_weak $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval\n\t" - "xori $res, $res, 1\t# $res == 1 when success, #@weakCompareAndSwapI" + "# $res == 1 when success, #@weakCompareAndSwapI" %} ins_encode %{ __ cmpxchg_weak(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32, /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register); - __ xori($res$$Register, $res$$Register, 1); %} ins_pipe(pipe_slow); @@ -5715,13 +5729,12 @@ instruct weakCompareAndSwapL(iRegINoSp res, indirect 
mem, iRegL oldval, iRegL ne format %{ "cmpxchg_weak $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval\n\t" - "xori $res, $res, 1\t# $res == 1 when success, #@weakCompareAndSwapL" + "# $res == 1 when success, #@weakCompareAndSwapL" %} ins_encode %{ __ cmpxchg_weak(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64, /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register); - __ xori($res$$Register, $res$$Register, 1); %} ins_pipe(pipe_slow); @@ -5735,13 +5748,12 @@ instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN ne format %{ "cmpxchg_weak $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval\n\t" - "xori $res, $res, 1\t# $res == 1 when success, #@weakCompareAndSwapN" + "# $res == 1 when success, #@weakCompareAndSwapN" %} ins_encode %{ __ cmpxchg_weak(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32, /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register); - __ xori($res$$Register, $res$$Register, 1); %} ins_pipe(pipe_slow); @@ -5756,13 +5768,12 @@ instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP ne format %{ "cmpxchg_weak $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval\n\t" - "xori $res, $res, 1\t# $res == 1 when success, #@weakCompareAndSwapP" + "# $res == 1 when success, #@weakCompareAndSwapP" %} ins_encode %{ __ cmpxchg_weak(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64, /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register); - __ xori($res$$Register, $res$$Register, 1); %} ins_pipe(pipe_slow); @@ -5781,14 +5792,13 @@ instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, i format %{ "cmpxchg_weak_acq $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval\n\t" - "xori $res, $res, 1\t# $res == 1 when success, #@weakCompareAndSwapBAcq" + "# $res == 1 when success, #@weakCompareAndSwapBAcq" %} ins_encode %{ __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8, /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register); - __ xori($res$$Register, $res$$Register, 1); %} ins_pipe(pipe_slow); @@ -5807,14 +5817,13 @@ instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, i format %{ "cmpxchg_weak_acq $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval\n\t" - "xori $res, $res, 1\t# $res == 1 when success, #@weakCompareAndSwapSAcq" + "# $res == 1 when success, #@weakCompareAndSwapSAcq" %} ins_encode %{ __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16, /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register); - __ xori($res$$Register, $res$$Register, 1); %} ins_pipe(pipe_slow); @@ -5830,13 +5839,12 @@ instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI format %{ "cmpxchg_weak_acq $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval\n\t" - "xori $res, $res, 1\t# $res == 1 when success, #@weakCompareAndSwapIAcq" + "# $res == 1 when success, #@weakCompareAndSwapIAcq" %} ins_encode %{ __ cmpxchg_weak(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32, /*acquire*/ Assembler::aq, 
/*release*/ Assembler::rl, $res$$Register); - __ xori($res$$Register, $res$$Register, 1); %} ins_pipe(pipe_slow); @@ -5852,13 +5860,12 @@ instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL format %{ "cmpxchg_weak_acq $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval\n\t" - "xori $res, $res, 1\t# $res == 1 when success, #@weakCompareAndSwapLAcq" + "# $res == 1 when success, #@weakCompareAndSwapLAcq" %} ins_encode %{ __ cmpxchg_weak(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64, /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register); - __ xori($res$$Register, $res$$Register, 1); %} ins_pipe(pipe_slow); @@ -5874,13 +5881,12 @@ instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN format %{ "cmpxchg_weak_acq $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval\n\t" - "xori $res, $res, 1\t# $res == 1 when success, #@weakCompareAndSwapNAcq" + "# $res == 1 when success, #@weakCompareAndSwapNAcq" %} ins_encode %{ __ cmpxchg_weak(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32, /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register); - __ xori($res$$Register, $res$$Register, 1); %} ins_pipe(pipe_slow); @@ -5896,13 +5902,12 @@ instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP format %{ "cmpxchg_weak_acq $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval\n\t" - "xori $res, $res, 1\t# $res == 1 when success, #@weakCompareAndSwapPAcq" + "\t# $res == 1 when success, #@weakCompareAndSwapPAcq" %} ins_encode %{ __ cmpxchg_weak(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64, /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register); - __ xori($res$$Register, $res$$Register, 1); %} ins_pipe(pipe_slow); diff --git a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp index dd5bbe7cba587..c833438ca4fc5 100644 --- a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp +++ b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp @@ -111,7 +111,7 @@ class RegisterSaver { int f0_offset = 0; #ifdef COMPILER2 if (_save_vectors) { - f0_offset += Matcher::scalable_vector_reg_size(T_INT) * VectorRegisterImpl::number_of_registers * + f0_offset += Matcher::scalable_vector_reg_size(T_INT) * VectorRegister::number_of_registers * BytesPerInt; } #endif @@ -119,8 +119,8 @@ class RegisterSaver { } int reserved_slot_offset_in_bytes(void) { return f0_offset_in_bytes() + - FloatRegisterImpl::max_slots_per_register * - FloatRegisterImpl::number_of_registers * + FloatRegister::max_slots_per_register * + FloatRegister::number_of_registers * BytesPerInt; } @@ -135,8 +135,8 @@ class RegisterSaver { int ra_offset_in_bytes(void) { return reserved_slot_offset_in_bytes() + - (RegisterImpl::number_of_registers - 3) * - RegisterImpl::max_slots_per_register * + (Register::number_of_registers - 3) * + Register::max_slots_per_register * BytesPerInt; } }; @@ -151,7 +151,6 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_ } #endif - assert_cond(masm != NULL && total_frame_words != NULL); int frame_size_in_bytes = align_up(additional_frame_words * wordSize + ra_offset_in_bytes() + wordSize, 16); // OopMap frame size is in compiler stack slots (jint's) not bytes or words int frame_size_in_slots = frame_size_in_bytes / BytesPerInt; @@ -178,24 +177,24 @@ OopMap* 
RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_ int step_in_slots = 0; if (_save_vectors) { step_in_slots = vector_size_in_slots; - for (int i = 0; i < VectorRegisterImpl::number_of_registers; i++, sp_offset_in_slots += step_in_slots) { + for (int i = 0; i < VectorRegister::number_of_registers; i++, sp_offset_in_slots += step_in_slots) { VectorRegister r = as_VectorRegister(i); oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset_in_slots), r->as_VMReg()); } } - step_in_slots = FloatRegisterImpl::max_slots_per_register; - for (int i = 0; i < FloatRegisterImpl::number_of_registers; i++, sp_offset_in_slots += step_in_slots) { + step_in_slots = FloatRegister::max_slots_per_register; + for (int i = 0; i < FloatRegister::number_of_registers; i++, sp_offset_in_slots += step_in_slots) { FloatRegister r = as_FloatRegister(i); oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset_in_slots), r->as_VMReg()); } - step_in_slots = RegisterImpl::max_slots_per_register; + step_in_slots = Register::max_slots_per_register; // skip the slot reserved for alignment, see MacroAssembler::push_reg; // also skip x5 ~ x6 on the stack because they are caller-saved registers. - sp_offset_in_slots += RegisterImpl::max_slots_per_register * 3; + sp_offset_in_slots += Register::max_slots_per_register * 3; // besides, we ignore x0 ~ x4 because push_CPU_state won't push them on the stack. - for (int i = 7; i < RegisterImpl::number_of_registers; i++, sp_offset_in_slots += step_in_slots) { + for (int i = 7; i < Register::number_of_registers; i++, sp_offset_in_slots += step_in_slots) { Register r = as_Register(i); if (r != xthread) { oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset_in_slots + additional_frame_slots), r->as_VMReg()); @@ -206,7 +205,6 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_ } void RegisterSaver::restore_live_registers(MacroAssembler* masm) { - assert_cond(masm != NULL); #ifdef COMPILER2 __ pop_CPU_state(_save_vectors, Matcher::scalable_vector_reg_size(T_BYTE)); #else @@ -242,8 +240,8 @@ static int reg2offset_out(VMReg r) { // refer to 4-byte stack slots. All stack slots are based off of the stack pointer // as framesizes are fixed. // VMRegImpl::stack0 refers to the first slot 0(sp). -// and VMRegImpl::stack0+1 refers to the memory word 4-byes higher. Register -// up to RegisterImpl::number_of_registers) are the 64-bit +// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher. +// Registers up to Register::number_of_registers are the 64-bit // integer registers. // Note: the INPUTS in sig_bt are in units of Java argument words, @@ -331,7 +329,6 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt, // Patch the callers callsite with entry to compiled code if it exists. static void patch_callers_callsite(MacroAssembler *masm) { - assert_cond(masm != NULL); Label L; __ ld(t0, Address(xmethod, in_bytes(Method::code_offset()))); __ beqz(t0, L); @@ -769,7 +766,6 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt, // 32bits for a parameter. 
On 32bit it will simply be 32 bits // So this routine will do 32->32 on 32bit and 32->64 on 64bit static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { - assert_cond(masm != NULL); if (src.first()->is_stack()) { if (dst.first()->is_stack()) { // stack to stack @@ -799,7 +795,6 @@ static void object_move(MacroAssembler* masm, VMRegPair dst, bool is_receiver, int* receiver_offset) { - assert_cond(masm != NULL && map != NULL && receiver_offset != NULL); // must pass a handle. First figure out the location we use as a handle Register rHandle = dst.first()->is_stack() ? t1 : dst.first()->as_Register(); @@ -882,7 +877,6 @@ static void object_move(MacroAssembler* masm, static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { assert(src.first()->is_stack() && dst.first()->is_stack() || src.first()->is_reg() && dst.first()->is_reg() || src.first()->is_stack() && dst.first()->is_reg(), "Unexpected error"); - assert_cond(masm != NULL); if (src.first()->is_stack()) { if (dst.first()->is_stack()) { __ lwu(t0, Address(fp, reg2offset_in(src.first()))); @@ -903,7 +897,6 @@ static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { // A long move static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { - assert_cond(masm != NULL); if (src.first()->is_stack()) { if (dst.first()->is_stack()) { // stack to stack @@ -927,7 +920,6 @@ static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { assert(src.first()->is_stack() && dst.first()->is_stack() || src.first()->is_reg() && dst.first()->is_reg() || src.first()->is_stack() && dst.first()->is_reg(), "Unexpected error"); - assert_cond(masm != NULL); if (src.first()->is_stack()) { if (dst.first()->is_stack()) { __ ld(t0, Address(fp, reg2offset_in(src.first()))); @@ -947,7 +939,6 @@ static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { } void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) { - assert_cond(masm != NULL); // We always ignore the frame_slots arg and just use the space just below frame pointer // which by this time is free to use switch (ret_type) { @@ -965,7 +956,6 @@ void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, } void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) { - assert_cond(masm != NULL); // We always ignore the frame_slots arg and just use the space just below frame pointer // which by this time is free to use switch (ret_type) { @@ -983,7 +973,6 @@ void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_ty } static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) { - assert_cond(masm != NULL && args != NULL); RegSet x; for ( int i = first_arg ; i < arg_count ; i++ ) { if (args[i].first()->is_Register()) { @@ -997,7 +986,6 @@ static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegP } static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) { - assert_cond(masm != NULL && args != NULL); RegSet x; for ( int i = first_arg ; i < arg_count ; i++ ) { if (args[i].first()->is_Register()) { @@ -1018,7 +1006,6 @@ static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMR } static void rt_call(MacroAssembler* masm, address dest) { - assert_cond(masm != NULL); CodeBlob *cb = 
CodeCache::find_blob(dest); if (cb) { __ far_call(RuntimeAddress(dest)); } @@ -1325,7 +1312,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); assert_cond(bs != NULL); - bs->nmethod_entry_barrier(masm); + bs->nmethod_entry_barrier(masm, NULL /* slow_path */, NULL /* continuation */, NULL /* guard */); // Frame is now completed as far as size and linkage. int frame_complete = ((intptr_t)__ pc()) - start; @@ -1368,12 +1355,12 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, int int_args = 0; #ifdef ASSERT - bool reg_destroyed[RegisterImpl::number_of_registers]; - bool freg_destroyed[FloatRegisterImpl::number_of_registers]; - for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) { + bool reg_destroyed[Register::number_of_registers]; + bool freg_destroyed[FloatRegister::number_of_registers]; + for ( int r = 0 ; r < Register::number_of_registers ; r++ ) { reg_destroyed[r] = false; } - for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) { + for ( int f = 0 ; f < FloatRegister::number_of_registers ; f++ ) { freg_destroyed[f] = false; } @@ -1455,8 +1442,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, // load oop into a register __ movoop(c_rarg1, - JNIHandles::make_local(method->method_holder()->java_mirror()), - /*immediate*/true); + JNIHandles::make_local(method->method_holder()->java_mirror())); // Now handlize the static class mirror it's known not-null. __ sd(c_rarg1, Address(sp, klass_offset)); @@ -2536,7 +2522,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t // must do any gc of the args. // RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) { - assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before"); + assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before"); // allocate space for the code ResourceMark rm; diff --git a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp index 4a923150a5c4a..274fdbfd8fce2 100644 --- a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp +++ b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp @@ -2341,6 +2341,17 @@ class StubGenerator: public StubCodeGenerator { address start = __ pc(); + BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler(); + + if (bs_asm->nmethod_patching_type() == NMethodPatchingType::conc_instruction_and_data_patch) { + BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); + Address thread_epoch_addr(xthread, in_bytes(bs_nm->thread_disarmed_offset()) + 4); + __ la(t1, ExternalAddress(bs_asm->patching_epoch_addr())); + __ lwu(t1, t1); + __ sw(t1, thread_epoch_addr); + __ membar(__ LoadLoad); + } + __ set_last_Java_frame(sp, fp, ra, t0); __ enter(); @@ -2935,32 +2946,32 @@ class StubGenerator: public StubCodeGenerator { // Register allocation - Register reg = c_rarg0; - Pa_base = reg; // Argument registers + RegSetIterator<Register> regs = RegSet::range(x10, x26).begin(); + Pa_base = *regs; // Argument registers if (squaring) { Pb_base = Pa_base; } else { - Pb_base = ++reg; + Pb_base = *++regs; } - Pn_base = ++reg; - Rlen= ++reg; - inv = ++reg; - Pm_base = ++reg; + Pn_base = *++regs; + Rlen = *++regs; + inv = *++regs; + Pm_base = *++regs; // Working registers: - Ra = ++reg; // The current digit of a, b, n, and m. 
- Rb = ++reg; - Rm = ++reg; - Rn = ++reg; + Ra = *++regs; // The current digit of a, b, n, and m. + Rb = *++regs; + Rm = *++regs; + Rn = *++regs; - Pa = ++reg; // Pointers to the current/next digit of a, b, n, and m. - Pb = ++reg; - Pm = ++reg; - Pn = ++reg; + Pa = *++regs; // Pointers to the current/next digit of a, b, n, and m. + Pb = *++regs; + Pm = *++regs; + Pn = *++regs; - tmp0 = ++reg; // Three registers which form a - tmp1 = ++reg; // triple-precision accumuator. - tmp2 = ++reg; + tmp0 = *++regs; // Three registers which form a + tmp1 = *++regs; // triple-precision accumulator. + tmp2 = *++regs; Ri = x6; // Inner and outer loop indexes. Rj = x7; @@ -2971,7 +2982,7 @@ Rlo_mn = x31; // x18 and up are callee-saved. - _toSave = RegSet::range(x18, reg) + Pm_base; + _toSave = RegSet::range(x18, *regs) + Pm_base; } private: @@ -3227,7 +3238,8 @@ // Preserves len // Leaves s pointing to the address which was in d at start void reverse(Register d, Register s, Register len, Register tmp1, Register tmp2) { - assert(tmp1 < x28 && tmp2 < x28, "register corruption"); + assert(tmp1->encoding() < x28->encoding(), "register corruption"); + assert(tmp2->encoding() < x28->encoding(), "register corruption"); slli(tmp1, len, LogBytesPerWord); add(s, s, tmp1); diff --git a/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp b/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp index 1ae4e857580fd..e83032201b0c6 100644 --- a/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp +++ b/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp @@ -932,7 +932,7 @@ void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) { // native method than the typical interpreter frame setup. 
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { // determine code generation flags - bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods; + bool inc_counter = UseCompiler || CountCompiledCalls; // x11: Method* // x30: sender sp @@ -1326,7 +1326,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) { // determine code generation flags - const bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods; + const bool inc_counter = UseCompiler || CountCompiledCalls; // t0: sender sp address entry_point = __ pc(); diff --git a/src/hotspot/cpu/riscv/templateTable_riscv.cpp b/src/hotspot/cpu/riscv/templateTable_riscv.cpp index e1631667c81fd..1e12ccb5f61db 100644 --- a/src/hotspot/cpu/riscv/templateTable_riscv.cpp +++ b/src/hotspot/cpu/riscv/templateTable_riscv.cpp @@ -70,15 +70,12 @@ static inline Address aaddress(int n) { return iaddress(n); } -static inline Address iaddress(Register r, Register temp, InterpreterMacroAssembler* _masm) { - assert_cond(_masm != NULL); +static inline Address iaddress(Register r, Register temp, InterpreterMacroAssembler* _masm) { _masm->shadd(temp, r, xlocals, temp, 3); return Address(temp, 0); } -static inline Address laddress(Register r, Register temp, - InterpreterMacroAssembler* _masm) { - assert_cond(_masm != NULL); +static inline Address laddress(Register r, Register temp, InterpreterMacroAssembler* _masm) { _masm->shadd(temp, r, xlocals, temp, 3); return Address(temp, Interpreter::local_offset_in_bytes(1)); } @@ -87,8 +84,7 @@ static inline Address faddress(Register r, Register temp, InterpreterMacroAssemb return iaddress(r, temp, _masm); } -static inline Address daddress(Register r, Register temp, - InterpreterMacroAssembler* _masm) { +static inline Address daddress(Register r, Register temp, InterpreterMacroAssembler* _masm) { return laddress(r, temp, _masm); } @@ -134,7 +130,6 @@ static void do_oop_store(InterpreterMacroAssembler* _masm, Register val, DecoratorSet decorators) { assert(val == noreg || val == x10, "parameter is just for looks"); - assert_cond(_masm != NULL); __ store_heap_oop(dst, val, x29, x11, decorators); } @@ -142,7 +137,6 @@ static void do_oop_load(InterpreterMacroAssembler* _masm, Address src, Register dst, DecoratorSet decorators) { - assert_cond(_masm != NULL); __ load_heap_oop(dst, src, x7, x11, decorators); } diff --git a/src/hotspot/cpu/riscv/vmreg_riscv.cpp b/src/hotspot/cpu/riscv/vmreg_riscv.cpp index a4985e7f5693e..007f58d58a18d 100644 --- a/src/hotspot/cpu/riscv/vmreg_riscv.cpp +++ b/src/hotspot/cpu/riscv/vmreg_riscv.cpp @@ -31,7 +31,7 @@ void VMRegImpl::set_regName() { int i = 0; Register reg = ::as_Register(0); for ( ; i < ConcreteRegisterImpl::max_gpr ; ) { - for (int j = 0 ; j < RegisterImpl::max_slots_per_register ; j++) { + for (int j = 0 ; j < Register::max_slots_per_register ; j++) { regName[i++] = reg->name(); } reg = reg->successor(); @@ -39,7 +39,7 @@ void VMRegImpl::set_regName() { FloatRegister freg = ::as_FloatRegister(0); for ( ; i < ConcreteRegisterImpl::max_fpr ; ) { - for (int j = 0 ; j < FloatRegisterImpl::max_slots_per_register ; j++) { + for (int j = 0 ; j < FloatRegister::max_slots_per_register ; j++) { regName[i++] = freg->name(); } freg = freg->successor(); @@ -47,7 +47,7 @@ void VMRegImpl::set_regName() { VectorRegister vreg = ::as_VectorRegister(0); for ( ; i < ConcreteRegisterImpl::max_vpr ; ) { - for (int j = 0 ; j < 
VectorRegisterImpl::max_slots_per_register ; j++) { + for (int j = 0 ; j < VectorRegister::max_slots_per_register ; j++) { regName[i++] = vreg->name(); } vreg = vreg->successor(); diff --git a/src/hotspot/cpu/riscv/vmreg_riscv.hpp b/src/hotspot/cpu/riscv/vmreg_riscv.hpp index 9e611b1f67110..a70a373cfe04b 100644 --- a/src/hotspot/cpu/riscv/vmreg_riscv.hpp +++ b/src/hotspot/cpu/riscv/vmreg_riscv.hpp @@ -40,26 +40,26 @@ inline bool is_VectorRegister() { inline Register as_Register() { assert(is_Register(), "must be"); - return ::as_Register(value() / RegisterImpl::max_slots_per_register); + return ::as_Register(value() / Register::max_slots_per_register); } inline FloatRegister as_FloatRegister() { assert(is_FloatRegister() && is_even(value()), "must be"); return ::as_FloatRegister((value() - ConcreteRegisterImpl::max_gpr) / - FloatRegisterImpl::max_slots_per_register); + FloatRegister::max_slots_per_register); } inline VectorRegister as_VectorRegister() { - assert(is_VectorRegister() && ((value() & (VectorRegisterImpl::max_slots_per_register - 1)) == 0), "must be"); + assert(is_VectorRegister() && ((value() & (VectorRegister::max_slots_per_register - 1)) == 0), "must be"); return ::as_VectorRegister((value() - ConcreteRegisterImpl::max_fpr) / - VectorRegisterImpl::max_slots_per_register); + VectorRegister::max_slots_per_register); } inline bool is_concrete() { assert(is_reg(), "must be"); if (is_VectorRegister()) { int base = value() - ConcreteRegisterImpl::max_fpr; - return (base % VectorRegisterImpl::max_slots_per_register) == 0; + return (base % VectorRegister::max_slots_per_register) == 0; } else { return is_even(value()); } diff --git a/src/hotspot/cpu/riscv/vmreg_riscv.inline.hpp b/src/hotspot/cpu/riscv/vmreg_riscv.inline.hpp index 06b70020b4b98..6dce7b26f4894 100644 --- a/src/hotspot/cpu/riscv/vmreg_riscv.inline.hpp +++ b/src/hotspot/cpu/riscv/vmreg_riscv.inline.hpp @@ -26,20 +26,17 @@ #ifndef CPU_RISCV_VM_VMREG_RISCV_INLINE_HPP #define CPU_RISCV_VM_VMREG_RISCV_INLINE_HPP -inline VMReg RegisterImpl::as_VMReg() const { - if (this == noreg) { - return VMRegImpl::Bad(); - } - return VMRegImpl::as_VMReg(encoding() * RegisterImpl::max_slots_per_register); +inline VMReg Register::RegisterImpl::as_VMReg() const { + return VMRegImpl::as_VMReg(encoding() * Register::max_slots_per_register); } -inline VMReg FloatRegisterImpl::as_VMReg() const { - return VMRegImpl::as_VMReg((encoding() * FloatRegisterImpl::max_slots_per_register) + +inline VMReg FloatRegister::FloatRegisterImpl::as_VMReg() const { + return VMRegImpl::as_VMReg((encoding() * FloatRegister::max_slots_per_register) + ConcreteRegisterImpl::max_gpr); } -inline VMReg VectorRegisterImpl::as_VMReg() const { - return VMRegImpl::as_VMReg((encoding() * VectorRegisterImpl::max_slots_per_register) + +inline VMReg VectorRegister::VectorRegisterImpl::as_VMReg() const { + return VMRegImpl::as_VMReg((encoding() * VectorRegister::max_slots_per_register) + ConcreteRegisterImpl::max_fpr); } diff --git a/src/hotspot/cpu/s390/frame_s390.cpp b/src/hotspot/cpu/s390/frame_s390.cpp index b33c07aef101c..916db1143f349 100644 --- a/src/hotspot/cpu/s390/frame_s390.cpp +++ b/src/hotspot/cpu/s390/frame_s390.cpp @@ -122,16 +122,11 @@ bool frame::safe_for_sender(JavaThread *thread) { address sender_pc = (address) sender_abi->return_pc; // We must always be able to find a recognizable pc. 
- CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc); + CodeBlob* sender_blob = CodeCache::find_blob(sender_pc); if (sender_blob == NULL) { return false; } - // Could be a zombie method - if (sender_blob->is_zombie() || sender_blob->is_unloaded()) { - return false; - } - // It should be safe to construct the sender though it might not be valid. frame sender(sender_sp, sender_pc); @@ -424,7 +419,7 @@ void frame::back_trace(outputStream* st, intptr_t* start_sp, intptr_t* top_pc, u } } } else if (CodeCache::contains(current_pc)) { - blob = CodeCache::find_blob_unsafe(current_pc); + blob = CodeCache::find_blob(current_pc); if (blob) { if (blob->is_nmethod()) { frame_type = 3; diff --git a/src/hotspot/cpu/s390/interp_masm_s390.cpp b/src/hotspot/cpu/s390/interp_masm_s390.cpp index 5833e54a17915..9d8ab1ff1e188 100644 --- a/src/hotspot/cpu/s390/interp_masm_s390.cpp +++ b/src/hotspot/cpu/s390/interp_masm_s390.cpp @@ -1920,7 +1920,7 @@ void InterpreterMacroAssembler::get_method_counters(Register Rmethod, // Return (invocation_counter+backedge_counter) as "result" in RctrSum. // Counter values are all unsigned. void InterpreterMacroAssembler::increment_invocation_counter(Register Rcounters, Register RctrSum) { - assert(UseCompiler || LogTouchedMethods, "incrementing must be useful"); + assert(UseCompiler, "incrementing must be useful"); assert_different_registers(Rcounters, RctrSum); int increment = InvocationCounter::count_increment; diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.cpp b/src/hotspot/cpu/s390/macroAssembler_s390.cpp index 2eba303cd6550..5a26adf5ec90a 100644 --- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp @@ -4484,7 +4484,7 @@ intptr_t MacroAssembler::get_const_from_toc(address pc) { if (is_load_const_from_toc_pcrelative(pc)) { dataLoc = pc + offset; } else { - CodeBlob* cb = CodeCache::find_blob_unsafe(pc); // Else we get assertion if nmethod is zombie. + CodeBlob* cb = CodeCache::find_blob(pc); assert(cb && cb->is_nmethod(), "sanity"); nmethod* nm = (nmethod*)cb; dataLoc = nm->ctable_begin() + offset; diff --git a/src/hotspot/cpu/s390/nativeInst_s390.cpp b/src/hotspot/cpu/s390/nativeInst_s390.cpp index 60d17a8a6f11a..c1c395b669784 100644 --- a/src/hotspot/cpu/s390/nativeInst_s390.cpp +++ b/src/hotspot/cpu/s390/nativeInst_s390.cpp @@ -168,20 +168,20 @@ bool NativeInstruction::is_illegal() { return halfword_at(-2) == illegal_instruction(); } -// We use an illtrap for marking a method as not_entrant or zombie. -bool NativeInstruction::is_sigill_zombie_not_entrant() { +// We use an illtrap for marking a method as not_entrant. +bool NativeInstruction::is_sigill_not_entrant() { if (!is_illegal()) return false; // Just a quick path. // One-sided error of is_illegal tolerable here // (see implementation of is_illegal() for details). - CodeBlob* cb = CodeCache::find_blob_unsafe(addr_at(0)); + CodeBlob* cb = CodeCache::find_blob(addr_at(0)); if (cb == NULL || !cb->is_nmethod()) { return false; } nmethod *nm = (nmethod *)cb; - // This method is not_entrant or zombie if the illtrap instruction + // This method is not_entrant if the illtrap instruction // is located at the verified entry point. // BE AWARE: the current pc (this) points to the instruction after the // "illtrap" location. 
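For readers following the is_sigill_zombie_not_entrant -> is_sigill_not_entrant renames above: on both ports a method is marked not_entrant by planting a permanently illegal instruction at its verified entry (an all-ones word on RISC-V, an illtrap on s390), and the SIGILL handler then asks the code cache whether the faulting pc is such a marker. A minimal standalone C++ sketch of the RISC-V check follows; it is illustrative only, and is_not_entrant_marker is a hypothetical name, not a HotSpot API.

#include <cstdint>
#include <cstring>

// Sketch: recognize the RISC-V not_entrant marker. A 16-bit parcel of all
// ones is permanently reserved as an illegal instruction, so a 32-bit word
// of 0xffffffff (what the patch's uint_at(0) == 0xffffffff tests) can never
// be valid code.
static bool is_not_entrant_marker(const void* verified_entry) {
  uint32_t insn;
  std::memcpy(&insn, verified_entry, sizeof(insn));  // alignment-safe read
  return insn == 0xffffffffu;
}

A signal handler that trips at such a pc can then redirect the caller (HotSpot routes it to the handle_wrong_method stub, as patch_verified_entry above asserts) instead of treating the trap as a crash.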
diff --git a/src/hotspot/cpu/s390/nativeInst_s390.hpp b/src/hotspot/cpu/s390/nativeInst_s390.hpp index 158a3ade5fd43..8cd03e0cfa098 100644 --- a/src/hotspot/cpu/s390/nativeInst_s390.hpp +++ b/src/hotspot/cpu/s390/nativeInst_s390.hpp @@ -85,8 +85,8 @@ class NativeInstruction { // Bcrl is currently the only accepted instruction here. bool is_jump(); - // We use an illtrap for marking a method as not_entrant or zombie. - bool is_sigill_zombie_not_entrant(); + // We use an illtrap for marking a method as not_entrant. + bool is_sigill_not_entrant(); bool is_safepoint_poll() { // Is the current instruction a POTENTIAL read access to the polling page? diff --git a/src/hotspot/cpu/s390/templateInterpreterGenerator_s390.cpp b/src/hotspot/cpu/s390/templateInterpreterGenerator_s390.cpp index 0d76d72c96699..4ca45cb5e9813 100644 --- a/src/hotspot/cpu/s390/templateInterpreterGenerator_s390.cpp +++ b/src/hotspot/cpu/s390/templateInterpreterGenerator_s390.cpp @@ -1301,7 +1301,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M // native method than the typical interpreter frame setup. address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { // Determine code generation flags. - bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods; + bool inc_counter = UseCompiler || CountCompiledCalls; // Interpreter entry for ordinary Java methods. // @@ -1658,7 +1658,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) { address entry_point = __ pc(); - bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods; + bool inc_counter = UseCompiler || CountCompiledCalls; // Interpreter entry for ordinary Java methods. 
// diff --git a/src/hotspot/cpu/x86/assembler_x86.cpp b/src/hotspot/cpu/x86/assembler_x86.cpp index 15e539d1f5150..f54dda1b13282 100644 --- a/src/hotspot/cpu/x86/assembler_x86.cpp +++ b/src/hotspot/cpu/x86/assembler_x86.cpp @@ -1255,12 +1255,12 @@ void Assembler::emit_operand(Register reg, Address adr, } void Assembler::emit_operand(XMMRegister reg, Address adr) { - if (adr.isxmmindex()) { - emit_operand(reg, adr._base, adr._xmmindex, adr._scale, adr._disp, adr._rspec); - } else { - emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, - adr._rspec); - } + if (adr.isxmmindex()) { + emit_operand(reg, adr._base, adr._xmmindex, adr._scale, adr._disp, adr._rspec); + } else { + emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, + adr._rspec); + } } // Now the Assembler instructions (identical for 32/64 bits) @@ -12246,11 +12246,17 @@ static bool is_reachable(address target, relocInfo::relocType reloc_type) { bool Assembler::reachable(AddressLiteral adr) { assert(CodeCache::contains(pc()), "required"); + if (adr.is_lval()) { + return false; + } return is_reachable(adr.target(), adr.reloc()); } bool Assembler::always_reachable(AddressLiteral adr) { assert(CodeCache::contains(pc()), "required"); + if (adr.is_lval()) { + return false; + } return is_always_reachable(adr.target(), adr.reloc()); } diff --git a/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp b/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp index 51909e5fa1473..edbad4147039f 100644 --- a/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp +++ b/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp @@ -544,7 +544,7 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) { #ifndef PRODUCT if (PrintC1Statistics) { - __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt)); + __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt), rscratch1); } #endif diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp index ff4117dbffc67..48e6d9aa605ee 100644 --- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp @@ -196,7 +196,7 @@ void LIR_Assembler::push(LIR_Opr opr) { } else if (opr->is_constant()) { LIR_Const* const_opr = opr->as_constant_ptr(); if (const_opr->type() == T_OBJECT) { - __ push_oop(const_opr->as_jobject()); + __ push_oop(const_opr->as_jobject(), rscratch1); } else if (const_opr->type() == T_INT) { __ push_jint(const_opr->as_jint()); } else { @@ -469,7 +469,7 @@ int LIR_Assembler::emit_unwind_handler() { #else __ get_thread(rax); __ movptr(Address(rsp, 0), rax); - __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding()); + __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding(), noreg); #endif __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit))); } @@ -503,7 +503,7 @@ int LIR_Assembler::emit_deopt_handler() { int offset = code_offset(); InternalAddress here(__ pc()); - __ pushptr(here.addr()); + __ pushptr(here.addr(), rscratch1); __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack())); guarantee(code_offset() - offset <= deopt_handler_size(), "overflow"); __ end_a_stub(); @@ -691,14 +691,16 @@ void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) { break; case T_OBJECT: - __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject()); + __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject(), rscratch1); break; case T_LONG: // fall through case T_DOUBLE: #ifdef _LP64 __ 
movptr(frame_map()->address_for_slot(dest->double_stack_ix(), - lo_word_offset_in_bytes), (intptr_t)c->as_jlong_bits()); + lo_word_offset_in_bytes), + (intptr_t)c->as_jlong_bits(), + rscratch1); #else __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes), c->as_jint_lo_bits()); @@ -746,7 +748,7 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi } else { if (is_literal_address(addr)) { ShouldNotReachHere(); - __ movoop(as_Address(addr, noreg), c->as_jobject()); + __ movoop(as_Address(addr, noreg), c->as_jobject(), rscratch1); } else { #ifdef _LP64 __ movoop(rscratch1, c->as_jobject()); @@ -759,7 +761,7 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi __ movptr(as_Address_lo(addr), rscratch1); } #else - __ movoop(as_Address(addr), c->as_jobject()); + __ movoop(as_Address(addr), c->as_jobject(), noreg); #endif } } @@ -1784,7 +1786,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L #ifdef _LP64 __ push(k_RInfo); #else - __ pushklass(k->constant_encoding()); + __ pushklass(k->constant_encoding(), noreg); #endif // _LP64 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); __ pop(klass_RInfo); @@ -2432,7 +2434,8 @@ void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_ } assert(!tmp->is_valid(), "do not need temporary"); __ andpd(dest->as_xmm_double_reg(), - ExternalAddress((address)double_signmask_pool)); + ExternalAddress((address)double_signmask_pool), + rscratch1); } } break; @@ -2673,7 +2676,7 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, if (o == NULL) { __ cmpptr(reg1, NULL_WORD); } else { - __ cmpoop(reg1, o); + __ cmpoop(reg1, o, rscratch1); } } else { fatal("unexpected type: %s", basictype_to_str(c->type())); @@ -3035,19 +3038,19 @@ void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) { } -void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) { +void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) { assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); - __ movoop (Address(rsp, offset_from_rsp_in_bytes), o); + __ movoop(Address(rsp, offset_from_rsp_in_bytes), o, rscratch1); } -void LIR_Assembler::store_parameter(Metadata* m, int offset_from_rsp_in_words) { +void LIR_Assembler::store_parameter(Metadata* m, int offset_from_rsp_in_words) { assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); - __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m); + __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m, rscratch1); } @@ -3109,7 +3112,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { store_parameter(j_rarg4, 4); #ifndef PRODUCT if (PrintC1Statistics) { - __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt)); + __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1); } #endif __ call(RuntimeAddress(copyfunc_addr)); @@ -3118,7 +3121,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { __ mov(c_rarg4, j_rarg4); #ifndef PRODUCT if (PrintC1Statistics) { - __ 
incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt)); + __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1); } #endif __ call(RuntimeAddress(copyfunc_addr)); @@ -3132,7 +3135,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { #ifndef PRODUCT if (PrintC1Statistics) { - __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt)); + __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1); } #endif __ call_VM_leaf(copyfunc_addr, 5); // removes pushed parameter from the stack @@ -3365,7 +3368,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { Label failed; __ testl(rax, rax); __ jcc(Assembler::notZero, failed); - __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt)); + __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt), rscratch1); __ bind(failed); } #endif @@ -3375,7 +3378,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { #ifndef PRODUCT if (PrintC1Statistics) { - __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt)); + __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt), rscratch1); } #endif @@ -3584,7 +3587,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { ciKlass* receiver = vc_data->receiver(i); if (receiver == NULL) { Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))); - __ mov_metadata(recv_addr, known_klass->constant_encoding()); + __ mov_metadata(recv_addr, known_klass->constant_encoding(), rscratch1); Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); __ addptr(data_addr, DataLayout::counter_increment); return; @@ -3816,7 +3819,8 @@ void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) { __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg()); } __ xorps(dest->as_xmm_float_reg(), - ExternalAddress((address)float_signflip_pool)); + ExternalAddress((address)float_signflip_pool), + rscratch1); } } else if (dest->is_double_xmm()) { #ifdef _LP64 @@ -3833,7 +3837,8 @@ void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) { __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg()); } __ xorpd(dest->as_xmm_double_reg(), - ExternalAddress((address)double_signflip_pool)); + ExternalAddress((address)double_signflip_pool), + rscratch1); } #ifndef _LP64 } else if (left->is_single_fpu() || left->is_double_fpu()) { diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.hpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.hpp index 10270f4fb9a4b..7dae8ba8a5e82 100644 --- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.hpp +++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.hpp @@ -37,8 +37,7 @@ bool is_literal_address(LIR_Address* addr); - // When we need to use something other than rscratch1 use this - // method. + // When we need to use something other than rscratch1 use this method. 
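// (Usage sketch with a hypothetical call site: if rscratch1 is already live,
//  pass any register known to be free at that point instead, e.g.
//    Address card = as_Address(addr, rbx /*tmp*/);
//  'tmp' is only clobbered when the address cannot be encoded directly,
//  typically a displacement that does not fit in 32 bits on x86_64.)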
Address as_Address(LIR_Address* addr, Register tmp); // Record the type of the receiver in ReceiverTypeData diff --git a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp index 4935dc3b8d73e..3ef7e0023dc5d 100644 --- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp @@ -39,7 +39,6 @@ #include "runtime/stubRoutines.hpp" int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) { - const Register rklass_decode_tmp = LP64_ONLY(rscratch1) NOT_LP64(noreg); const int aligned_mask = BytesPerWord -1; const int hdr_offset = oopDesc::mark_offset_in_bytes(); assert(hdr == rax, "hdr must be rax, for the cmpxchg instruction"); @@ -55,7 +54,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr null_check_offset = offset(); if (DiagnoseSyncOnValueBasedClasses != 0) { - load_klass(hdr, obj, rklass_decode_tmp); + load_klass(hdr, obj, rscratch1); movl(hdr, Address(hdr, Klass::access_flags_offset())); testl(hdr, JVM_ACC_IS_VALUE_BASED_CLASS); jcc(Assembler::notZero, slow_case); @@ -146,12 +145,11 @@ void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, i void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) { assert_different_registers(obj, klass, len); - Register tmp_encode_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); movptr(Address(obj, oopDesc::mark_offset_in_bytes()), checked_cast(markWord::prototype().value())); #ifdef _LP64 if (UseCompressedClassPointers) { // Take care not to kill klass movptr(t1, klass); - encode_klass_not_null(t1, tmp_encode_klass); + encode_klass_not_null(t1, rscratch1); movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1); } else #endif @@ -286,10 +284,9 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) { // check against inline cache assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check"); int start_offset = offset(); - Register tmp_load_klass = LP64_ONLY(rscratch2) NOT_LP64(noreg); if (UseCompressedClassPointers) { - load_klass(rscratch1, receiver, tmp_load_klass); + load_klass(rscratch1, receiver, rscratch2); cmpptr(rscratch1, iCache); } else { cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes())); diff --git a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.hpp b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.hpp index 374b1ac3be34c..2dc986d8c1ac5 100644 --- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.hpp +++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.hpp @@ -98,12 +98,13 @@ // This helps us to track the rsp changes compared to the entry rsp (->_rsp_offset) void push_jint (jint i) { _rsp_offset++; push(i); } - void push_oop (jobject o) { _rsp_offset++; pushoop(o); } // Seems to always be in wordSize void push_addr (Address a) { _rsp_offset++; pushptr(a); } void push_reg (Register r) { _rsp_offset++; push(r); } void pop_reg (Register r) { _rsp_offset--; pop(r); assert(_rsp_offset >= 0, "stack offset underflow"); } + void push_oop (jobject o, Register rscratch) { _rsp_offset++; pushoop(o, rscratch); } + void dec_stack (int nof_words) { _rsp_offset -= nof_words; assert(_rsp_offset >= 0, "stack offset underflow"); diff --git a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp index b26d0c4e487a1..2abbb3a46b960 100644 --- a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp +++ 
b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp @@ -77,11 +77,11 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre int call_offset = -1; if (!align_stack) { - set_last_Java_frame(thread, noreg, rbp, NULL); + set_last_Java_frame(thread, noreg, rbp, NULL, rscratch1); } else { address the_pc = pc(); call_offset = offset(); - set_last_Java_frame(thread, noreg, rbp, the_pc); + set_last_Java_frame(thread, noreg, rbp, the_pc, rscratch1); andptr(rsp, -(StackAlignmentInBytes)); // Align stack } @@ -886,7 +886,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) { __ get_thread(thread); __ push(thread); #endif // _LP64 - __ set_last_Java_frame(thread, noreg, rbp, NULL); + __ set_last_Java_frame(thread, noreg, rbp, NULL, rscratch1); // do the call __ call(RuntimeAddress(target)); OopMapSet* oop_maps = new OopMapSet(); @@ -1147,9 +1147,8 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { // load the klass and check the has finalizer flag Label register_finalizer; - Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); Register t = rsi; - __ load_klass(t, rax, tmp_load_klass); + __ load_klass(t, rax, rscratch1); __ movl(t, Address(t, Klass::access_flags_offset())); __ testl(t, JVM_ACC_HAS_FINALIZER); __ jcc(Assembler::notZero, register_finalizer); diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp index 0c1d0c17de67b..3c370afaa7b3e 100644 --- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp @@ -1649,6 +1649,8 @@ void C2_MacroAssembler::load_vector(XMMRegister dst, Address src, int vlen_in_by } void C2_MacroAssembler::load_vector(XMMRegister dst, AddressLiteral src, int vlen_in_bytes, Register rscratch) { + assert(rscratch != noreg || always_reachable(src), "missing"); + if (reachable(src)) { load_vector(dst, as_Address(src), vlen_in_bytes); } else { @@ -1662,21 +1664,21 @@ void C2_MacroAssembler::load_constant_vector(BasicType bt, XMMRegister dst, Inte if (VM_Version::supports_avx()) { if (bt == T_LONG) { if (VM_Version::supports_avx2()) { - vpbroadcastq(dst, src, vlen_enc, noreg); + vpbroadcastq(dst, src, vlen_enc); } else { - vmovddup(dst, src, vlen_enc, noreg); + vmovddup(dst, src, vlen_enc); } } else if (bt == T_DOUBLE) { if (vlen_enc != Assembler::AVX_128bit) { vbroadcastsd(dst, src, vlen_enc, noreg); } else { - vmovddup(dst, src, vlen_enc, noreg); + vmovddup(dst, src, vlen_enc); } } else { if (VM_Version::supports_avx2() && is_integral_type(bt)) { - vpbroadcastd(dst, src, vlen_enc, noreg); + vpbroadcastd(dst, src, vlen_enc); } else { - vbroadcastss(dst, src, vlen_enc, noreg); + vbroadcastss(dst, src, vlen_enc); } } } else if (VM_Version::supports_sse3()) { @@ -2395,25 +2397,25 @@ void C2_MacroAssembler::evpcmp(BasicType typ, KRegister kdmask, KRegister ksmask } } -void C2_MacroAssembler::evpcmp(BasicType typ, KRegister kdmask, KRegister ksmask, XMMRegister src1, AddressLiteral adr, int comparison, int vector_len, Register rscratch) { - assert(rscratch != noreg || always_reachable(adr), "missing"); +void C2_MacroAssembler::evpcmp(BasicType typ, KRegister kdmask, KRegister ksmask, XMMRegister src1, AddressLiteral src2, int comparison, int vector_len, Register rscratch) { + assert(rscratch != noreg || always_reachable(src2), "missing"); switch(typ) { case T_BOOLEAN: case T_BYTE: - evpcmpb(kdmask, ksmask, src1, adr, comparison, /*signed*/ true, vector_len, rscratch); + evpcmpb(kdmask, ksmask, src1, src2, 
comparison, /*signed*/ true, vector_len, rscratch); break; case T_CHAR: case T_SHORT: - evpcmpw(kdmask, ksmask, src1, adr, comparison, /*signed*/ true, vector_len, rscratch); + evpcmpw(kdmask, ksmask, src1, src2, comparison, /*signed*/ true, vector_len, rscratch); break; case T_INT: case T_FLOAT: - evpcmpd(kdmask, ksmask, src1, adr, comparison, /*signed*/ true, vector_len, rscratch); + evpcmpd(kdmask, ksmask, src1, src2, comparison, /*signed*/ true, vector_len, rscratch); break; case T_LONG: case T_DOUBLE: - evpcmpq(kdmask, ksmask, src1, adr, comparison, /*signed*/ true, vector_len, rscratch); + evpcmpq(kdmask, ksmask, src1, src2, comparison, /*signed*/ true, vector_len, rscratch); break; default: assert(false,"Should not reach here."); @@ -4357,12 +4359,11 @@ void C2_MacroAssembler::masked_op(int ideal_opc, int mask_len, KRegister dst, * If the src is positive infinity or any value greater than or equal to the value of Integer.MAX_VALUE, * the result is equal to the value of Integer.MAX_VALUE. */ -void C2_MacroAssembler::vector_cast_float_special_cases_avx(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, - XMMRegister xtmp2, XMMRegister xtmp3, XMMRegister xtmp4, - Register scratch, AddressLiteral float_sign_flip, - int vec_enc) { +void C2_MacroAssembler::vector_cast_float_special_cases_avx(XMMRegister dst, XMMRegister src, AddressLiteral float_sign_flip, int vec_enc, + XMMRegister xtmp1, XMMRegister xtmp2, XMMRegister xtmp3, XMMRegister xtmp4, + Register rscratch) { Label done; - vmovdqu(xtmp1, float_sign_flip, vec_enc, scratch); + vmovdqu(xtmp1, float_sign_flip, vec_enc, rscratch); vpcmpeqd(xtmp2, dst, xtmp1, vec_enc); vptest(xtmp2, xtmp2, vec_enc); jccb(Assembler::equal, done); @@ -4386,12 +4387,12 @@ void C2_MacroAssembler::vector_cast_float_special_cases_avx(XMMRegister dst, XMM bind(done); } -void C2_MacroAssembler::vector_cast_float_special_cases_evex(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, - XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2, - Register scratch, AddressLiteral float_sign_flip, - int vec_enc) { +void C2_MacroAssembler::vector_cast_float_special_cases_evex(XMMRegister dst, XMMRegister src, AddressLiteral float_sign_flip, int vec_enc, + XMMRegister xtmp1, XMMRegister xtmp2, + KRegister ktmp1, KRegister ktmp2, + Register rscratch) { Label done; - evmovdqul(xtmp1, k0, float_sign_flip, false, vec_enc, scratch); + evmovdqul(xtmp1, k0, float_sign_flip, false, vec_enc, rscratch); Assembler::evpcmpeqd(ktmp1, k0, xtmp1, dst, vec_enc); kortestwl(ktmp1, ktmp1); jccb(Assembler::equal, done); @@ -4407,13 +4408,15 @@ void C2_MacroAssembler::vector_cast_float_special_cases_evex(XMMRegister dst, XM bind(done); } -void C2_MacroAssembler::vector_cast_float_to_long_special_cases_evex( - XMMRegister dst, XMMRegister src, XMMRegister xtmp1, - XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2, - Register scratch, AddressLiteral double_sign_flip, - int vec_enc) { +void C2_MacroAssembler::vector_cast_float_to_long_special_cases_evex(XMMRegister dst, XMMRegister src, + AddressLiteral double_sign_flip, int vec_enc, + XMMRegister xtmp1, XMMRegister xtmp2, + KRegister ktmp1, KRegister ktmp2, + Register rscratch) { + assert(rscratch != noreg || always_reachable(double_sign_flip), "missing"); + Label done; - evmovdquq(xtmp1, k0, double_sign_flip, false, vec_enc, scratch); + evmovdquq(xtmp1, k0, double_sign_flip, false, vec_enc, rscratch); Assembler::evpcmpeqq(ktmp1, k0, xtmp1, dst, vec_enc); kortestwl(ktmp1, ktmp1); jccb(Assembler::equal, done); @@ -4437,12 +4440,14 @@ void 
C2_MacroAssembler::vector_cast_float_to_long_special_cases_evex( * If the src is positive infinity or any value greater than or equal to the value of Long.MAX_VALUE, * the result is equal to the value of Long.MAX_VALUE. */ -void C2_MacroAssembler::vector_cast_double_special_cases_evex(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, - XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2, - Register scratch, AddressLiteral double_sign_flip, - int vec_enc) { +void C2_MacroAssembler::vector_cast_double_special_cases_evex(XMMRegister dst, XMMRegister src, + AddressLiteral double_sign_flip, int vec_enc, + XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2, + Register rscratch) { + assert(rscratch != noreg || always_reachable(double_sign_flip), "missing"); + Label done; - evmovdqul(xtmp1, k0, double_sign_flip, false, vec_enc, scratch); + evmovdqul(xtmp1, k0, double_sign_flip, false, vec_enc, rscratch); evpcmpeqq(ktmp1, xtmp1, dst, vec_enc); kortestwl(ktmp1, ktmp1); jccb(Assembler::equal, done); @@ -4468,38 +4473,48 @@ void C2_MacroAssembler::vector_cast_double_special_cases_evex(XMMRegister dst, X * d) Replace 0x80000000 with MaxInt if source lane contains a +ve value. */ -void C2_MacroAssembler::vector_castD2L_evex(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2, - KRegister ktmp1, KRegister ktmp2, AddressLiteral double_sign_flip, - Register scratch, int vec_enc) { +void C2_MacroAssembler::vector_castD2L_evex(XMMRegister dst, XMMRegister src, AddressLiteral double_sign_flip, int vec_enc, + XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2, Register rscratch) { + assert(rscratch != noreg || always_reachable(double_sign_flip), "missing"); + evcvttpd2qq(dst, src, vec_enc); - vector_cast_double_special_cases_evex(dst, src, xtmp1, xtmp2, ktmp1, ktmp2, scratch, double_sign_flip, vec_enc); + vector_cast_double_special_cases_evex(dst, src, double_sign_flip, vec_enc, + xtmp1, xtmp2, ktmp1, ktmp2, rscratch); } -void C2_MacroAssembler::vector_castF2I_avx(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, - XMMRegister xtmp2, XMMRegister xtmp3, XMMRegister xtmp4, - AddressLiteral float_sign_flip, Register scratch, int vec_enc) { +void C2_MacroAssembler::vector_castF2I_avx(XMMRegister dst, XMMRegister src, AddressLiteral float_sign_flip, int vec_enc, + XMMRegister xtmp1, XMMRegister xtmp2, XMMRegister xtmp3, XMMRegister xtmp4, Register rscratch) { + assert(rscratch != noreg || always_reachable(float_sign_flip), "missing"); + vcvttps2dq(dst, src, vec_enc); - vector_cast_float_special_cases_avx(dst, src, xtmp1, xtmp2, xtmp3, xtmp4, scratch, float_sign_flip, vec_enc); + vector_cast_float_special_cases_avx(dst, src, float_sign_flip, vec_enc, + xtmp1, xtmp2, xtmp3, xtmp4, rscratch); } -void C2_MacroAssembler::vector_castF2I_evex(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2, - KRegister ktmp1, KRegister ktmp2, AddressLiteral float_sign_flip, - Register scratch, int vec_enc) { +void C2_MacroAssembler::vector_castF2I_evex(XMMRegister dst, XMMRegister src, AddressLiteral float_sign_flip, int vec_enc, + XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2, Register rscratch) { + assert(rscratch != noreg || always_reachable(float_sign_flip), "missing"); + vcvttps2dq(dst, src, vec_enc); - vector_cast_float_special_cases_evex(dst, src, xtmp1, xtmp2, ktmp1, ktmp2, scratch, float_sign_flip, vec_enc); + vector_cast_float_special_cases_evex(dst, src, float_sign_flip, vec_enc, + xtmp1, xtmp2, ktmp1, ktmp2, 
rscratch); } -void C2_MacroAssembler::vector_castF2L_evex(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2, - KRegister ktmp1, KRegister ktmp2, AddressLiteral double_sign_flip, - Register scratch, int vec_enc) { +void C2_MacroAssembler::vector_castF2L_evex(XMMRegister dst, XMMRegister src, AddressLiteral float_sign_flip, int vec_enc, + XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2, Register rscratch) { + assert(rscratch != noreg || always_reachable(float_sign_flip), "missing"); + evcvttps2qq(dst, src, vec_enc); - vector_cast_float_to_long_special_cases_evex(dst, src, xtmp1, xtmp2, ktmp1, ktmp2, scratch, double_sign_flip, vec_enc); + vector_cast_float_to_long_special_cases_evex(dst, src, float_sign_flip, vec_enc, + xtmp1, xtmp2, ktmp1, ktmp2, rscratch); } -void C2_MacroAssembler::vector_castD2X_evex(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, XMMRegister xtmp1, - XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2, - AddressLiteral double_sign_flip, Register scratch, int vec_enc) { - vector_castD2L_evex(dst, src, xtmp1, xtmp2, ktmp1, ktmp2, double_sign_flip, scratch, vec_enc); +void C2_MacroAssembler::vector_castD2X_evex(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, AddressLiteral double_sign_flip, int vec_enc, + XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2, Register rscratch) { + assert(rscratch != noreg || always_reachable(double_sign_flip), "missing"); + + vector_castD2L_evex(dst, src, double_sign_flip, vec_enc, + xtmp1, xtmp2, ktmp1, ktmp2, rscratch); if (to_elem_bt != T_LONG) { switch(to_elem_bt) { case T_INT: @@ -4519,53 +4534,59 @@ void C2_MacroAssembler::vector_castD2X_evex(BasicType to_elem_bt, XMMRegister ds } #ifdef _LP64 -void C2_MacroAssembler::vector_round_double_evex(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2, - KRegister ktmp1, KRegister ktmp2, AddressLiteral double_sign_flip, - AddressLiteral new_mxcsr, Register scratch, int vec_enc) { +void C2_MacroAssembler::vector_round_double_evex(XMMRegister dst, XMMRegister src, + AddressLiteral double_sign_flip, AddressLiteral new_mxcsr, int vec_enc, + Register tmp, XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2) { // Perform floor(val+0.5) operation under the influence of MXCSR.RC mode roundTowards -inf. // and re-instantiate original MXCSR.RC mode after that. 
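// (Worked example of the trick: with MXCSR.RC forced to round toward
//  negative infinity, the conversion of val + 0.5 behaves as
//  floor(val + 0.5), matching Java's Math.round tie handling:
//  2.5 -> 3, -2.5 -> -2, -2.6 -> -3.)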
- ExternalAddress mxcsr_std(StubRoutines::x86::addr_mxcsr_std()); - ldmxcsr(new_mxcsr, scratch); - mov64(scratch, julong_cast(0.5L)); - evpbroadcastq(xtmp1, scratch, vec_enc); + ldmxcsr(new_mxcsr, tmp /*rscratch*/); + + mov64(tmp, julong_cast(0.5L)); + evpbroadcastq(xtmp1, tmp, vec_enc); vaddpd(xtmp1, src , xtmp1, vec_enc); evcvtpd2qq(dst, xtmp1, vec_enc); - vector_cast_double_special_cases_evex(dst, src, xtmp1, xtmp2, ktmp1, ktmp2, scratch, double_sign_flip, vec_enc); - ldmxcsr(mxcsr_std, scratch); + vector_cast_double_special_cases_evex(dst, src, double_sign_flip, vec_enc, + xtmp1, xtmp2, ktmp1, ktmp2, tmp); + + ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()), tmp /*rscratch*/); } -void C2_MacroAssembler::vector_round_float_evex(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2, - KRegister ktmp1, KRegister ktmp2, AddressLiteral float_sign_flip, - AddressLiteral new_mxcsr, Register scratch, int vec_enc) { +void C2_MacroAssembler::vector_round_float_evex(XMMRegister dst, XMMRegister src, + AddressLiteral float_sign_flip, AddressLiteral new_mxcsr, int vec_enc, + Register tmp, XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2) { // Perform floor(val+0.5) operation under the influence of MXCSR.RC mode roundTowards -inf. // and re-instantiate original MXCSR.RC mode after that. - ExternalAddress mxcsr_std(StubRoutines::x86::addr_mxcsr_std()); - ldmxcsr(new_mxcsr, scratch); - movl(scratch, jint_cast(0.5)); - movq(xtmp1, scratch); + ldmxcsr(new_mxcsr, tmp /*rscratch*/); + + movl(tmp, jint_cast(0.5)); + movq(xtmp1, tmp); vbroadcastss(xtmp1, xtmp1, vec_enc); vaddps(xtmp1, src , xtmp1, vec_enc); vcvtps2dq(dst, xtmp1, vec_enc); - vector_cast_float_special_cases_evex(dst, src, xtmp1, xtmp2, ktmp1, ktmp2, scratch, float_sign_flip, vec_enc); - ldmxcsr(mxcsr_std, scratch); + vector_cast_float_special_cases_evex(dst, src, float_sign_flip, vec_enc, + xtmp1, xtmp2, ktmp1, ktmp2, tmp); + + ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()), tmp /*rscratch*/); } -void C2_MacroAssembler::vector_round_float_avx(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2, - XMMRegister xtmp3, XMMRegister xtmp4, AddressLiteral float_sign_flip, - AddressLiteral new_mxcsr, Register scratch, int vec_enc) { +void C2_MacroAssembler::vector_round_float_avx(XMMRegister dst, XMMRegister src, + AddressLiteral float_sign_flip, AddressLiteral new_mxcsr, int vec_enc, + Register tmp, XMMRegister xtmp1, XMMRegister xtmp2, XMMRegister xtmp3, XMMRegister xtmp4) { // Perform floor(val+0.5) operation under the influence of MXCSR.RC mode roundTowards -inf. // and re-instantiate original MXCSR.RC mode after that. 
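// (Mechanically: 'new_mxcsr' points at a pre-built 32-bit MXCSR image whose
//  RC field selects round-down; once the rounding is done, the standard
//  control word is reloaded from StubRoutines::x86::addr_mxcsr_std(), as the
//  trailing ldmxcsr in this function shows.)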
- ExternalAddress mxcsr_std(StubRoutines::x86::addr_mxcsr_std()); - ldmxcsr(new_mxcsr, scratch); - movl(scratch, jint_cast(0.5)); - movq(xtmp1, scratch); + ldmxcsr(new_mxcsr, tmp /*rscratch*/); + + movl(tmp, jint_cast(0.5)); + movq(xtmp1, tmp); vbroadcastss(xtmp1, xtmp1, vec_enc); vaddps(xtmp1, src , xtmp1, vec_enc); vcvtps2dq(dst, xtmp1, vec_enc); - vector_cast_float_special_cases_avx(dst, src, xtmp1, xtmp2, xtmp3, xtmp4, scratch, float_sign_flip, vec_enc); - ldmxcsr(mxcsr_std, scratch); + vector_cast_float_special_cases_avx(dst, src, float_sign_flip, vec_enc, + xtmp1, xtmp2, xtmp3, xtmp4, tmp); + + ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()), tmp /*rscratch*/); } -#endif +#endif // _LP64 void C2_MacroAssembler::vector_unsigned_cast(XMMRegister dst, XMMRegister src, int vlen_enc, BasicType from_elem_bt, BasicType to_elem_bt) { @@ -5125,12 +5146,14 @@ void C2_MacroAssembler::vector_reverse_bit(BasicType bt, XMMRegister dst, XMMReg } } -void C2_MacroAssembler::vector_reverse_bit_gfni(BasicType bt, XMMRegister dst, XMMRegister src, - XMMRegister xtmp, AddressLiteral mask, Register rtmp, int vec_enc) { +void C2_MacroAssembler::vector_reverse_bit_gfni(BasicType bt, XMMRegister dst, XMMRegister src, AddressLiteral mask, int vec_enc, + XMMRegister xtmp, Register rscratch) { + assert(VM_Version::supports_gfni(), ""); + assert(rscratch != noreg || always_reachable(mask), "missing"); + // Galois field instruction based bit reversal based on following algorithm. // http://0x80.pl/articles/avx512-galois-field-for-bit-shuffling.html - assert(VM_Version::supports_gfni(), ""); - vpbroadcastq(xtmp, mask, vec_enc, rtmp); + vpbroadcastq(xtmp, mask, vec_enc, rscratch); vgf2p8affineqb(xtmp, src, xtmp, 0, vec_enc); vector_reverse_byte(bt, dst, xtmp, vec_enc); } diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp index 5628429158a94..a7c5cee584782 100644 --- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp +++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp @@ -148,15 +148,16 @@ #endif // blend - void evpcmp(BasicType typ, KRegister kdmask, KRegister ksmask, XMMRegister src1, AddressLiteral adr, int comparison, int vector_len, Register rscratch = rscratch1); - void evpcmp(BasicType typ, KRegister kdmask, KRegister ksmask, XMMRegister src1, XMMRegister src2, int comparison, int vector_len); + void evpcmp(BasicType typ, KRegister kdmask, KRegister ksmask, XMMRegister src1, XMMRegister src2, int comparison, int vector_len); + void evpcmp(BasicType typ, KRegister kdmask, KRegister ksmask, XMMRegister src1, AddressLiteral src2, int comparison, int vector_len, Register rscratch = noreg); void evpblend(BasicType typ, XMMRegister dst, KRegister kmask, XMMRegister src1, XMMRegister src2, bool merge, int vector_len); + void load_vector(XMMRegister dst, Address src, int vlen_in_bytes); + void load_vector(XMMRegister dst, AddressLiteral src, int vlen_in_bytes, Register rscratch = noreg); + void load_vector_mask(XMMRegister dst, XMMRegister src, int vlen_in_bytes, BasicType elem_bt, bool is_legacy); - void load_vector_mask(KRegister dst, XMMRegister src, XMMRegister xtmp, bool novlbwdq, int vlen_enc); + void load_vector_mask(KRegister dst, XMMRegister src, XMMRegister xtmp, bool novlbwdq, int vlen_enc); - void load_vector(XMMRegister dst, Address src, int vlen_in_bytes); - void load_vector(XMMRegister dst, AddressLiteral src, int vlen_in_bytes, Register rscratch = rscratch1); void load_constant_vector(BasicType bt, XMMRegister dst, InternalAddress src, 
int vlen); void load_iota_indices(XMMRegister dst, int vlen_in_bytes); @@ -307,60 +308,47 @@ void masked_op(int ideal_opc, int mask_len, KRegister dst, KRegister src1, KRegister src2); - void vector_castF2I_avx(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, - XMMRegister xtmp2, XMMRegister xtmp3, XMMRegister xtmp4, - AddressLiteral float_sign_flip, Register scratch, int vec_enc); + void vector_castF2I_avx(XMMRegister dst, XMMRegister src, AddressLiteral float_sign_flip, int vec_enc, + XMMRegister xtmp1, XMMRegister xtmp2, XMMRegister xtmp3, XMMRegister xtmp4, Register rscratch = noreg); - void vector_castF2I_evex(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2, - KRegister ktmp1, KRegister ktmp2, AddressLiteral float_sign_flip, - Register scratch, int vec_enc); + void vector_castF2I_evex(XMMRegister dst, XMMRegister src, AddressLiteral float_sign_flip, int vec_enc, + XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2, Register rscratch = noreg); - void vector_castF2L_evex(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2, - KRegister ktmp1, KRegister ktmp2, AddressLiteral double_sign_flip, - Register scratch, int vec_enc); + void vector_castF2L_evex(XMMRegister dst, XMMRegister src, AddressLiteral double_sign_flip, int vec_enc, + XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2, Register rscratch = noreg); - void vector_castD2L_evex(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2, - KRegister ktmp1, KRegister ktmp2, AddressLiteral double_sign_flip, - Register scratch, int vec_enc); + void vector_castD2L_evex(XMMRegister dst, XMMRegister src, AddressLiteral double_sign_flip, int vec_enc, + XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2, Register rscratch = noreg ); - void vector_castD2X_evex(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, XMMRegister xtmp1, - XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2, AddressLiteral double_sign_flip, - Register scratch, int vec_enc); + void vector_castD2X_evex(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, AddressLiteral double_sign_flip, int vec_enc, + XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2, Register rscratch = noreg); - void vector_unsigned_cast(XMMRegister dst, XMMRegister src, int vlen_enc, - BasicType from_elem_bt, BasicType to_elem_bt); + void vector_unsigned_cast(XMMRegister dst, XMMRegister src, int vlen_enc, BasicType from_elem_bt, BasicType to_elem_bt); - void vector_cast_double_special_cases_evex(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2, - KRegister ktmp1, KRegister ktmp2, Register scratch, AddressLiteral double_sign_flip, - int vec_enc); + void vector_cast_double_special_cases_evex(XMMRegister dst, XMMRegister src, AddressLiteral double_sign_flip, int vec_enc, + XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2, Register rscratch = noreg ); - void vector_cast_float_special_cases_evex(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2, - KRegister ktmp1, KRegister ktmp2, Register scratch, AddressLiteral float_sign_flip, - int vec_enc); + void vector_cast_float_special_cases_evex(XMMRegister dst, XMMRegister src, AddressLiteral float_sign_flip, int vec_enc, + XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2, Register rscratch = noreg); - void vector_cast_float_to_long_special_cases_evex(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, - XMMRegister xtmp2, 
KRegister ktmp1, KRegister ktmp2, - Register scratch, AddressLiteral double_sign_flip, - int vec_enc); + void vector_cast_float_to_long_special_cases_evex(XMMRegister dst, XMMRegister src, AddressLiteral double_sign_flip, int vec_enc, + XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2, + Register rscratch = noreg); - void vector_cast_float_special_cases_avx(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, - XMMRegister xtmp2, XMMRegister xtmp3, XMMRegister xtmp4, - Register scratch, AddressLiteral float_sign_flip, - int vec_enc); + void vector_cast_float_special_cases_avx(XMMRegister dst, XMMRegister src, AddressLiteral float_sign_flip, int vec_enc, + XMMRegister xtmp1, XMMRegister xtmp2, XMMRegister xtmp3, XMMRegister xtmp4, + Register rscratch = noreg); #ifdef _LP64 - void vector_round_double_evex(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2, - KRegister ktmp1, KRegister ktmp2, AddressLiteral double_sign_flip, - AddressLiteral new_mxcsr, Register scratch, int vec_enc); + void vector_round_double_evex(XMMRegister dst, XMMRegister src, AddressLiteral double_sign_flip, AddressLiteral new_mxcsr, int vec_enc, + Register tmp, XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2); - void vector_round_float_evex(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2, - KRegister ktmp1, KRegister ktmp2, AddressLiteral double_sign_flip, - AddressLiteral new_mxcsr, Register scratch, int vec_enc); + void vector_round_float_evex(XMMRegister dst, XMMRegister src, AddressLiteral double_sign_flip, AddressLiteral new_mxcsr, int vec_enc, + Register tmp, XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2); - void vector_round_float_avx(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2, - XMMRegister xtmp3, XMMRegister xtmp4, AddressLiteral float_sign_flip, - AddressLiteral new_mxcsr, Register scratch, int vec_enc); -#endif + void vector_round_float_avx(XMMRegister dst, XMMRegister src, AddressLiteral float_sign_flip, AddressLiteral new_mxcsr, int vec_enc, + Register tmp, XMMRegister xtmp1, XMMRegister xtmp2, XMMRegister xtmp3, XMMRegister xtmp4); +#endif // _LP64 void udivI(Register rax, Register divisor, Register rdx); void umodI(Register rax, Register divisor, Register rdx); @@ -385,8 +373,8 @@ void vector_reverse_bit(BasicType bt, XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2, Register rtmp, int vec_enc); - void vector_reverse_bit_gfni(BasicType bt, XMMRegister dst, XMMRegister src, XMMRegister xtmp, - AddressLiteral mask, Register rtmp, int vec_enc); + void vector_reverse_bit_gfni(BasicType bt, XMMRegister dst, XMMRegister src, AddressLiteral mask, int vec_enc, + XMMRegister xtmp, Register rscratch = noreg); void vector_reverse_byte(BasicType bt, XMMRegister dst, XMMRegister src, int vec_enc); diff --git a/src/hotspot/cpu/x86/compiledIC_x86.cpp b/src/hotspot/cpu/x86/compiledIC_x86.cpp index b8ad469805ddc..3254adf941293 100644 --- a/src/hotspot/cpu/x86/compiledIC_x86.cpp +++ b/src/hotspot/cpu/x86/compiledIC_x86.cpp @@ -127,7 +127,7 @@ void CompiledDirectStaticCall::verify() { _call->verify_alignment(); #ifdef ASSERT - CodeBlob *cb = CodeCache::find_blob_unsafe((address) _call); + CodeBlob *cb = CodeCache::find_blob((address) _call); assert(cb != NULL, "sanity"); #endif diff --git a/src/hotspot/cpu/x86/downcallLinker_x86_64.cpp b/src/hotspot/cpu/x86/downcallLinker_x86_64.cpp index d2fab74c2884e..5d1e5b280af17 100644 --- 
a/src/hotspot/cpu/x86/downcallLinker_x86_64.cpp +++ b/src/hotspot/cpu/x86/downcallLinker_x86_64.cpp @@ -190,7 +190,7 @@ void DowncallStubGenerator::generate() { address the_pc = __ pc(); __ block_comment("{ thread java2native"); - __ set_last_Java_frame(rsp, rbp, (address)the_pc); + __ set_last_Java_frame(rsp, rbp, (address)the_pc, rscratch1); OopMap* map = new OopMap(_framesize, 0); _oop_maps->add_gc_map(the_pc - start, map); @@ -246,7 +246,7 @@ void DowncallStubGenerator::generate() { } __ block_comment("{ thread native2java"); - __ restore_cpu_control_state_after_jni(); + __ restore_cpu_control_state_after_jni(rscratch1); __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans); diff --git a/src/hotspot/cpu/x86/frame_x86.cpp b/src/hotspot/cpu/x86/frame_x86.cpp index 758f461b3215b..123942d8875c1 100644 --- a/src/hotspot/cpu/x86/frame_x86.cpp +++ b/src/hotspot/cpu/x86/frame_x86.cpp @@ -177,16 +177,11 @@ bool frame::safe_for_sender(JavaThread *thread) { } // We must always be able to find a recognizable pc - CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc); + CodeBlob* sender_blob = CodeCache::find_blob(sender_pc); if (sender_pc == NULL || sender_blob == NULL) { return false; } - // Could be a zombie method - if (sender_blob->is_zombie() || sender_blob->is_unloaded()) { - return false; - } - // Could just be some random pointer within the codeBlob if (!sender_blob->code_contains(sender_pc)) { return false; diff --git a/src/hotspot/cpu/x86/frame_x86.inline.hpp b/src/hotspot/cpu/x86/frame_x86.inline.hpp index 6b228f4a52a92..9425d17817585 100644 --- a/src/hotspot/cpu/x86/frame_x86.inline.hpp +++ b/src/hotspot/cpu/x86/frame_x86.inline.hpp @@ -153,10 +153,8 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) { DEBUG_ONLY(_frame_index = -1;) // Here's a sticky one. This constructor can be called via AsyncGetCallTrace - // when last_Java_sp is non-null but the pc fetched is junk. If we are truly - // unlucky the junk value could be to a zombied method and we'll die on the - // find_blob call. This is also why we can have no asserts on the validity - // of the pc we find here. AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler + // when last_Java_sp is non-null but the pc fetched is junk. + // AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler // -> pd_last_frame should use a specialized version of pd_last_frame which could // call a specialized frame constructor instead of this one. // Then we could use the assert below. However this assert is of somewhat dubious diff --git a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp index 6a116aa351f00..c1cba877f62ff 100644 --- a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp @@ -331,15 +331,12 @@ void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) { __ cmpptr(rbx, 0); // rbx contains the incoming method for c2i adapters. __ jcc(Assembler::equal, bad_call); -#ifdef _LP64 - Register tmp1 = rscratch1; - Register tmp2 = rscratch2; -#else - Register tmp1 = rax; - Register tmp2 = rcx; + Register tmp1 = LP64_ONLY( rscratch1 ) NOT_LP64( rax ); + Register tmp2 = LP64_ONLY( rscratch2 ) NOT_LP64( rcx ); +#ifndef _LP64 __ push(tmp1); __ push(tmp2); -#endif // _LP64 +#endif // !_LP64 // Pointer chase to the method holder to find out if the method is concurrently unloading. 
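// (Conceptually the chase walks Method* -> ConstMethod* -> ConstantPool* ->
//  pool-holder InstanceKlass* -> ClassLoaderData*, then tests whether the
//  CLD's holder is still strongly reachable: if the loader data is alive the
//  method cannot be unloaded concurrently and the call may proceed.)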
Label method_live; diff --git a/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp index f314cac5980b7..8d8753647456c 100644 --- a/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp @@ -112,7 +112,7 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob // entry and that entry is not properly handled by the relocation code. AddressLiteral cardtable((address)byte_map_base, relocInfo::none); Address index(noreg, obj, Address::times_1); - card_addr = __ as_Address(ArrayAddress(cardtable, index)); + card_addr = __ as_Address(ArrayAddress(cardtable, index), rscratch1); } int dirty = CardTable::dirty_card_val(); diff --git a/src/hotspot/cpu/x86/interp_masm_x86.cpp b/src/hotspot/cpu/x86/interp_masm_x86.cpp index 516a502d4c98e..70338f3420826 100644 --- a/src/hotspot/cpu/x86/interp_masm_x86.cpp +++ b/src/hotspot/cpu/x86/interp_masm_x86.cpp @@ -59,8 +59,7 @@ void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& md jmpb(next); bind(update); - Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); - load_klass(obj, obj, tmp_load_klass); + load_klass(obj, obj, rscratch1); xorptr(obj, mdo_addr); testptr(obj, TypeEntries::type_klass_mask); @@ -880,13 +879,13 @@ void InterpreterMacroAssembler::dispatch_base(TosState state, jccb(Assembler::zero, no_safepoint); ArrayAddress dispatch_addr(ExternalAddress((address)safepoint_table), index); - jump(dispatch_addr); + jump(dispatch_addr, noreg); bind(no_safepoint); } { ArrayAddress dispatch_addr(ExternalAddress((address)table), index); - jump(dispatch_addr); + jump(dispatch_addr, noreg); } #endif // _LP64 } @@ -1003,7 +1002,7 @@ void InterpreterMacroAssembler::remove_activation( jmp(fast_path); bind(slow_path); push(state); - set_last_Java_frame(rthread, noreg, rbp, (address)pc()); + set_last_Java_frame(rthread, noreg, rbp, (address)pc(), rscratch1); super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), rthread); NOT_LP64(get_thread(rthread);) // call_VM clobbered it, restore reset_last_Java_frame(rthread, true); @@ -1207,7 +1206,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) { const Register swap_reg = rax; // Must use rax for cmpxchg instruction const Register tmp_reg = rbx; const Register obj_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop - const Register rklass_decode_tmp = LP64_ONLY(rscratch1) NOT_LP64(noreg); + const Register rklass_decode_tmp = rscratch1; const int obj_offset = BasicObjectLock::obj_offset_in_bytes(); const int lock_offset = BasicObjectLock::lock_offset_in_bytes (); @@ -2012,7 +2011,7 @@ void InterpreterMacroAssembler::notify_method_entry() { } { - SkipIfEqual skip(this, &DTraceMethodProbes, false); + SkipIfEqual skip(this, &DTraceMethodProbes, false, rscratch1); NOT_LP64(get_thread(rthread);) get_method(rarg); call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), @@ -2057,7 +2056,7 @@ void InterpreterMacroAssembler::notify_method_exit( } { - SkipIfEqual skip(this, &DTraceMethodProbes, false); + SkipIfEqual skip(this, &DTraceMethodProbes, false, rscratch1); push(state); NOT_LP64(get_thread(rthread);) get_method(rarg); diff --git a/src/hotspot/cpu/x86/jniFastGetField_x86_64.cpp b/src/hotspot/cpu/x86/jniFastGetField_x86_64.cpp index d45860c7c46aa..c79cce0d4b76d 100644 --- a/src/hotspot/cpu/x86/jniFastGetField_x86_64.cpp +++ 
b/src/hotspot/cpu/x86/jniFastGetField_x86_64.cpp @@ -48,9 +48,6 @@ static const Register robj = r9; static const Register roffset = r10; static const Register rcounter = r11; -// Warning: do not use rip relative addressing after the first counter load -// since that may scratch r10! - address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) { const char *name = NULL; switch (type) { @@ -77,10 +74,9 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) { __ jcc (Assembler::notZero, slow); if (JvmtiExport::can_post_field_access()) { - // Check to see if a field access watch has been set before we - // take the fast path. - assert_different_registers(rscratch1, robj, rcounter); // cmp32 clobbers rscratch1! - __ cmp32(ExternalAddress((address) JvmtiExport::get_field_access_count_addr()), 0); + // Check to see if a field access watch has been set before we take the fast path. + assert_different_registers(rscratch1, robj, rcounter); + __ cmp32(ExternalAddress(JvmtiExport::get_field_access_count_addr()), 0, rscratch1); __ jcc(Assembler::notZero, slow); } @@ -104,7 +100,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) { default: ShouldNotReachHere(); } - __ cmp32 (rcounter, counter); + __ cmp32 (rcounter, counter, rscratch1); __ jcc (Assembler::notEqual, slow); __ ret (0); @@ -122,7 +118,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) { default: break; } // tail call - __ jump (ExternalAddress(slow_case_addr)); + __ jump (ExternalAddress(slow_case_addr), rscratch1); __ flush (); @@ -177,7 +173,7 @@ address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) { if (JvmtiExport::can_post_field_access()) { // Check to see if a field access watch has been set before we // take the fast path. 
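// (JvmtiExport::get_field_access_count_addr() yields the address of a global
//  counter of currently armed field-access watches; any non-zero value sends
//  us to the slow path so the JVMTI FieldAccess event can be posted before
//  the field is read.)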
- __ cmp32(ExternalAddress((address) JvmtiExport::get_field_access_count_addr()), 0); + __ cmp32(ExternalAddress(JvmtiExport::get_field_access_count_addr()), 0, rscratch1); __ jcc(Assembler::notZero, slow); } @@ -196,7 +192,7 @@ address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) { case T_DOUBLE: __ movdbl (xmm0, Address(robj, roffset, Address::times_1)); break; default: ShouldNotReachHere(); } - __ cmp32 (rcounter, counter); + __ cmp32 (rcounter, counter, rscratch1); __ jcc (Assembler::notEqual, slow); __ ret (0); @@ -210,7 +206,7 @@ address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) { default: break; } // tail call - __ jump (ExternalAddress(slow_case_addr)); + __ jump (ExternalAddress(slow_case_addr), rscratch1); __ flush (); diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp index 285ee8572113b..46af749f6cc54 100644 --- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp @@ -102,7 +102,8 @@ Address MacroAssembler::as_Address(AddressLiteral adr) { return Address(adr.target(), adr.rspec()); } -Address MacroAssembler::as_Address(ArrayAddress adr) { +Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) { + assert(rscratch == noreg, ""); return Address::make_array(adr); } @@ -125,7 +126,8 @@ void MacroAssembler::cmpoop(Address src1, jobject obj) { cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate()); } -void MacroAssembler::cmpoop(Register src1, jobject obj) { +void MacroAssembler::cmpoop(Register src1, jobject obj, Register rscratch) { + assert(rscratch == noreg, "redundant"); cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate()); } @@ -161,8 +163,9 @@ void MacroAssembler::jnC2(Register tmp, Label& L) { // 32bit can do a case table jump in one instruction but we no longer allow the base // to be installed in the Address class -void MacroAssembler::jump(ArrayAddress entry) { - jmp(as_Address(entry)); +void MacroAssembler::jump(ArrayAddress entry, Register rscratch) { + assert(rscratch == noreg, "not needed"); + jmp(as_Address(entry, noreg)); } // Note: y_lo will be destroyed @@ -195,7 +198,9 @@ void MacroAssembler::lea(Register dst, AddressLiteral src) { mov_literal32(dst, (int32_t)src.target(), src.rspec()); } -void MacroAssembler::lea(Address dst, AddressLiteral adr) { +void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) { + assert(rscratch == noreg, "not needed"); + // leal(dst, as_Address(adr)); // see note in movl as to why we must use a move mov_literal32(dst, (int32_t)adr.target(), adr.rspec()); @@ -298,7 +303,8 @@ void MacroAssembler::movoop(Register dst, jobject obj) { mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate()); } -void MacroAssembler::movoop(Address dst, jobject obj) { +void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) { + assert(rscratch == noreg, "redundant"); mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate()); } @@ -306,7 +312,8 @@ void MacroAssembler::mov_metadata(Register dst, Metadata* obj) { mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate()); } -void MacroAssembler::mov_metadata(Address dst, Metadata* obj) { +void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) { + assert(rscratch == noreg, "redundant"); mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate()); } @@ -318,28 +325,32 @@ void MacroAssembler::movptr(Register 
dst, AddressLiteral src) { } } -void MacroAssembler::movptr(ArrayAddress dst, Register src) { - movl(as_Address(dst), src); +void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) { + assert(rscratch == noreg, "redundant"); + movl(as_Address(dst, noreg), src); } void MacroAssembler::movptr(Register dst, ArrayAddress src) { - movl(dst, as_Address(src)); + movl(dst, as_Address(src, noreg)); } -// src should NEVER be a real pointer. Use AddressLiteral for true pointers -void MacroAssembler::movptr(Address dst, intptr_t src) { +void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) { + assert(rscratch == noreg, "redundant"); movl(dst, src); } -void MacroAssembler::pushoop(jobject obj) { +void MacroAssembler::pushoop(jobject obj, Register rscratch) { + assert(rscratch == noreg, "redundant"); push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate()); } -void MacroAssembler::pushklass(Metadata* obj) { +void MacroAssembler::pushklass(Metadata* obj, Register rscratch) { + assert(rscratch == noreg, "redundant"); push_literal32((int32_t)obj, metadata_Relocation::spec_for_immediate()); } -void MacroAssembler::pushptr(AddressLiteral src) { +void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) { + assert(rscratch == noreg, "redundant"); if (src.is_lval()) { push_literal32((int32_t)src.target(), src.rspec()); } else { @@ -432,9 +443,9 @@ void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, } void MacroAssembler::stop(const char* msg) { - ExternalAddress message((address)msg); // push address of message - pushptr(message.addr()); + ExternalAddress message((address)msg); + pushptr(message.addr(), noreg); { Label L; call(L, relocInfo::none); bind(L); } // push eip pusha(); // push registers call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32))); @@ -444,9 +455,9 @@ void MacroAssembler::stop(const char* msg) { void MacroAssembler::warn(const char* msg) { push_CPU_state(); - ExternalAddress message((address) msg); // push address of message - pushptr(message.addr()); + ExternalAddress message((address)msg); + pushptr(message.addr(), noreg); call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning))); addl(rsp, wordSize); // discard argument @@ -479,12 +490,12 @@ Address MacroAssembler::as_Address(AddressLiteral adr) { } -Address MacroAssembler::as_Address(ArrayAddress adr) { +Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) { AddressLiteral base = adr.base(); - lea(rscratch1, base); + lea(rscratch, base); Address index = adr.index(); assert(index._disp == 0, "must not have disp"); // maybe it can? 
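// (Rationale: an ArrayAddress pairs a literal base with a scaled index, and
//  x86_64 rip-relative addressing cannot carry an index register, so the
//  base is first materialized into the scratch register and the index is
//  applied on top of it, as done below.)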
- Address array(rscratch1, index._index, index._scale, index._disp); + Address array(rscratch, index._index, index._scale, index._disp); return array; } @@ -548,7 +559,7 @@ int MacroAssembler::corrected_idivq(Register reg) { Label normal_case, special_case; // check for special case - cmp64(rax, ExternalAddress((address) &min_long)); + cmp64(rax, ExternalAddress((address) &min_long), rdx /*rscratch*/); jcc(Assembler::notEqual, normal_case); xorl(rdx, rdx); // prepare rdx for possible special case (where // remainder = 0) @@ -583,12 +594,14 @@ void MacroAssembler::decrementq(Address dst, int value) { /* else */ { subq(dst, value) ; return; } } -void MacroAssembler::incrementq(AddressLiteral dst) { +void MacroAssembler::incrementq(AddressLiteral dst, Register rscratch) { + assert(rscratch != noreg || always_reachable(dst), "missing"); + if (reachable(dst)) { incrementq(as_Address(dst)); } else { - lea(rscratch1, dst); - incrementq(Address(rscratch1, 0)); + lea(rscratch, dst); + incrementq(Address(rscratch, 0)); } } @@ -610,11 +623,11 @@ void MacroAssembler::incrementq(Address dst, int value) { // 32bit can do a case table jump in one instruction but we no longer allow the base // to be installed in the Address class -void MacroAssembler::jump(ArrayAddress entry) { - lea(rscratch1, entry.base()); +void MacroAssembler::jump(ArrayAddress entry, Register rscratch) { + lea(rscratch, entry.base()); Address dispatch = entry.index(); assert(dispatch._base == noreg, "must be"); - dispatch._base = rscratch1; + dispatch._base = rscratch; jmp(dispatch); } @@ -624,12 +637,12 @@ void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Regis } void MacroAssembler::lea(Register dst, AddressLiteral src) { - mov_literal64(dst, (intptr_t)src.target(), src.rspec()); + mov_literal64(dst, (intptr_t)src.target(), src.rspec()); } -void MacroAssembler::lea(Address dst, AddressLiteral adr) { - mov_literal64(rscratch1, (intptr_t)adr.target(), adr.rspec()); - movptr(dst, rscratch1); +void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) { + lea(rscratch, adr); + movptr(dst, rscratch); } void MacroAssembler::leave() { @@ -646,18 +659,18 @@ void MacroAssembler::movoop(Register dst, jobject obj) { mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate()); } -void MacroAssembler::movoop(Address dst, jobject obj) { - mov_literal64(rscratch1, (intptr_t)obj, oop_Relocation::spec_for_immediate()); - movq(dst, rscratch1); +void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) { + mov_literal64(rscratch, (intptr_t)obj, oop_Relocation::spec_for_immediate()); + movq(dst, rscratch); } void MacroAssembler::mov_metadata(Register dst, Metadata* obj) { mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate()); } -void MacroAssembler::mov_metadata(Address dst, Metadata* obj) { - mov_literal64(rscratch1, (intptr_t)obj, metadata_Relocation::spec_for_immediate()); - movq(dst, rscratch1); +void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) { + mov_literal64(rscratch, (intptr_t)obj, metadata_Relocation::spec_for_immediate()); + movq(dst, rscratch); } void MacroAssembler::movptr(Register dst, AddressLiteral src) { @@ -673,45 +686,40 @@ void MacroAssembler::movptr(Register dst, AddressLiteral src) { } } -void MacroAssembler::movptr(ArrayAddress dst, Register src) { - movq(as_Address(dst), src); +void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) { + movq(as_Address(dst, rscratch), src); } void 
 
 void MacroAssembler::movptr(Register dst, ArrayAddress src) {
-  movq(dst, as_Address(src));
+  movq(dst, as_Address(src, dst /*rscratch*/));
 }
 
 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
-void MacroAssembler::movptr(Address dst, intptr_t src) {
+void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) {
   if (is_simm32(src)) {
     movptr(dst, checked_cast<int32_t>(src));
   } else {
-    mov64(rscratch1, src);
-    movq(dst, rscratch1);
+    mov64(rscratch, src);
+    movq(dst, rscratch);
   }
 }
 
-// These are mostly for initializing NULL
-void MacroAssembler::movptr(Address dst, int32_t src) {
-  movslq(dst, src);
-}
-
-void MacroAssembler::pushoop(jobject obj) {
-  movoop(rscratch1, obj);
-  push(rscratch1);
+void MacroAssembler::pushoop(jobject obj, Register rscratch) {
+  movoop(rscratch, obj);
+  push(rscratch);
 }
 
-void MacroAssembler::pushklass(Metadata* obj) {
-  mov_metadata(rscratch1, obj);
-  push(rscratch1);
+void MacroAssembler::pushklass(Metadata* obj, Register rscratch) {
+  mov_metadata(rscratch, obj);
+  push(rscratch);
 }
 
-void MacroAssembler::pushptr(AddressLiteral src) {
-  lea(rscratch1, src);
+void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) {
+  lea(rscratch, src);
   if (src.is_lval()) {
-    push(rscratch1);
+    push(rscratch);
   } else {
-    pushq(Address(rscratch1, 0));
+    pushq(Address(rscratch, 0));
   }
 }
 
@@ -721,28 +729,9 @@ void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
 
 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                          Register last_java_fp,
-                                         address last_java_pc) {
-  vzeroupper();
-  // determine last_java_sp register
-  if (!last_java_sp->is_valid()) {
-    last_java_sp = rsp;
-  }
-
-  // last_java_fp is optional
-  if (last_java_fp->is_valid()) {
-    movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()),
-           last_java_fp);
-  }
-
-  // last_java_pc is optional
-  if (last_java_pc != NULL) {
-    Address java_pc(r15_thread,
-                    JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
-    lea(rscratch1, InternalAddress(last_java_pc));
-    movptr(java_pc, rscratch1);
-  }
-
-  movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
+                                         address last_java_pc,
+                                         Register rscratch) {
+  set_last_Java_frame(r15_thread, last_java_sp, last_java_fp, last_java_pc, rscratch);
 }
 
 static void pass_arg0(MacroAssembler* masm, Register arg) {
@@ -787,9 +776,10 @@ void MacroAssembler::warn(const char* msg) {
   movq(rbp, rsp);
   andq(rsp, -16); // align stack as required by push_CPU_state and call
   push_CPU_state(); // keeps alignment at 16 bytes
+
   lea(c_rarg0, ExternalAddress((address) msg));
-  lea(rax, ExternalAddress(CAST_FROM_FN_PTR(address, warning)));
-  call(rax);
+  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
+
   pop_CPU_state();
   mov(rsp, rbp);
   pop(rbp);
@@ -1197,25 +1187,29 @@ void MacroAssembler::pop_d(XMMRegister r) {
   addptr(rsp, 2 * Interpreter::stackElementSize);
 }
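
One detail worth noting in movptr(Register, ArrayAddress) above: when the destination is a general-purpose register it can double as the scratch register, since it is overwritten anyway. mov32(Register, AddressLiteral) later in this file uses the same trick. A sketch of the idiom with a hypothetical helper:

    // Hypothetical helper (illustrative only): dst doubles as the scratch,
    // so no extra register is burned on the far path.
    void MacroAssembler::load_far_dword(Register dst, AddressLiteral src) {
      lea(dst, src);               // dst temporarily holds the address...
      movl(dst, Address(dst, 0));  // ...and is then replaced by the loaded value
    }
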
 
-void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src, Register scratch_reg) {
+void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
   // Used in sign-masking with aligned address.
   assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::andpd(dst, as_Address(src));
   } else {
-    lea(scratch_reg, src);
-    Assembler::andpd(dst, Address(scratch_reg, 0));
+    lea(rscratch, src);
+    Assembler::andpd(dst, Address(rscratch, 0));
   }
 }
 
-void MacroAssembler::andps(XMMRegister dst, AddressLiteral src, Register scratch_reg) {
+void MacroAssembler::andps(XMMRegister dst, AddressLiteral src, Register rscratch) {
   // Used in sign-masking with aligned address.
   assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::andps(dst, as_Address(src));
   } else {
-    lea(scratch_reg, src);
-    Assembler::andps(dst, Address(scratch_reg, 0));
+    lea(rscratch, src);
+    Assembler::andps(dst, Address(rscratch, 0));
   }
 }
 
@@ -1228,12 +1222,14 @@ void MacroAssembler::atomic_incl(Address counter_addr) {
   incrementl(counter_addr);
 }
 
-void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register scr) {
+void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(counter_addr), "missing");
+
   if (reachable(counter_addr)) {
     atomic_incl(as_Address(counter_addr));
   } else {
-    lea(scr, counter_addr);
-    atomic_incl(Address(scr, 0));
+    lea(rscratch, counter_addr);
+    atomic_incl(Address(rscratch, 0));
   }
 }
 
@@ -1243,12 +1239,14 @@ void MacroAssembler::atomic_incq(Address counter_addr) {
   incrementq(counter_addr);
 }
 
-void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register scr) {
+void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(counter_addr), "missing");
+
   if (reachable(counter_addr)) {
     atomic_incq(as_Address(counter_addr));
   } else {
-    lea(scr, counter_addr);
-    atomic_incq(Address(scr, 0));
+    lea(rscratch, counter_addr);
+    atomic_incq(Address(rscratch, 0));
   }
 }
 #endif
 
@@ -1281,19 +1279,19 @@ void MacroAssembler::bang_stack_size(Register size, Register tmp) {
 }
 
 void MacroAssembler::reserved_stack_check() {
-    // testing if reserved zone needs to be enabled
-    Label no_reserved_zone_enabling;
-    Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
-    NOT_LP64(get_thread(rsi);)
+  // testing if reserved zone needs to be enabled
+  Label no_reserved_zone_enabling;
+  Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
+  NOT_LP64(get_thread(rsi);)
 
-    cmpptr(rsp, Address(thread, JavaThread::reserved_stack_activation_offset()));
-    jcc(Assembler::below, no_reserved_zone_enabling);
+  cmpptr(rsp, Address(thread, JavaThread::reserved_stack_activation_offset()));
+  jcc(Assembler::below, no_reserved_zone_enabling);
 
-    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), thread);
-    jump(RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry()));
-    should_not_reach_here();
+  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), thread);
+  jump(RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry()));
+  should_not_reach_here();
 
-    bind(no_reserved_zone_enabling);
+  bind(no_reserved_zone_enabling);
 }
 
 void MacroAssembler::c2bool(Register x) {
@@ -1314,12 +1312,14 @@ void MacroAssembler::call(Register entry) {
   Assembler::call(entry);
 }
 
-void MacroAssembler::call(AddressLiteral entry) {
+void MacroAssembler::call(AddressLiteral entry, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(entry), "missing");
+
   if (reachable(entry)) {
     Assembler::call_literal(entry.target(), entry.rspec());
   } else {
-    lea(rscratch1, entry);
-    Assembler::call(rscratch1);
+    lea(rscratch, entry);
+    Assembler::call(rscratch);
   }
 }
 
@@ -1549,7 +1549,7 @@ void MacroAssembler::call_VM_base(Register oop_result,
   assert(last_java_sp != rbp, "can't use ebp/rbp");
 
   // Only interpreter should have to set fp
-  set_last_Java_frame(java_thread, last_java_sp, rbp, NULL);
+  set_last_Java_frame(java_thread, last_java_sp, rbp, NULL, rscratch1);
 
   // do the call, remove parameters
   MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
@@ -1729,22 +1729,26 @@ void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
 void MacroAssembler::check_and_handle_popframe(Register java_thread) {
 }
 
-void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm) {
+void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src1), "missing");
+
   if (reachable(src1)) {
     cmpl(as_Address(src1), imm);
   } else {
-    lea(rscratch1, src1);
-    cmpl(Address(rscratch1, 0), imm);
+    lea(rscratch, src1);
+    cmpl(Address(rscratch, 0), imm);
   }
 }
 
-void MacroAssembler::cmp32(Register src1, AddressLiteral src2) {
+void MacroAssembler::cmp32(Register src1, AddressLiteral src2, Register rscratch) {
   assert(!src2.is_lval(), "use cmpptr");
+  assert(rscratch != noreg || always_reachable(src2), "missing");
+
   if (reachable(src2)) {
     cmpl(src1, as_Address(src2));
   } else {
-    lea(rscratch1, src2);
-    cmpl(src1, Address(rscratch1, 0));
+    lea(rscratch, src2);
+    cmpl(src1, Address(rscratch, 0));
   }
 }
 
@@ -1801,27 +1805,32 @@ void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst,
 }
 
-void MacroAssembler::cmp8(AddressLiteral src1, int imm) {
+void MacroAssembler::cmp8(AddressLiteral src1, int imm, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src1), "missing");
+
   if (reachable(src1)) {
     cmpb(as_Address(src1), imm);
   } else {
-    lea(rscratch1, src1);
-    cmpb(Address(rscratch1, 0), imm);
+    lea(rscratch, src1);
+    cmpb(Address(rscratch, 0), imm);
   }
 }
 
-void MacroAssembler::cmpptr(Register src1, AddressLiteral src2) {
+void MacroAssembler::cmpptr(Register src1, AddressLiteral src2, Register rscratch) {
 #ifdef _LP64
+  assert(rscratch != noreg || always_reachable(src2), "missing");
+
   if (src2.is_lval()) {
-    movptr(rscratch1, src2);
-    Assembler::cmpq(src1, rscratch1);
+    movptr(rscratch, src2);
+    Assembler::cmpq(src1, rscratch);
   } else if (reachable(src2)) {
     cmpq(src1, as_Address(src2));
   } else {
-    lea(rscratch1, src2);
-    Assembler::cmpq(src1, Address(rscratch1, 0));
+    lea(rscratch, src2);
+    Assembler::cmpq(src1, Address(rscratch, 0));
   }
 #else
+  assert(rscratch == noreg, "not needed");
   if (src2.is_lval()) {
     cmp_literal32(src1, (int32_t)src2.target(), src2.rspec());
   } else {
@@ -1830,13 +1839,14 @@ void MacroAssembler::cmpptr(Register src1, AddressLiteral src2) {
 #endif // _LP64
 }
 
-void MacroAssembler::cmpptr(Address src1, AddressLiteral src2) {
+void MacroAssembler::cmpptr(Address src1, AddressLiteral src2, Register rscratch) {
   assert(src2.is_lval(), "not a mem-mem compare");
 #ifdef _LP64
   // moves src2's literal address
-  movptr(rscratch1, src2);
-  Assembler::cmpq(src1, rscratch1);
+  movptr(rscratch, src2);
+  Assembler::cmpq(src1, rscratch);
 #else
+  assert(rscratch == noreg, "not needed");
   cmp_literal32(src1, (int32_t)src2.target(), src2.rspec());
 #endif // _LP64
 }
 
@@ -1850,20 +1860,22 @@ void MacroAssembler::cmpoop(Register src1, Address src2) {
 }
 
 #ifdef _LP64
-void MacroAssembler::cmpoop(Register src1, jobject src2) {
-  movoop(rscratch1, src2);
-  cmpptr(src1, rscratch1);
+void MacroAssembler::cmpoop(Register src1, jobject src2, Register rscratch) {
+  movoop(rscratch, src2);
+  cmpptr(src1, rscratch);
 }
 #endif
 
-void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr) {
+void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(adr), "missing");
+
   if (reachable(adr)) {
     lock();
     cmpxchgptr(reg, as_Address(adr));
   } else {
-    lea(rscratch1, adr);
+    lea(rscratch, adr);
     lock();
-    cmpxchgptr(reg, Address(rscratch1, 0));
+    cmpxchgptr(reg, Address(rscratch, 0));
   }
 }
 
@@ -1871,31 +1883,37 @@ void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
   LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr));
 }
 
-void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) {
+void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::comisd(dst, as_Address(src));
   } else {
-    lea(rscratch1, src);
-    Assembler::comisd(dst, Address(rscratch1, 0));
+    lea(rscratch, src);
+    Assembler::comisd(dst, Address(rscratch, 0));
   }
 }
 
-void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) {
+void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::comiss(dst, as_Address(src));
   } else {
-    lea(rscratch1, src);
-    Assembler::comiss(dst, Address(rscratch1, 0));
+    lea(rscratch, src);
+    Assembler::comiss(dst, Address(rscratch, 0));
  }
 }
 
-void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) {
+void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(counter_addr), "missing");
+
   Condition negated_cond = negate_condition(cond);
   Label L;
   jcc(negated_cond, L);
   pushf(); // Preserve flags
-  atomic_incl(counter_addr);
+  atomic_incl(counter_addr, rscratch);
   popf();
   bind(L);
 }
 
@@ -1971,21 +1989,25 @@ void MacroAssembler::division_with_shift (Register reg, int shift_value) {
   sarl(reg, shift_value);
 }
 
-void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src) {
+void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::divsd(dst, as_Address(src));
   } else {
-    lea(rscratch1, src);
-    Assembler::divsd(dst, Address(rscratch1, 0));
+    lea(rscratch, src);
+    Assembler::divsd(dst, Address(rscratch, 0));
   }
 }
 
-void MacroAssembler::divss(XMMRegister dst, AddressLiteral src) {
+void MacroAssembler::divss(XMMRegister dst, AddressLiteral src, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::divss(dst, as_Address(src));
  } else {
-    lea(rscratch1, src);
-    Assembler::divss(dst, Address(rscratch1, 0));
+    lea(rscratch, src);
+    Assembler::divss(dst, Address(rscratch, 0));
  }
 }
 
@@ -2095,7 +2117,7 @@ void MacroAssembler::fld_s(AddressLiteral src) {
 }
 
 void MacroAssembler::fldcw(AddressLiteral src) {
-  Assembler::fldcw(as_Address(src));
+  fldcw(as_Address(src));
 }
 
 void MacroAssembler::fpop() {
@@ -2235,17 +2257,19 @@ void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegiste
   }
 }
 
-void MacroAssembler::incrementl(AddressLiteral dst) {
+void MacroAssembler::incrementl(AddressLiteral dst, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(dst), "missing");
+
   if (reachable(dst)) {
     incrementl(as_Address(dst));
   } else {
-    lea(rscratch1, dst);
-    incrementl(Address(rscratch1, 0));
+    lea(rscratch, dst);
+    incrementl(Address(rscratch, 0));
   }
 }
 
-void MacroAssembler::incrementl(ArrayAddress dst) {
-  incrementl(as_Address(dst));
+void MacroAssembler::incrementl(ArrayAddress dst, Register rscratch) {
+  incrementl(as_Address(dst, rscratch));
 }
 
 void MacroAssembler::incrementl(Register reg, int value) {
@@ -2264,16 +2288,20 @@ void MacroAssembler::incrementl(Address dst, int value) {
   /* else */      { addl(dst, value)       ; return; }
 }
 
-void MacroAssembler::jump(AddressLiteral dst) {
+void MacroAssembler::jump(AddressLiteral dst, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(dst), "missing");
+
   if (reachable(dst)) {
     jmp_literal(dst.target(), dst.rspec());
   } else {
-    lea(rscratch1, dst);
-    jmp(rscratch1);
+    lea(rscratch, dst);
+    jmp(rscratch);
   }
 }
 
-void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) {
+void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(dst), "missing");
+
   if (reachable(dst)) {
     InstructionMark im(this);
     relocate(dst.reloc());
@@ -2296,22 +2324,20 @@ void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) {
 #endif /* ASSERT */
     Label skip;
     jccb(reverse[cc], skip);
-    lea(rscratch1, dst);
-    Assembler::jmp(rscratch1);
+    lea(rscratch, dst);
+    Assembler::jmp(rscratch);
     bind(skip);
   }
 }
 
-void MacroAssembler::fld_x(AddressLiteral src) {
-  Assembler::fld_x(as_Address(src));
-}
+void MacroAssembler::ldmxcsr(AddressLiteral src, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
 
-void MacroAssembler::ldmxcsr(AddressLiteral src, Register scratchReg) {
   if (reachable(src)) {
     Assembler::ldmxcsr(as_Address(src));
   } else {
-    lea(scratchReg, src);
-    Assembler::ldmxcsr(Address(scratchReg, 0));
+    lea(rscratch, src);
+    Assembler::ldmxcsr(Address(rscratch, 0));
   }
 }
 
@@ -2415,12 +2441,14 @@ void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in
   }
 }
 
-void MacroAssembler::mov32(AddressLiteral dst, Register src) {
+void MacroAssembler::mov32(AddressLiteral dst, Register src, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(dst), "missing");
+
   if (reachable(dst)) {
     movl(as_Address(dst), src);
   } else {
-    lea(rscratch1, dst);
-    movl(Address(rscratch1, 0), src);
+    lea(rscratch, dst);
+    movl(Address(rscratch, 0), src);
   }
 }
 
@@ -2428,8 +2456,8 @@ void MacroAssembler::mov32(Register dst, AddressLiteral src) {
   if (reachable(src)) {
     movl(dst, as_Address(src));
   } else {
-    lea(rscratch1, src);
-    movl(dst, Address(rscratch1, 0));
+    lea(dst, src);
+    movl(dst, Address(dst, 0));
   }
 }
 
@@ -2471,12 +2499,9 @@ void MacroAssembler::movbool(Address dst, Register src) {
     ShouldNotReachHere();
 }
 
-void MacroAssembler::movbyte(ArrayAddress dst, int src) {
-  movb(as_Address(dst), src);
-}
-
 void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src, Register rscratch) {
   assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     movdl(dst, as_Address(src));
   } else {
@@ -2487,6 +2512,7 @@ void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src, Register rscratc
 void MacroAssembler::movq(XMMRegister dst, AddressLiteral src, Register rscratch) {
   assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     movq(dst, as_Address(src));
   } else {
@@ -2495,7 +2521,9 @@ void MacroAssembler::movq(XMMRegister dst, AddressLiteral src, Register rscratch
   }
 }
 
-void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) {
+void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     if (UseXmmLoadAndClearUpper) {
       movsd (dst, as_Address(src));
@@ -2503,21 +2531,23 @@ void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src, Register rscratch
       movlpd(dst, as_Address(src));
     }
   } else {
-    lea(rscratch1, src);
+    lea(rscratch, src);
     if (UseXmmLoadAndClearUpper) {
-      movsd (dst, Address(rscratch1, 0));
+      movsd (dst, Address(rscratch, 0));
     } else {
-      movlpd(dst, Address(rscratch1, 0));
+      movlpd(dst, Address(rscratch, 0));
     }
   }
 }
 
-void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) {
+void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     movss(dst, as_Address(src));
   } else {
-    lea(rscratch1, src);
-    movss(dst, Address(rscratch1, 0));
+    lea(rscratch, src);
+    movss(dst, Address(rscratch, 0));
   }
 }
 
@@ -2538,6 +2568,10 @@ void MacroAssembler::movptr(Address dst, Register src) {
   LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
 }
 
+void MacroAssembler::movptr(Address dst, int32_t src) {
+  LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src));
+}
+
 void MacroAssembler::movdqu(Address dst, XMMRegister src) {
   assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15");
   Assembler::movdqu(dst, src);
@@ -2580,7 +2614,7 @@ void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src) {
 }
 
 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch) {
-  assert(rscratch != noreg || always_reachable(src), "missing"); 
+  assert(rscratch != noreg || always_reachable(src), "missing");
 
   if (reachable(src)) {
     vmovdqu(dst, as_Address(src));
@@ -2592,7 +2626,7 @@ void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, Register rscra
 }
 
 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
-  assert(rscratch != noreg || always_reachable(src), "missing"); 
+  assert(rscratch != noreg || always_reachable(src), "missing");
 
   if (vector_len == AVX_512bit) {
     evmovdquq(dst, src, AVX_512bit, rscratch);
@@ -2648,12 +2682,14 @@ void MacroAssembler::kmov(KRegister dst, Register src) {
   }
 }
 
-void MacroAssembler::kmovql(KRegister dst, AddressLiteral src, Register scratch_reg) {
+void MacroAssembler::kmovql(KRegister dst, AddressLiteral src, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     kmovql(dst, as_Address(src));
   } else {
-    lea(scratch_reg, src);
-    kmovql(dst, Address(scratch_reg, 0));
+    lea(rscratch, src);
+    kmovql(dst, Address(rscratch, 0));
   }
 }
 
@@ -2669,22 +2705,26 @@ void MacroAssembler::kmovwl(KRegister dst, AddressLiteral src, Register rscratch
 }
 
 void MacroAssembler::evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge,
-                               int vector_len, Register scratch_reg) {
+                               int vector_len, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::evmovdqub(dst, mask, as_Address(src), merge, vector_len);
   } else {
-    lea(scratch_reg, src);
-    Assembler::evmovdqub(dst, mask, Address(scratch_reg, 0), merge, vector_len);
+    lea(rscratch, src);
+    Assembler::evmovdqub(dst, mask, Address(rscratch, 0), merge, vector_len);
   }
 }
 
 void MacroAssembler::evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge,
-                               int vector_len, Register scratch_reg) {
+                               int vector_len, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::evmovdquw(dst, mask, as_Address(src), merge, vector_len);
   } else {
-    lea(scratch_reg, src);
-    Assembler::evmovdquw(dst, mask, Address(scratch_reg, 0), merge, vector_len);
+    lea(rscratch, src);
+    Assembler::evmovdquw(dst, mask, Address(rscratch, 0), merge, vector_len);
   }
 }
 
@@ -2699,8 +2739,7 @@ void MacroAssembler::evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral s
   }
 }
 
-void MacroAssembler::evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge,
-                               int vector_len, Register rscratch) {
+void MacroAssembler::evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) {
   assert(rscratch != noreg || always_reachable(src), "missing");
 
   if (reachable(src)) {
@@ -2722,12 +2761,14 @@ void MacroAssembler::evmovdquq(XMMRegister dst, AddressLiteral src, int vector_l
   }
 }
 
-void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src) {
+void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::movdqa(dst, as_Address(src));
   } else {
-    lea(rscratch1, src);
-    Assembler::movdqa(dst, Address(rscratch1, 0));
+    lea(rscratch, src);
+    Assembler::movdqa(dst, Address(rscratch, 0));
   }
 }
 
@@ -2742,16 +2783,20 @@ void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src, Register rscratc
   }
 }
 
-void MacroAssembler::movss(XMMRegister dst, AddressLiteral src) {
+void MacroAssembler::movss(XMMRegister dst, AddressLiteral src, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::movss(dst, as_Address(src));
   } else {
-    lea(rscratch1, src);
-    Assembler::movss(dst, Address(rscratch1, 0));
+    lea(rscratch, src);
+    Assembler::movss(dst, Address(rscratch, 0));
   }
 }
 
 void MacroAssembler::movddup(XMMRegister dst, AddressLiteral src, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::movddup(dst, as_Address(src));
   } else {
@@ -2761,6 +2806,8 @@ void MacroAssembler::movddup(XMMRegister dst, AddressLiteral src, Register rscra
 }
 
 void MacroAssembler::vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::vmovddup(dst, as_Address(src), vector_len);
   } else {
@@ -2780,12 +2827,14 @@ void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src, Register rscratc
   }
 }
 
-void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src) {
+void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::mulss(dst, as_Address(src));
   } else {
-    lea(rscratch1, src);
-    Assembler::mulss(dst, Address(rscratch1, 0));
+    lea(rscratch, src);
+    Assembler::mulss(dst, Address(rscratch, 0));
   }
 }
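
Because the header gives movflt/movdbl (and most of the other accessors) an rscratch parameter defaulting to noreg, existing call sites keep compiling; only those that may face a far constant must change. A sketch of both shapes, where one_half is an assumed stub constant:

    // Illustrative call sites only; 'one_half' is an assumed constant address.
    __ movdbl(xmm1, ExternalAddress(one_half));            // fine while RIP-reachable; the new
                                                           // assert fires in debug builds if not
    __ movdbl(xmm1, ExternalAddress(one_half), rscratch1); // required once it may be out of range
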
 
@@ -3025,7 +3074,8 @@ void MacroAssembler::safepoint_poll(Label& slow_path, Register thread_reg, bool
 void MacroAssembler::set_last_Java_frame(Register java_thread,
                                          Register last_java_sp,
                                          Register last_java_fp,
-                                         address last_java_pc) {
+                                         address last_java_pc,
+                                         Register rscratch) {
   vzeroupper();
   // determine java_thread register
   if (!java_thread->is_valid()) {
@@ -3036,20 +3086,15 @@ void MacroAssembler::set_last_Java_frame(Register java_thread,
   if (!last_java_sp->is_valid()) {
     last_java_sp = rsp;
   }
-
   // last_java_fp is optional
-
   if (last_java_fp->is_valid()) {
     movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
   }
-
   // last_java_pc is optional
-
   if (last_java_pc != NULL) {
-    lea(Address(java_thread,
-                JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()),
-        InternalAddress(last_java_pc));
-
+    Address java_pc(java_thread,
+                    JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
+    lea(java_pc, InternalAddress(last_java_pc), rscratch);
   }
   movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
 }
 
@@ -3125,30 +3170,25 @@ void MacroAssembler::ptest(XMMRegister dst, XMMRegister src) {
   Assembler::ptest(dst, src);
 }
 
-void MacroAssembler::sqrtsd(XMMRegister dst, AddressLiteral src) {
-  if (reachable(src)) {
-    Assembler::sqrtsd(dst, as_Address(src));
-  } else {
-    lea(rscratch1, src);
-    Assembler::sqrtsd(dst, Address(rscratch1, 0));
-  }
-}
+void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
 
-void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src) {
   if (reachable(src)) {
     Assembler::sqrtss(dst, as_Address(src));
   } else {
-    lea(rscratch1, src);
-    Assembler::sqrtss(dst, Address(rscratch1, 0));
+    lea(rscratch, src);
+    Assembler::sqrtss(dst, Address(rscratch, 0));
   }
 }
 
-void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src) {
+void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::subsd(dst, as_Address(src));
   } else {
-    lea(rscratch1, src);
-    Assembler::subsd(dst, Address(rscratch1, 0));
+    lea(rscratch, src);
+    Assembler::subsd(dst, Address(rscratch, 0));
   }
 }
 
@@ -3163,30 +3203,36 @@ void MacroAssembler::roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode,
   }
 }
 
-void MacroAssembler::subss(XMMRegister dst, AddressLiteral src) {
+void MacroAssembler::subss(XMMRegister dst, AddressLiteral src, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::subss(dst, as_Address(src));
   } else {
-    lea(rscratch1, src);
-    Assembler::subss(dst, Address(rscratch1, 0));
+    lea(rscratch, src);
+    Assembler::subss(dst, Address(rscratch, 0));
   }
 }
 
-void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src) {
+void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::ucomisd(dst, as_Address(src));
   } else {
-    lea(rscratch1, src);
-    Assembler::ucomisd(dst, Address(rscratch1, 0));
+    lea(rscratch, src);
+    Assembler::ucomisd(dst, Address(rscratch, 0));
   }
 }
 
-void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src) {
+void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::ucomiss(dst, as_Address(src));
   } else {
-    lea(rscratch1, src);
-    Assembler::ucomiss(dst, Address(rscratch1, 0));
+    lea(rscratch, src);
+    Assembler::ucomiss(dst, Address(rscratch, 0));
   }
 }
 
@@ -3233,35 +3279,41 @@ void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src, Register rscratc
   }
 }
 
-void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src) {
+void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   // Used in sign-bit flipping with aligned address.
   bool aligned_adr = (((intptr_t)src.target() & 15) == 0);
   assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes");
   if (reachable(src)) {
     Assembler::pshufb(dst, as_Address(src));
   } else {
-    lea(rscratch1, src);
-    Assembler::pshufb(dst, Address(rscratch1, 0));
+    lea(rscratch, src);
+    Assembler::pshufb(dst, Address(rscratch, 0));
   }
 }
 
 // AVX 3-operands instructions
 
-void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
+void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     vaddsd(dst, nds, as_Address(src));
   } else {
-    lea(rscratch1, src);
-    vaddsd(dst, nds, Address(rscratch1, 0));
+    lea(rscratch, src);
+    vaddsd(dst, nds, Address(rscratch, 0));
   }
 }
 
-void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
+void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     vaddss(dst, nds, as_Address(src));
   } else {
-    lea(rscratch1, src);
-    vaddss(dst, nds, Address(rscratch1, 0));
+    lea(rscratch, src);
+    vaddss(dst, nds, Address(rscratch, 0));
  }
 }
 
@@ -3289,14 +3341,18 @@ void MacroAssembler::vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src
   }
 }
 
-void MacroAssembler::vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len) {
+void MacroAssembler::vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) {
   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
-  vandps(dst, nds, negate_field, vector_len);
+  assert(rscratch != noreg || always_reachable(negate_field), "missing");
+
+  vandps(dst, nds, negate_field, vector_len, rscratch);
 }
 
-void MacroAssembler::vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len) {
+void MacroAssembler::vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) {
   assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
-  vandpd(dst, nds, negate_field, vector_len);
+  assert(rscratch != noreg || always_reachable(negate_field), "missing");
+
+  vandpd(dst, nds, negate_field, vector_len, rscratch);
 }
 
 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
@@ -3320,7 +3376,7 @@ void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int v
 }
 
 void MacroAssembler::vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
-  assert(rscratch != noreg || always_reachable(src), "missing"); 
+  assert(rscratch != noreg || always_reachable(src), "missing");
 
   if (reachable(src)) {
     Assembler::vpand(dst, nds, as_Address(src), vector_len);
@@ -3331,6 +3387,8 @@ void MacroAssembler::vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src,
 }
 
 void MacroAssembler::vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::vpbroadcastd(dst, as_Address(src), vector_len);
   } else {
@@ -3340,6 +3398,8 @@ void MacroAssembler::vpbroadcastd(XMMRegister dst, AddressLiteral src, int vecto
 }
 
 void MacroAssembler::vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::vpbroadcastq(dst, as_Address(src), vector_len);
   } else {
@@ -3349,6 +3409,8 @@ void MacroAssembler::vpbroadcastq(XMMRegister dst, AddressLiteral src, int vecto
 }
 
 void MacroAssembler::vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::vbroadcastsd(dst, as_Address(src), vector_len);
   } else {
@@ -3358,6 +3420,8 @@ void MacroAssembler::vbroadcastsd(XMMRegister dst, AddressLiteral src, int vecto
 }
 
 void MacroAssembler::vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::vbroadcastss(dst, as_Address(src), vector_len);
   } else {
@@ -3376,53 +3440,62 @@ void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src,
   Assembler::vpcmpeqw(dst, nds, src, vector_len);
 }
 
-void MacroAssembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds,
-                               AddressLiteral src, int vector_len, Register scratch_reg) {
+void MacroAssembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::evpcmpeqd(kdst, mask, nds, as_Address(src), vector_len);
   } else {
-    lea(scratch_reg, src);
-    Assembler::evpcmpeqd(kdst, mask, nds, Address(scratch_reg, 0), vector_len);
+    lea(rscratch, src);
+    Assembler::evpcmpeqd(kdst, mask, nds, Address(rscratch, 0), vector_len);
   }
 }
 
 void MacroAssembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
-                             int comparison, bool is_signed, int vector_len, Register scratch_reg) {
+                             int comparison, bool is_signed, int vector_len, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::evpcmpd(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
   } else {
-    lea(scratch_reg, src);
-    Assembler::evpcmpd(kdst, mask, nds, Address(scratch_reg, 0), comparison, is_signed, vector_len);
+    lea(rscratch, src);
+    Assembler::evpcmpd(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
   }
 }
 
 void MacroAssembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
-                             int comparison, bool is_signed, int vector_len, Register scratch_reg) {
+                             int comparison, bool is_signed, int vector_len, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::evpcmpq(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
   } else {
-    lea(scratch_reg, src);
-    Assembler::evpcmpq(kdst, mask, nds, Address(scratch_reg, 0), comparison, is_signed, vector_len);
+    lea(rscratch, src);
+    Assembler::evpcmpq(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
   }
 }
 
 void MacroAssembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
-                             int comparison, bool is_signed, int vector_len, Register scratch_reg) {
+                             int comparison, bool is_signed, int vector_len, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::evpcmpb(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
   } else {
-    lea(scratch_reg, src);
-    Assembler::evpcmpb(kdst, mask, nds, Address(scratch_reg, 0), comparison, is_signed, vector_len);
+    lea(rscratch, src);
+    Assembler::evpcmpb(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
   }
 }
 
 void MacroAssembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
-                             int comparison, bool is_signed, int vector_len, Register scratch_reg) {
+                             int comparison, bool is_signed, int vector_len, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::evpcmpw(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len);
   } else {
-    lea(scratch_reg, src);
-    Assembler::evpcmpw(kdst, mask, nds, Address(scratch_reg, 0), comparison, is_signed, vector_len);
+    lea(rscratch, src);
+    Assembler::evpcmpw(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len);
   }
 }
 
@@ -3491,13 +3564,15 @@ void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int
   Assembler::vpmullw(dst, nds, src, vector_len);
 }
 
-void MacroAssembler::vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg) {
+void MacroAssembler::vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
   assert((UseAVX > 0), "AVX support is needed");
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::vpmulld(dst, nds, as_Address(src), vector_len);
   } else {
-    lea(scratch_reg, src);
-    Assembler::vpmulld(dst, nds, Address(scratch_reg, 0), vector_len);
+    lea(rscratch, src);
+    Assembler::vpmulld(dst, nds, Address(rscratch, 0), vector_len);
   }
 }
 
@@ -3587,136 +3662,165 @@ void MacroAssembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
   Assembler::pshuflw(dst, src, mode);
 }
 
-void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg) {
+void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     vandpd(dst, nds, as_Address(src), vector_len);
   } else {
-    lea(scratch_reg, src);
-    vandpd(dst, nds, Address(scratch_reg, 0), vector_len);
+    lea(rscratch, src);
+    vandpd(dst, nds, Address(rscratch, 0), vector_len);
   }
 }
 
-void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg) {
+void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     vandps(dst, nds, as_Address(src), vector_len);
   } else {
-    lea(scratch_reg, src);
-    vandps(dst, nds, Address(scratch_reg, 0), vector_len);
+    lea(rscratch, src);
+    vandps(dst, nds, Address(rscratch, 0), vector_len);
   }
 }
 
 void MacroAssembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src,
-                            bool merge, int vector_len, Register scratch_reg) {
+                            bool merge, int vector_len, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::evpord(dst, mask, nds, as_Address(src), merge, vector_len);
   } else {
-    lea(scratch_reg, src);
-    Assembler::evpord(dst, mask, nds, Address(scratch_reg, 0), merge, vector_len);
+    lea(rscratch, src);
+    Assembler::evpord(dst, mask, nds, Address(rscratch, 0), merge, vector_len);
  }
 }
 
-void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
+void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     vdivsd(dst, nds, as_Address(src));
   } else {
-    lea(rscratch1, src);
-    vdivsd(dst, nds, Address(rscratch1, 0));
+    lea(rscratch, src);
+    vdivsd(dst, nds, Address(rscratch, 0));
   }
 }
 
-void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
+void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     vdivss(dst, nds, as_Address(src));
   } else {
-    lea(rscratch1, src);
-    vdivss(dst, nds, Address(rscratch1, 0));
+    lea(rscratch, src);
+    vdivss(dst, nds, Address(rscratch, 0));
   }
 }
 
-void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
+void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
  if (reachable(src)) {
    vmulsd(dst, nds, as_Address(src));
  } else {
-    lea(rscratch1, src);
-    vmulsd(dst, nds, Address(rscratch1, 0));
+    lea(rscratch, src);
+    vmulsd(dst, nds, Address(rscratch, 0));
  }
 }
 
-void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
+void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     vmulss(dst, nds, as_Address(src));
   } else {
-    lea(rscratch1, src);
-    vmulss(dst, nds, Address(rscratch1, 0));
+    lea(rscratch, src);
+    vmulss(dst, nds, Address(rscratch, 0));
   }
 }
 
-void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
+void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     vsubsd(dst, nds, as_Address(src));
   } else {
-    lea(rscratch1, src);
-    vsubsd(dst, nds, Address(rscratch1, 0));
+    lea(rscratch, src);
+    vsubsd(dst, nds, Address(rscratch, 0));
   }
 }
 
-void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
+void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     vsubss(dst, nds, as_Address(src));
   } else {
-    lea(rscratch1, src);
-    vsubss(dst, nds, Address(rscratch1, 0));
+    lea(rscratch, src);
+    vsubss(dst, nds, Address(rscratch, 0));
   }
 }
 
-void MacroAssembler::vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
+void MacroAssembler::vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
-  vxorps(dst, nds, src, Assembler::AVX_128bit);
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
+  vxorps(dst, nds, src, Assembler::AVX_128bit, rscratch);
 }
 
-void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
+void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) {
   assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
-  vxorpd(dst, nds, src, Assembler::AVX_128bit);
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
+  vxorpd(dst, nds, src, Assembler::AVX_128bit, rscratch);
 }
 
-void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg) {
+void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     vxorpd(dst, nds, as_Address(src), vector_len);
   } else {
-    lea(scratch_reg, src);
-    vxorpd(dst, nds, Address(scratch_reg, 0), vector_len);
+    lea(rscratch, src);
+    vxorpd(dst, nds, Address(rscratch, 0), vector_len);
   }
 }
 
-void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg) {
+void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     vxorps(dst, nds, as_Address(src), vector_len);
   } else {
-    lea(scratch_reg, src);
-    vxorps(dst, nds, Address(scratch_reg, 0), vector_len);
+    lea(rscratch, src);
+    vxorps(dst, nds, Address(rscratch, 0), vector_len);
   }
 }
 
-void MacroAssembler::vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg) {
+void MacroAssembler::vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (UseAVX > 1 || (vector_len < 1)) {
     if (reachable(src)) {
       Assembler::vpxor(dst, nds, as_Address(src), vector_len);
     } else {
-      lea(scratch_reg, src);
-      Assembler::vpxor(dst, nds, Address(scratch_reg, 0), vector_len);
+      lea(rscratch, src);
+      Assembler::vpxor(dst, nds, Address(rscratch, 0), vector_len);
     }
-  }
-  else {
-    MacroAssembler::vxorpd(dst, nds, src, vector_len, scratch_reg);
+  } else {
+    MacroAssembler::vxorpd(dst, nds, src, vector_len, rscratch);
   }
 }
 
-void MacroAssembler::vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg) {
+void MacroAssembler::vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
+  assert(rscratch != noreg || always_reachable(src), "missing");
+
   if (reachable(src)) {
     Assembler::vpermd(dst, nds, as_Address(src), vector_len);
   } else {
-    lea(scratch_reg, src);
-    Assembler::vpermd(dst, nds, Address(scratch_reg, 0), vector_len);
+    lea(rscratch, src);
+    Assembler::vpermd(dst, nds, Address(rscratch, 0), vector_len);
   }
 }
 
@@ -4381,6 +4485,13 @@ void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
 
 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
   if (!VerifyOops) return;
+  BLOCK_COMMENT("verify_oop {");
+#ifdef _LP64
+  push(rscratch1);
+#endif
+  push(rax); // save rax
+  push(reg); // pass register argument
+
   // Pass register number to verify_oop_subroutine
   const char* b = NULL;
   {
@@ -4389,17 +4500,9 @@ void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file,
     ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
     b = code_string(ss.as_string());
   }
-  BLOCK_COMMENT("verify_oop {");
-#ifdef _LP64
-  push(rscratch1);                    // save r10, trashed by movptr()
-#endif
-  push(rax);                          // save rax,
-  push(reg);                          // pass register argument
   ExternalAddress buffer((address) b);
-  // avoid using pushptr, as it modifies scratch registers
-  // and our contract is not to modify anything
-  movptr(rax, buffer.addr());
-  push(rax);
+  pushptr(buffer.addr(), rscratch1);
+
   // call indirectly to solve generation ordering problem
   movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
   call(rax);
@@ -4444,19 +4547,10 @@ Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
 
 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
   if (!VerifyOops) return;
 
-  // Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord);
-  // Pass register number to verify_oop_subroutine
-  const char* b = NULL;
-  {
-    ResourceMark rm;
-    stringStream ss;
-    ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);
-    b = code_string(ss.as_string());
-  }
 #ifdef _LP64
-  push(rscratch1);                    // save r10, trashed by movptr()
+  push(rscratch1);
 #endif
-  push(rax);                          // save rax,
+  push(rax); // save rax,
   // addr may contain rsp so we will have to adjust it based on the push
   // we just did (and on 64 bit we do two pushes)
   // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which
@@ -4468,12 +4562,16 @@ void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* f
     pushptr(addr);
   }
 
+  // Pass register number to verify_oop_subroutine
+  const char* b = NULL;
+  {
+    ResourceMark rm;
+    stringStream ss;
+    ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);
+    b = code_string(ss.as_string());
+  }
   ExternalAddress buffer((address) b);
-  // pass msg argument
-  // avoid using pushptr, as it modifies scratch registers
-  // and our contract is not to modify anything
-  movptr(rax, buffer.addr());
-  push(rax);
+  pushptr(buffer.addr(), rscratch1);
 
   // call indirectly to solve generation ordering problem
   movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
@@ -4850,7 +4948,7 @@ void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
   push(rsp);                // pass CPU state
   ExternalAddress msg((address) s); // pass message string s
-  pushptr(msg.addr());
+  pushptr(msg.addr(), noreg);
   push(stack_depth);        // pass stack depth
   call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU)));
   addptr(rsp, 3 * wordSize);   // discard arguments
@@ -4865,12 +4963,12 @@ void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
 }
 #endif // _LP64
 
-void MacroAssembler::restore_cpu_control_state_after_jni() {
+void MacroAssembler::restore_cpu_control_state_after_jni(Register rscratch) {
   // Either restore the MXCSR register after returning from the JNI Call
   // or verify that it wasn't changed (with -Xcheck:jni flag).
   if (VM_Version::supports_sse()) {
     if (RestoreMXCSROnJNICalls) {
-      ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()));
+      ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()), rscratch);
     } else if (CheckJNICalls) {
       call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
     }
@@ -5017,13 +5115,12 @@ void MacroAssembler::verify_heapbase(const char* msg) {
   assert (Universe::heap() != NULL, "java heap should be initialized");
   if (CheckCompressedOops) {
     Label ok;
-    const auto src2 = ExternalAddress((address)CompressedOops::ptrs_base_addr());
-    assert(!src2.is_lval(), "should not be lval");
+    ExternalAddress src2(CompressedOops::ptrs_base_addr());
     const bool is_src2_reachable = reachable(src2);
     if (!is_src2_reachable) {
       push(rscratch1);  // cmpptr trashes rscratch1
     }
-    cmpptr(r12_heapbase, src2);
+    cmpptr(r12_heapbase, src2, rscratch1);
     jcc(Assembler::equal, ok);
     STOP(msg);
     bind(ok);
@@ -5312,7 +5409,7 @@ void MacroAssembler::reinit_heapbase() {
       mov64(r12_heapbase, (int64_t)CompressedOops::ptrs_base());
     }
   } else {
-    movptr(r12_heapbase, ExternalAddress((address)CompressedOops::ptrs_base_addr()));
+    movptr(r12_heapbase, ExternalAddress(CompressedOops::ptrs_base_addr()));
    }
  }
 }
 
@@ -7270,7 +7367,7 @@ void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Regi
 
   // Fold total 512 bits of polynomial on each iteration,
   // 128 bits per each of 4 parallel streams.
-  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32));
+  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32), rscratch1);
 
   align32();
   BIND(L_fold_512b_loop);
@@ -7284,7 +7381,7 @@ void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Regi
 
   // Fold 512 bits to 128 bits.
   BIND(L_fold_512b);
-  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16));
+  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1);
   fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2);
   fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3);
   fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4);
@@ -7293,7 +7390,7 @@ void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Regi
   BIND(L_fold_tail);
   addl(len, 3);
   jccb(Assembler::lessEqual, L_fold_128b);
-  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16));
+  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1);
 
   BIND(L_fold_tail_loop);
   fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0);
@@ -7303,7 +7400,7 @@ void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Regi
 
   // Fold 128 bits in xmm1 down into 32 bits in crc register.
   BIND(L_fold_128b);
-  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()));
+  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()), rscratch1);
   if (UseAVX > 0) {
     vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
     vpand(xmm3, xmm0, xmm2, 0 /* vector_len */);
@@ -9403,9 +9500,9 @@ Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond)
 }
 
 SkipIfEqual::SkipIfEqual(
-    MacroAssembler* masm, const bool* flag_addr, bool value) {
+    MacroAssembler* masm, const bool* flag_addr, bool value, Register rscratch) {
   _masm = masm;
-  _masm->cmp8(ExternalAddress((address)flag_addr), value);
+  _masm->cmp8(ExternalAddress((address)flag_addr), value, rscratch);
   _masm->jcc(Assembler::equal, _label);
 }
 
@@ -9453,3 +9550,19 @@ void MacroAssembler::get_thread(Register thread) {
 
 #endif // !WIN32 || _LP64
+
+void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigned bias, Register tmp) {
+  Label L_stack_ok;
+  if (bias == 0) {
+    testptr(sp, 2 * wordSize - 1);
+  } else {
+    // lea(tmp, Address(rsp, bias));
+    mov(tmp, sp);
+    addptr(tmp, bias);
+    testptr(tmp, 2 * wordSize - 1);
+  }
+  jcc(Assembler::equal, L_stack_ok);
+  block_comment(msg);
+  stop(msg);
+  bind(L_stack_ok);
+}
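
The new check_stack_alignment helper closes out the .cpp changes. A hypothetical use at a native-call boundary (the message and bias values are assumed):

    // Sketch only: verify rsp is 16-byte aligned, compensating for a pending
    // return address. With a non-zero bias, 'tmp' must be a usable scratch
    // register, since the helper adds the bias before testing the low bits.
    __ check_stack_alignment(rsp, "unaligned rsp at runtime call",
                             wordSize /* bias */, rscratch1);
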
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.hpp b/src/hotspot/cpu/x86/macroAssembler_x86.hpp
index e76a242b73e72..33d1e6c359538 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp
@@ -89,7 +89,7 @@ class MacroAssembler: public Assembler {
   virtual void check_and_handle_earlyret(Register java_thread);
 
   Address as_Address(AddressLiteral adr);
-  Address as_Address(ArrayAddress adr);
+  Address as_Address(ArrayAddress adr, Register rscratch);
 
   // Support for NULL-checks
   //
@@ -169,7 +169,7 @@ class MacroAssembler: public Assembler {
     else                       { movss (dst, src); return; }
   }
   void movflt(XMMRegister dst, Address src) { movss(dst, src); }
-  void movflt(XMMRegister dst, AddressLiteral src);
+  void movflt(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
   void movflt(Address dst, XMMRegister src) { movss(dst, src); }
 
   // Move with zero extension
@@ -181,7 +181,7 @@ class MacroAssembler: public Assembler {
     else                       { movsd (dst, src); return; }
   }
-  void movdbl(XMMRegister dst, AddressLiteral src);
+  void movdbl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
   void movdbl(XMMRegister dst, Address src) {
     if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
@@ -189,10 +189,10 @@ class MacroAssembler: public Assembler {
   }
   void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }
 
-  void incrementl(AddressLiteral dst);
-  void incrementl(ArrayAddress dst);
+  void incrementl(AddressLiteral dst, Register rscratch = noreg);
+  void incrementl(ArrayAddress dst, Register rscratch);
 
-  void incrementq(AddressLiteral dst);
+  void incrementq(AddressLiteral dst, Register rscratch = noreg);
 
   // Alignment
   void align32();
@@ -311,12 +311,14 @@ class MacroAssembler: public Assembler {
   void set_last_Java_frame(Register thread,
                            Register last_java_sp,
                            Register last_java_fp,
-                           address last_java_pc);
+                           address last_java_pc,
+                           Register rscratch);
 
   // thread in the default location (r15_thread on 64bit)
   void set_last_Java_frame(Register last_java_sp,
                            Register last_java_fp,
-                           address last_java_pc);
+                           address last_java_pc,
+                           Register rscratch);
 
   void reset_last_Java_frame(Register thread, bool clear_fp);
 
@@ -337,9 +339,9 @@ class MacroAssembler: public Assembler {
   void movbool(Address dst, Register src);
   void testbool(Register dst);
 
-  void resolve_oop_handle(Register result, Register tmp = rscratch2);
+  void resolve_oop_handle(Register result, Register tmp);
   void resolve_weak_handle(Register result, Register tmp);
-  void load_mirror(Register mirror, Register method, Register tmp = rscratch2);
+  void load_mirror(Register mirror, Register method, Register tmp);
   void load_method_holder_cld(Register rresult, Register rmethod);
 
   void load_method_holder(Register holder, Register method);
@@ -660,7 +662,7 @@ class MacroAssembler: public Assembler {
 #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
 
   // Verify or restore cpu control state after JNI call
-  void restore_cpu_control_state_after_jni();
+  void restore_cpu_control_state_after_jni(Register rscratch);
 
   // prints msg, dumps registers and stops execution
   void stop(const char* msg);
@@ -728,14 +730,14 @@ class MacroAssembler: public Assembler {
   void andptr(Register dst, int32_t src);
   void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }
 
-  void cmp8(AddressLiteral src1, int imm);
+  void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);
 
   // renamed to drag out the casting of address to int32_t/intptr_t
   void cmp32(Register src1, int32_t imm);
 
-  void cmp32(AddressLiteral src1, int32_t imm);
+  void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
   // compare reg - mem, or reg - &mem
-  void cmp32(Register src1, AddressLiteral src2);
+  void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);
 
   void cmp32(Register src1, Address src2);
 
@@ -747,12 +749,12 @@ class MacroAssembler: public Assembler {
   void cmpoop(Register src1, Register src2);
   void cmpoop(Register src1, Address src2);
-  void cmpoop(Register dst, jobject obj);
+  void cmpoop(Register dst, jobject obj, Register rscratch);
 
   // NOTE src2 must be the lval. This is NOT an mem-mem compare
-  void cmpptr(Address src1, AddressLiteral src2);
+  void cmpptr(Address src1, AddressLiteral src2, Register rscratch);
 
-  void cmpptr(Register src1, AddressLiteral src2);
+  void cmpptr(Register src1, AddressLiteral src2, Register rscratch = noreg);
 
   void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
   void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
@@ -762,12 +764,11 @@ class MacroAssembler: public Assembler {
   void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
 
   // cmp64 to avoid hiding cmpq
-  void cmp64(Register src1, AddressLiteral src, Register rscratch = rscratch1);
+  void cmp64(Register src1, AddressLiteral src, Register rscratch = noreg);
 
   void cmpxchgptr(Register reg, Address adr);
 
-  void locked_cmpxchgptr(Register reg, AddressLiteral adr);
-
+  void locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch = noreg);
 
   void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
   void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }
@@ -810,27 +811,27 @@ class MacroAssembler: public Assembler {
   // Helper functions for statistics gathering.
   // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
-  void cond_inc32(Condition cond, AddressLiteral counter_addr);
+  void cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch = noreg);
   // Unconditional atomic increment.
void atomic_incl(Address counter_addr); - void atomic_incl(AddressLiteral counter_addr, Register scr = rscratch1); + void atomic_incl(AddressLiteral counter_addr, Register rscratch = noreg); #ifdef _LP64 void atomic_incq(Address counter_addr); - void atomic_incq(AddressLiteral counter_addr, Register scr = rscratch1); + void atomic_incq(AddressLiteral counter_addr, Register rscratch = noreg); #endif - void atomic_incptr(AddressLiteral counter_addr, Register scr = rscratch1) { LP64_ONLY(atomic_incq(counter_addr, scr)) NOT_LP64(atomic_incl(counter_addr, scr)) ; } + void atomic_incptr(AddressLiteral counter_addr, Register rscratch = noreg) { LP64_ONLY(atomic_incq(counter_addr, rscratch)) NOT_LP64(atomic_incl(counter_addr, rscratch)) ; } void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; } + void lea(Register dst, Address adr) { Assembler::lea(dst, adr); } void lea(Register dst, AddressLiteral adr); - void lea(Address dst, AddressLiteral adr); - void lea(Register dst, Address adr) { Assembler::lea(dst, adr); } + void lea(Address dst, AddressLiteral adr, Register rscratch); void leal32(Register dst, Address src) { leal(dst, src); } // Import other testl() methods from the parent class or else // they will be hidden by the following overriding declaration. using Assembler::testl; - void testl(Register dst, AddressLiteral src); + void testl(Register dst, AddressLiteral src); // requires reachable address void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } @@ -853,7 +854,7 @@ class MacroAssembler: public Assembler { // NOTE: this call transfers to the effective address of entry NOT // the address contained by entry. This is because this is more natural // for jumps/calls. - void call(AddressLiteral entry); + void call(AddressLiteral entry, Register rscratch = rax); // Emit the CompiledIC call idiom void ic_call(address entry, jint method_index = 0); @@ -865,13 +866,14 @@ class MacroAssembler: public Assembler { // NOTE: these jumps transfer to the effective address of dst NOT // the address contained by dst. This is because this is more natural // for jumps/calls. - void jump(AddressLiteral dst); - void jump_cc(Condition cc, AddressLiteral dst); + void jump(AddressLiteral dst, Register rscratch = noreg); + + void jump_cc(Condition cc, AddressLiteral dst, Register rscratch = noreg); // 32bit can do a case table jump in one instruction but we no longer allow the base // to be installed in the Address class. 
This jump will transfer to the address // contained in the location described by entry (not the address of entry) - void jump(ArrayAddress entry); + void jump(ArrayAddress entry, Register rscratch); // Floating @@ -880,45 +882,45 @@ class MacroAssembler: public Assembler { void push_d(XMMRegister r); void pop_d(XMMRegister r); - void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); } - void andpd(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1); - void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); } + void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); } + void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); } + void andpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); - void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); } - void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); } - void andps(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1); + void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); } + void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); } + void andps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); - void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); } - void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); } - void comiss(XMMRegister dst, AddressLiteral src); + void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); } + void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); } + void comiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); - void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); } - void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); } - void comisd(XMMRegister dst, AddressLiteral src); + void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); } + void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); } + void comisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); #ifndef _LP64 - void fadd_s(Address src) { Assembler::fadd_s(src); } + void fadd_s(Address src) { Assembler::fadd_s(src); } void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); } - void fldcw(Address src) { Assembler::fldcw(src); } + void fldcw(Address src) { Assembler::fldcw(src); } void fldcw(AddressLiteral src); - void fld_s(int index) { Assembler::fld_s(index); } - void fld_s(Address src) { Assembler::fld_s(src); } + void fld_s(int index) { Assembler::fld_s(index); } + void fld_s(Address src) { Assembler::fld_s(src); } void fld_s(AddressLiteral src); - void fld_d(Address src) { Assembler::fld_d(src); } + void fld_d(Address src) { Assembler::fld_d(src); } void fld_d(AddressLiteral src); - void fmul_s(Address src) { Assembler::fmul_s(src); } - void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); } -#endif // _LP64 + void fld_x(Address src) { Assembler::fld_x(src); } + void fld_x(AddressLiteral src) { Assembler::fld_x(as_Address(src)); } - void fld_x(Address src) { Assembler::fld_x(src); } - void fld_x(AddressLiteral src); + void fmul_s(Address src) { Assembler::fmul_s(src); } + void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); } +#endif // !_LP64 void ldmxcsr(Address src) { Assembler::ldmxcsr(src); } - void ldmxcsr(AddressLiteral src, Register scratchReg = rscratch1); + void ldmxcsr(AddressLiteral src, Register 
rscratch = noreg); #ifdef _LP64 private: @@ -1125,61 +1127,61 @@ class MacroAssembler: public Assembler { // these are private because users should be doing movflt/movdbl - void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); } - void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); } - void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); } - void movss(XMMRegister dst, AddressLiteral src); + void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); } + void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); } + void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); } + void movss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); - void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); } - void movlpd(XMMRegister dst, AddressLiteral src); + void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); } + void movlpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); public: void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); } void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); } - void addsd(XMMRegister dst, AddressLiteral src, Register rscratch = rscratch1); + void addsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); } void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); } - void addss(XMMRegister dst, AddressLiteral src, Register rscratch = rscratch1); + void addss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); void addpd(XMMRegister dst, XMMRegister src) { Assembler::addpd(dst, src); } void addpd(XMMRegister dst, Address src) { Assembler::addpd(dst, src); } - void addpd(XMMRegister dst, AddressLiteral src, Register rscratch = rscratch1); + void addpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); using Assembler::vbroadcastsd; - void vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = rscratch1); + void vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); using Assembler::vbroadcastss; - void vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = rscratch1); + void vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); - void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); } - void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); } - void divsd(XMMRegister dst, AddressLiteral src); + void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); } + void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); } + void divsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); - void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); } - void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); } - void divss(XMMRegister dst, AddressLiteral src); + void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); } + void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); } + void divss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); // Move Unaligned Double Quadword void movdqu(Address dst, XMMRegister src); void movdqu(XMMRegister dst, XMMRegister src); void movdqu(XMMRegister dst, Address src); - void movdqu(XMMRegister dst, AddressLiteral src, Register 
rscratch = rscratch1); + void movdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); void kmovwl(Register dst, KRegister src) { Assembler::kmovwl(dst, src); } void kmovwl(Address dst, KRegister src) { Assembler::kmovwl(dst, src); } void kmovwl(KRegister dst, KRegister src) { Assembler::kmovwl(dst, src); } void kmovwl(KRegister dst, Register src) { Assembler::kmovwl(dst, src); } void kmovwl(KRegister dst, Address src) { Assembler::kmovwl(dst, src); } - void kmovwl(KRegister dst, AddressLiteral src, Register rscratch = rscratch1); + void kmovwl(KRegister dst, AddressLiteral src, Register rscratch = noreg); - void kmovql(KRegister dst, KRegister src) { Assembler::kmovql(dst, src); } - void kmovql(KRegister dst, Register src) { Assembler::kmovql(dst, src); } - void kmovql(Register dst, KRegister src) { Assembler::kmovql(dst, src); } - void kmovql(KRegister dst, Address src) { Assembler::kmovql(dst, src); } - void kmovql(Address dst, KRegister src) { Assembler::kmovql(dst, src); } - void kmovql(KRegister dst, AddressLiteral src, Register scratch_reg = rscratch1); + void kmovql(KRegister dst, KRegister src) { Assembler::kmovql(dst, src); } + void kmovql(KRegister dst, Register src) { Assembler::kmovql(dst, src); } + void kmovql(Register dst, KRegister src) { Assembler::kmovql(dst, src); } + void kmovql(KRegister dst, Address src) { Assembler::kmovql(dst, src); } + void kmovql(Address dst, KRegister src) { Assembler::kmovql(dst, src); } + void kmovql(KRegister dst, AddressLiteral src, Register rscratch = noreg); // Safe move operation, lowers down to 16bit moves for targets supporting // AVX512F feature and 64bit moves for targets supporting AVX512BW feature. @@ -1190,16 +1192,17 @@ class MacroAssembler: public Assembler { void kmov(KRegister dst, Register src); using Assembler::movddup; - void movddup(XMMRegister dst, AddressLiteral src, Register rscratch = rscratch1); + void movddup(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); + using Assembler::vmovddup; - void vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = rscratch1); + void vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); // AVX Unaligned forms void vmovdqu(Address dst, XMMRegister src); void vmovdqu(XMMRegister dst, Address src); void vmovdqu(XMMRegister dst, XMMRegister src); - void vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch = rscratch1); - void vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = rscratch1); + void vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); + void vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); // AVX512 Unaligned void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len); @@ -1213,28 +1216,29 @@ class MacroAssembler: public Assembler { Assembler::evmovdqub(dst, mask, src, merge, vector_len); } } - void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); } - void evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); } - void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg); + void evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, 
vector_len); } + void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); } + void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); + + void evmovdquw(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); } + void evmovdquw(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); } - void evmovdquw(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); } - void evmovdquw(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); } void evmovdquw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { if (dst->encoding() != src->encoding() || mask != k0) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); } } - void evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); } - void evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); } - void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg); + void evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); } + void evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); } + void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) { if (dst->encoding() != src->encoding()) { Assembler::evmovdqul(dst, src, vector_len); } } - void evmovdqul(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); } - void evmovdqul(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); } + void evmovdqul(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); } + void evmovdqul(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); } void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { if (dst->encoding() != src->encoding() || mask != k0) { @@ -1243,7 +1247,7 @@ class MacroAssembler: public Assembler { } void evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); } void evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); } - void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch); + void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) { if (dst->encoding() != src->encoding()) { @@ -1264,26 +1268,26 @@ class MacroAssembler: public Assembler { void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); // Move Aligned Double Quadword - void movdqa(XMMRegister dst, Address src) { 
Assembler::movdqa(dst, src); } - void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); } - void movdqa(XMMRegister dst, AddressLiteral src); + void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); } + void movdqa(XMMRegister dst, Address src) { Assembler::movdqa(dst, src); } + void movdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); } void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); } void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); } - void movsd(XMMRegister dst, AddressLiteral src, Register rscratch = rscratch1); + void movsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); void mulpd(XMMRegister dst, XMMRegister src) { Assembler::mulpd(dst, src); } void mulpd(XMMRegister dst, Address src) { Assembler::mulpd(dst, src); } - void mulpd(XMMRegister dst, AddressLiteral src, Register rscratch = rscratch1); + void mulpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); } void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); } - void mulsd(XMMRegister dst, AddressLiteral src, Register rscratch = rscratch1); + void mulsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); - void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); } - void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); } - void mulss(XMMRegister dst, AddressLiteral src); + void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); } + void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); } + void mulss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); // Carry-Less Multiplication Quadword void pclmulldq(XMMRegister dst, XMMRegister src) { @@ -1308,67 +1312,63 @@ class MacroAssembler: public Assembler { void ptest(XMMRegister dst, XMMRegister src); - void sqrtsd(XMMRegister dst, XMMRegister src) { Assembler::sqrtsd(dst, src); } - void sqrtsd(XMMRegister dst, Address src) { Assembler::sqrtsd(dst, src); } - void sqrtsd(XMMRegister dst, AddressLiteral src); - void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); } void roundsd(XMMRegister dst, Address src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); } - void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch); + void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch = noreg); - void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); } - void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); } - void sqrtss(XMMRegister dst, AddressLiteral src); + void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); } + void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); } + void sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); - void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); } - void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); } - void subsd(XMMRegister dst, AddressLiteral src); + void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); } + void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); } + void subsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); - void subss(XMMRegister dst, 
XMMRegister src) { Assembler::subss(dst, src); } - void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); } - void subss(XMMRegister dst, AddressLiteral src); + void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); } + void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); } + void subss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); - void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); } - void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); } - void ucomiss(XMMRegister dst, AddressLiteral src); + void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); } + void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); } + void ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); - void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); } - void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); } - void ucomisd(XMMRegister dst, AddressLiteral src); + void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); } + void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); } + void ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values void xorpd(XMMRegister dst, XMMRegister src); void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); } - void xorpd(XMMRegister dst, AddressLiteral src, Register rscratch = rscratch1); + void xorpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values void xorps(XMMRegister dst, XMMRegister src); void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); } - void xorps(XMMRegister dst, AddressLiteral src, Register rscratch = rscratch1); + void xorps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); // Shuffle Bytes - void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); } - void pshufb(XMMRegister dst, Address src) { Assembler::pshufb(dst, src); } - void pshufb(XMMRegister dst, AddressLiteral src); + void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); } + void pshufb(XMMRegister dst, Address src) { Assembler::pshufb(dst, src); } + void pshufb(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); // AVX 3-operands instructions - void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); } - void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, nds, src); } - void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src); + void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); } + void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, nds, src); } + void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); - void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); } - void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); } - void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src); + void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); } + void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, 
src); } + void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); - void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len); - void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len); + void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg); + void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg); void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len); - void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch); + void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); - void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); + void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); } void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); } @@ -1376,36 +1376,39 @@ class MacroAssembler: public Assembler { void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); } void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); } - void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = rscratch1); + void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); using Assembler::vpbroadcastd; - void vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = rscratch1); + void vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); using Assembler::vpbroadcastq; - void vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = rscratch1); + void vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); - void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg); + void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); // Vector compares - void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, - int comparison, bool is_signed, int vector_len) { Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len); } - void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, - int comparison, bool is_signed, int vector_len, Register scratch_reg); - void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, - int comparison, bool is_signed, int vector_len) { Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len); } - void evpcmpq(KRegister kdst, KRegister mask, 
XMMRegister nds, AddressLiteral src, - int comparison, bool is_signed, int vector_len, Register scratch_reg); - void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, - int comparison, bool is_signed, int vector_len) { Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len); } - void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, - int comparison, bool is_signed, int vector_len, Register scratch_reg); - void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, - int comparison, bool is_signed, int vector_len) { Assembler::evpcmpw(kdst, mask, nds, src, comparison, is_signed, vector_len); } - void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, - int comparison, bool is_signed, int vector_len, Register scratch_reg); + void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) { + Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len); + } + void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg); + + void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) { + Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len); + } + void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg); + + void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) { + Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len); + } + void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg); + + void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) { + Assembler::evpcmpw(kdst, mask, nds, src, comparison, is_signed, vector_len); + } + void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg); void evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len); @@ -1413,32 +1416,29 @@ class MacroAssembler: public Assembler { void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len); void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len); - void vpmovzxbw(XMMRegister dst, Address src, int vector_len); + void vpmovzxbw(XMMRegister dst, Address src, int vector_len); void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); } void vpmovmskb(Register dst, XMMRegister src, int vector_len = Assembler::AVX_256bit); void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); - void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); - void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { - Assembler::vpmulld(dst, nds, src, vector_len); - }; - void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { - Assembler::vpmulld(dst, nds, src, vector_len); - } - void vpmulld(XMMRegister dst, XMMRegister 
nds, AddressLiteral src, int vector_len, Register scratch_reg); + void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); + + void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); } + void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); } + void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); - void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len); + void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len); void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); - void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); + void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); - void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len); + void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len); void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); - void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len); + void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len); void evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { if (!is_varshift) { @@ -1527,52 +1527,52 @@ class MacroAssembler: public Assembler { void pshuflw(XMMRegister dst, XMMRegister src, int mode); void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); } - void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); } - void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); } - void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1); + void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); } + void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); } + void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); - void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); } - void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); } - void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1); + void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); } + void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); } + void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); - void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register scratch_reg); + void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, 
int vector_len, Register rscratch = noreg); - void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); } - void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); } - void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src); + void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); } + void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); } + void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); - void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); } - void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); } - void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src); + void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); } + void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); } + void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); - void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); } - void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); } - void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src); + void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); } + void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); } + void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); - void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); } - void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); } - void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src); + void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); } + void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); } + void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); - void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); } - void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); } - void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src); + void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); } + void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); } + void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); - void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); } - void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); } - void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src); + void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); } + void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); } + void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); - void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src); - void 
vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src); + void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); + void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); // AVX Vector instructions - void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); } - void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); } - void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1); + void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); } + void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); } + void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); - void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); } - void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); } - void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1); + void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); } + void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); } + void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2 @@ -1586,7 +1586,7 @@ class MacroAssembler: public Assembler { else Assembler::vxorpd(dst, nds, src, vector_len); } - void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1); + void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); // Simple version for AVX2 256bit vectors void vpxor(XMMRegister dst, XMMRegister src) { @@ -1598,8 +1598,8 @@ class MacroAssembler: public Assembler { Assembler::vpxor(dst, dst, src, AVX_256bit); } - void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); } - void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg); + void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); } + void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { if (UseAVX > 2 && VM_Version::supports_avx512novl()) { @@ -1841,10 +1841,10 @@ class MacroAssembler: public Assembler { void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); } void movoop(Register dst, jobject obj); - void movoop(Address dst, jobject obj); + void movoop(Address dst, jobject obj, Register rscratch); void mov_metadata(Register dst, Metadata* obj); - void mov_metadata(Address dst, Metadata* obj); + void 
mov_metadata(Address dst, Metadata* obj, Register rscratch); void movptr(Register dst, Register src); void movptr(Register dst, Address src); @@ -1852,47 +1852,36 @@ class MacroAssembler: public Assembler { void movptr(Register dst, ArrayAddress src); void movptr(Register dst, intptr_t src); void movptr(Address dst, Register src); - void movptr(Address dst, intptr_t src); - void movptr(ArrayAddress dst, Register src); + void movptr(Address dst, int32_t imm); + void movptr(Address dst, intptr_t src, Register rscratch); + void movptr(ArrayAddress dst, Register src, Register rscratch); void movptr(Register dst, RegisterOrConstant src) { if (src.is_constant()) movptr(dst, src.as_constant()); else movptr(dst, src.as_register()); } -#ifdef _LP64 - // Generally the next two are only used for moving NULL - // Although there are situations in initializing the mark word where - // they could be used. They are dangerous. - - // They only exist on LP64 so that int32_t and intptr_t are not the same - // and we have ambiguous declarations. - - void movptr(Address dst, int32_t imm32); -#endif // _LP64 // to avoid hiding movl - void mov32(AddressLiteral dst, Register src); - void mov32(Register dst, AddressLiteral src); - - // to avoid hiding movb - void movbyte(ArrayAddress dst, int src); + void mov32(Register dst, AddressLiteral src); + void mov32(AddressLiteral dst, Register src, Register rscratch = noreg); // Import other mov() methods from the parent class or else // they will be hidden by the following overriding declaration. using Assembler::movdl; + void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); + using Assembler::movq; - void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = rscratch1); - void movq (XMMRegister dst, AddressLiteral src, Register rscratch = rscratch1); + void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); // Can push value or effective address - void pushptr(AddressLiteral src); + void pushptr(AddressLiteral src, Register rscratch); void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); } void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); } - void pushoop(jobject obj); - void pushklass(Metadata* obj); + void pushoop(jobject obj, Register rscratch); + void pushklass(Metadata* obj, Register rscratch); // sign extend as need a l to ptr sized element void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); } @@ -2091,9 +2080,16 @@ class MacroAssembler: public Assembler { #endif // COMPILER2_OR_JVMCI + OopMap* continuation_enter_setup(int& stack_slots); + void fill_continuation_entry(Register reg_cont_obj, Register reg_flags); + void continuation_enter_cleanup(); + #endif // _LP64 void vallones(XMMRegister dst, int vector_len); + + void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg); + }; /** @@ -2110,7 +2106,7 @@ class SkipIfEqual { Label _label; public: - SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value); + SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value, Register rscratch); ~SkipIfEqual(); }; diff --git a/src/hotspot/cpu/x86/macroAssembler_x86_64.cpp b/src/hotspot/cpu/x86/macroAssembler_x86_64.cpp new file mode 100644 index 0000000000000..a6765c5d7dd8f --- /dev/null +++ b/src/hotspot/cpu/x86/macroAssembler_x86_64.cpp @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "runtime/continuation.hpp" +#include "runtime/continuationEntry.hpp" +#include "runtime/javaThread.hpp" +#include "macroAssembler_x86.hpp" + +//---------------------------- continuation_enter_setup --------------------------- +// +// Arguments: +// None. +// +// Results: +// rsp: pointer to blank ContinuationEntry +// +// Kills: +// rax +// +OopMap* MacroAssembler::continuation_enter_setup(int& stack_slots) { + assert(ContinuationEntry::size() % VMRegImpl::stack_slot_size == 0, ""); + assert(in_bytes(ContinuationEntry::cont_offset()) % VMRegImpl::stack_slot_size == 0, ""); + assert(in_bytes(ContinuationEntry::chunk_offset()) % VMRegImpl::stack_slot_size == 0, ""); + + stack_slots += checked_cast<int>(ContinuationEntry::size()) / wordSize; + subptr(rsp, checked_cast<int>(ContinuationEntry::size())); + + int frame_size = (checked_cast<int>(ContinuationEntry::size()) + wordSize) / VMRegImpl::stack_slot_size; + OopMap* map = new OopMap(frame_size, 0); + ContinuationEntry::setup_oopmap(map); + + movptr(rax, Address(r15_thread, JavaThread::cont_entry_offset())); + movptr(Address(rsp, ContinuationEntry::parent_offset()), rax); + movptr(Address(r15_thread, JavaThread::cont_entry_offset()), rsp); + + return map; +} + +//---------------------------- fill_continuation_entry --------------------------- +// +// Arguments: +// rsp: pointer to blank Continuation entry +// reg_cont_obj: pointer to the continuation +// reg_flags: flags +// +// Results: +// rsp: pointer to filled out ContinuationEntry +// +// Kills: +// rax +// +void MacroAssembler::fill_continuation_entry(Register reg_cont_obj, Register reg_flags) { + assert_different_registers(rax, reg_cont_obj, reg_flags); +#ifdef ASSERT + movl(Address(rsp, ContinuationEntry::cookie_offset()), ContinuationEntry::cookie_value()); +#endif + movptr(Address(rsp, ContinuationEntry::cont_offset()), reg_cont_obj); + movl (Address(rsp, ContinuationEntry::flags_offset()), reg_flags); + movptr(Address(rsp, ContinuationEntry::chunk_offset()), 0); + movl(Address(rsp, ContinuationEntry::argsize_offset()), 0); + movl(Address(rsp, ContinuationEntry::pin_count_offset()), 0); + + movptr(rax, Address(r15_thread, JavaThread::cont_fastpath_offset())); + movptr(Address(rsp, ContinuationEntry::parent_cont_fastpath_offset()), rax); + movq(rax, Address(r15_thread, JavaThread::held_monitor_count_offset())); + movq(Address(rsp, ContinuationEntry::parent_held_monitor_count_offset()), rax); + + movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), 0); + 
movq(Address(r15_thread, JavaThread::held_monitor_count_offset()), 0); +} + +//---------------------------- continuation_enter_cleanup --------------------------- +// +// Arguments: +// rsp: pointer to the ContinuationEntry +// +// Results: +// rsp: pointer to the spilled rbp in the entry frame +// +// Kills: +// rbx +// +void MacroAssembler::continuation_enter_cleanup() { +#ifdef ASSERT + Label L_good_sp; + cmpptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset())); + jcc(Assembler::equal, L_good_sp); + stop("Incorrect rsp at continuation_enter_cleanup"); + bind(L_good_sp); +#endif + + movptr(rbx, Address(rsp, ContinuationEntry::parent_cont_fastpath_offset())); + movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), rbx); + movq(rbx, Address(rsp, ContinuationEntry::parent_held_monitor_count_offset())); + movq(Address(r15_thread, JavaThread::held_monitor_count_offset()), rbx); + + movptr(rbx, Address(rsp, ContinuationEntry::parent_offset())); + movptr(Address(r15_thread, JavaThread::cont_entry_offset()), rbx); + addptr(rsp, checked_cast<int>(ContinuationEntry::size())); +} diff --git a/src/hotspot/cpu/x86/methodHandles_x86.cpp b/src/hotspot/cpu/x86/methodHandles_x86.cpp index 0c0ba7681b300..94c2e544e7b29 100644 --- a/src/hotspot/cpu/x86/methodHandles_x86.cpp +++ b/src/hotspot/cpu/x86/methodHandles_x86.cpp @@ -78,28 +78,29 @@ void MethodHandles::verify_klass(MacroAssembler* _masm, InstanceKlass** klass_addr = vmClasses::klass_addr_at(klass_id); Klass* klass = vmClasses::klass_at(klass_id); Register temp = rdi; - Register temp2 = noreg; - LP64_ONLY(temp2 = rscratch1); // used by MacroAssembler::cmpptr and load_klass Label L_ok, L_bad; BLOCK_COMMENT("verify_klass {"); __ verify_oop(obj); __ testptr(obj, obj); __ jcc(Assembler::zero, L_bad); - __ push(temp); if (temp2 != noreg) __ push(temp2); -#define UNPUSH { if (temp2 != noreg) __ pop(temp2); __ pop(temp); } - __ load_klass(temp, obj, temp2); - __ cmpptr(temp, ExternalAddress((address) klass_addr)); +#define PUSH { __ push(temp); LP64_ONLY( __ push(rscratch1); ) } +#define POP { LP64_ONLY( __ pop(rscratch1); ) __ pop(temp); } + PUSH; + __ load_klass(temp, obj, rscratch1); + __ cmpptr(temp, ExternalAddress((address) klass_addr), rscratch1); __ jcc(Assembler::equal, L_ok); intptr_t super_check_offset = klass->super_check_offset(); __ movptr(temp, Address(temp, super_check_offset)); - __ cmpptr(temp, ExternalAddress((address) klass_addr)); + __ cmpptr(temp, ExternalAddress((address) klass_addr), rscratch1); __ jcc(Assembler::equal, L_ok); - UNPUSH; + POP; __ bind(L_bad); __ STOP(error_message); __ BIND(L_ok); - UNPUSH; + POP; BLOCK_COMMENT("} verify_klass"); +#undef POP +#undef PUSH } void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) { @@ -672,7 +673,7 @@ void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adapt __ push(rbx); // pusha saved_regs __ push(rcx); // mh __ push(rcx); // slot for adaptername - __ movptr(Address(rsp, 0), (intptr_t) adaptername); + __ movptr(Address(rsp, 0), (intptr_t) adaptername, rscratch1); __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub_wrapper), rsp); __ increment(rsp, sizeof(MethodHandleStubArguments)); diff --git a/src/hotspot/cpu/x86/nativeInst_x86.cpp b/src/hotspot/cpu/x86/nativeInst_x86.cpp index b57c57bc803b2..37aa95add825b 100644 --- a/src/hotspot/cpu/x86/nativeInst_x86.cpp +++ b/src/hotspot/cpu/x86/nativeInst_x86.cpp @@ -495,7 +495,7 @@ void 
NativeJump::check_verified_entry_alignment(address entry, address verified_ } -// MT safe inserting of a jump over an unknown instruction sequence (used by nmethod::makeZombie) +// MT safe inserting of a jump over an unknown instruction sequence (used by nmethod::make_not_entrant) // The problem: jmp is a 5-byte instruction. Atomic write can be only with 4 bytes. // First patches the first word atomically to be a jump to itself. // Then patches the last byte and then atomically patches the first word (4-bytes), diff --git a/src/hotspot/cpu/x86/runtime_x86_32.cpp b/src/hotspot/cpu/x86/runtime_x86_32.cpp index c409edb909861..c0c2d2a101d64 100644 --- a/src/hotspot/cpu/x86/runtime_x86_32.cpp +++ b/src/hotspot/cpu/x86/runtime_x86_32.cpp @@ -109,7 +109,7 @@ void OptoRuntime::generate_exception_blob() { // registers of the frame being removed. // __ movptr(Address(rsp, thread_off * wordSize), rcx); // Thread is first argument - __ set_last_Java_frame(rcx, noreg, noreg, NULL); + __ set_last_Java_frame(rcx, noreg, noreg, NULL, noreg); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C))); diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp index 8c0e15ff47442..4f628aaa0c955 100644 --- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp +++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp @@ -1660,14 +1660,14 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, intptr_t the_pc = (intptr_t) __ pc(); oop_maps->add_gc_map(the_pc - start, map); - __ set_last_Java_frame(thread, rsp, noreg, (address)the_pc); + __ set_last_Java_frame(thread, rsp, noreg, (address)the_pc, noreg); // We have all of the arguments set up at this point. We must not touch any of the register // argument registers at this point (what if we save/restore them when there are no oops?) { - SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0); + SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0, noreg); __ mov_metadata(rax, method()); __ call_VM_leaf( CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), @@ -1760,7 +1760,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, __ call(RuntimeAddress(native_func)); // Verify or restore cpu control state after JNI call - __ restore_cpu_control_state_after_jni(); + __ restore_cpu_control_state_after_jni(noreg); // WARNING - on Windows Java Natives use pascal calling convention and pop the // arguments off of the stack. We could just re-adjust the stack pointer here @@ -1903,7 +1903,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, } { - SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0); + SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0, noreg); // Tell dtrace about this method exit save_native_result(masm, ret_type, stack_slots); __ mov_metadata(rax, method()); @@ -2233,7 +2233,7 @@ void SharedRuntime::generate_deopt_blob() { __ get_thread(rcx); __ push(rcx); // fetch_unroll_info needs to call last_java_frame() - __ set_last_Java_frame(rcx, noreg, noreg, NULL); + __ set_last_Java_frame(rcx, noreg, noreg, NULL, noreg); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info))); @@ -2381,7 +2381,7 @@ void SharedRuntime::generate_deopt_blob() { __ push(rcx); // set last_Java_sp, last_Java_fp - __ set_last_Java_frame(rcx, noreg, rbp, NULL); + __ set_last_Java_frame(rcx, noreg, rbp, NULL, noreg); // Call C code. Need thread but NOT official VM entry // crud. We cannot block on this call, no GC can happen. 
Call should @@ -2478,7 +2478,7 @@ void SharedRuntime::generate_uncommon_trap_blob() { // set last_Java_sp __ get_thread(rdx); - __ set_last_Java_frame(rdx, noreg, noreg, NULL); + __ set_last_Java_frame(rdx, noreg, noreg, NULL, noreg); // Call C code. Need thread but NOT official VM entry // crud. We cannot block on this call, no GC can happen. Call should @@ -2590,7 +2590,7 @@ void SharedRuntime::generate_uncommon_trap_blob() { // set last_Java_sp, last_Java_fp __ get_thread(rdi); - __ set_last_Java_frame(rdi, noreg, rbp, NULL); + __ set_last_Java_frame(rdi, noreg, rbp, NULL, noreg); // Call C code. Need thread but NOT official VM entry // crud. We cannot block on this call, no GC can happen. Call should @@ -2672,7 +2672,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t // Push thread argument and setup last_Java_sp __ get_thread(java_thread); __ push(java_thread); - __ set_last_Java_frame(java_thread, noreg, noreg, NULL); + __ set_last_Java_frame(java_thread, noreg, noreg, NULL, noreg); // if this was not a poll_return then we need to correct the return address now. if (!cause_return) { @@ -2811,7 +2811,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha __ get_thread(rdi); __ push(thread); - __ set_last_Java_frame(thread, noreg, rbp, NULL); + __ set_last_Java_frame(thread, noreg, rbp, NULL, noreg); __ call(RuntimeAddress(destination)); diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp index 82dc4454ef3ba..17feaa46b8f3c 100644 --- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp +++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp @@ -615,25 +615,40 @@ static void gen_c2i_adapter(MacroAssembler *masm, __ bind(skip_fixup); // Since all args are passed on the stack, total_args_passed * - // Interpreter::stackElementSize is the space we need. Plus 1 because - // we also account for the return address location since - // we store it first rather than hold it in rax across all the shuffling + // Interpreter::stackElementSize is the space we need. - int extraspace = (total_args_passed * Interpreter::stackElementSize) + wordSize; + assert(total_args_passed >= 0, "total_args_passed is %d", total_args_passed); + + int extraspace = (total_args_passed * Interpreter::stackElementSize); // stack is aligned, keep it that way + // This is not currently needed or enforced by the interpreter, but + // we might as well conform to the ABI. 
extraspace = align_up(extraspace, 2*wordSize); - // Get return address - __ pop(rax); - // set senderSP value - __ mov(r13, rsp); + __ lea(r13, Address(rsp, wordSize)); + +#ifdef ASSERT + __ check_stack_alignment(r13, "sender stack not aligned"); +#endif + if (extraspace > 0) { + // Pop the return address + __ pop(rax); - __ subptr(rsp, extraspace); + __ subptr(rsp, extraspace); - // Store the return address in the expected location - __ movptr(Address(rsp, 0), rax); + // Push the return address + __ push(rax); + + // Account for the return address location since we store it first rather + // than hold it in a register across all the shuffling + extraspace += wordSize; + } + +#ifdef ASSERT + __ check_stack_alignment(rsp, "callee stack not aligned", wordSize, rax); +#endif // Now write the args into the outgoing interpreter space for (int i = 0; i < total_args_passed; i++) { @@ -779,9 +794,6 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, // If this happens, control eventually transfers back to the compiled // caller, but with an uncorrected stack, causing delayed havoc. - // Pick up the return address - __ movptr(rax, Address(rsp, 0)); - if (VerifyAdapterCalls && (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) { // So, let's test for cascading c2i/i2c adapters right now. @@ -789,6 +801,8 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, // StubRoutines::contains($return_addr), // "i2c adapter must return to an interpreter frame"); __ block_comment("verify_i2c { "); + // Pick up the return address + __ movptr(rax, Address(rsp, 0)); Label L_ok; if (Interpreter::code() != NULL) range_check(masm, rax, r11, @@ -813,22 +827,16 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, // we need to align the outgoing SP for compiled code. __ movptr(r11, rsp); - // Cut-out for having no stack args. Since up to 2 int/oop args are passed - // in registers, we will occasionally have no stack args. - int comp_words_on_stack = 0; + // Pick up the return address + __ pop(rax); + + // Convert 4-byte c2 stack slots to words. + int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord; + if (comp_args_on_stack) { - // Sig words on the stack are greater-than VMRegImpl::stack0. Those in - // registers are below. By subtracting stack0, we either get a negative - // number (all values in registers) or the maximum stack slot accessed. - - // Convert 4-byte c2 stack slots to words. 
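For context on the alignment math in these two adapter hunks: align_up rounds a byte count up to the next multiple of a power-of-two alignment, which is how both the c2i extraspace and the i2c comp_words_on_stack computations keep rsp on a 16-byte boundary. A minimal self-contained sketch (a stand-in for HotSpot's utilities/align.hpp helper, with illustrative LP64 sizes):

#include <cassert>
#include <cstdint>

// Stand-in for HotSpot's align_up: round x up to the next multiple of
// `alignment`, which must be a power of two.
static inline intptr_t align_up_sketch(intptr_t x, intptr_t alignment) {
  assert(alignment > 0 && (alignment & (alignment - 1)) == 0);
  return (x + alignment - 1) & ~(alignment - 1);
}

int main() {
  const intptr_t wordSize = 8;                 // LP64 word
  const intptr_t stackElementSize = 8;         // interpreter stack slot (LP64)

  // c2i: three interpreter args need 24 bytes; rounding to 2*wordSize
  // keeps the callee's rsp 16-byte aligned.
  intptr_t extraspace = 3 * stackElementSize;  // 24
  extraspace = align_up_sketch(extraspace, 2 * wordSize);
  assert(extraspace == 32);

  // i2c: convert 4-byte compiled-code stack slots to whole words.
  const intptr_t stack_slot_size = 4;
  int comp_args_on_stack = 3;                  // 12 bytes of outgoing args
  intptr_t comp_words_on_stack =
      align_up_sketch(comp_args_on_stack * stack_slot_size, wordSize) / wordSize;
  assert(comp_words_on_stack == 2);
  return 0;
}

With extraspace guaranteed to be a multiple of the alignment, the adapter can skip the return-address pop/push shuffle entirely when extraspace == 0, which is what the new `if (extraspace > 0)` guard buys.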
- comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord; - // Round up to miminum stack alignment, in wordSize - comp_words_on_stack = align_up(comp_words_on_stack, 2); __ subptr(rsp, comp_words_on_stack * wordSize); } - // Ensure compiled code always sees stack at proper alignment __ andptr(rsp, -16); @@ -1011,7 +1019,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm Register method = rbx; { // Bypass the barrier for non-static methods - Register flags = rscratch1; + Register flags = rscratch1; __ movl(flags, Address(method, Method::access_flags_offset())); __ testl(flags, JVM_ACC_STATIC); __ jcc(Assembler::zero, L_skip_barrier); // non-static @@ -1253,11 +1261,6 @@ static void verify_oop_args(MacroAssembler* masm, } } -// defined in stubGenerator_x86_64.cpp -OopMap* continuation_enter_setup(MacroAssembler* masm, int& stack_slots); -void fill_continuation_entry(MacroAssembler* masm, Register reg_cont_obj, Register reg_flags); -void continuation_enter_cleanup(MacroAssembler* masm); - static void check_continuation_enter_argument(VMReg actual_vmreg, Register expected_reg, const char* name) { @@ -1326,13 +1329,13 @@ static void gen_continuation_enter(MacroAssembler* masm, __ enter(); stack_slots = 2; // will be adjusted in setup - OopMap* map = continuation_enter_setup(masm, stack_slots); + OopMap* map = __ continuation_enter_setup(stack_slots); // The frame is complete here, but we only record it for the compiled entry, so the frame would appear unsafe, // but that's okay because at the very worst we'll miss an async sample, but we're in interp_only_mode anyway. __ verify_oop(reg_cont_obj); - fill_continuation_entry(masm, reg_cont_obj, reg_is_virtual); + __ fill_continuation_entry(reg_cont_obj, reg_is_virtual); // If continuation, call to thaw. Otherwise, resolve the call and exit. __ testptr(reg_is_cont, reg_is_cont); @@ -1361,14 +1364,14 @@ static void gen_continuation_enter(MacroAssembler* masm, __ enter(); stack_slots = 2; // will be adjusted in setup - OopMap* map = continuation_enter_setup(masm, stack_slots); + OopMap* map = __ continuation_enter_setup(stack_slots); // Frame is now completed as far as size and linkage. frame_complete = __ pc() - start; __ verify_oop(reg_cont_obj); - fill_continuation_entry(masm, reg_cont_obj, reg_is_virtual); + __ fill_continuation_entry(reg_cont_obj, reg_is_virtual); // If isContinue, call to thaw. 
Otherwise, call Continuation.enter(Continuation c, boolean isContinue) __ testptr(reg_is_cont, reg_is_cont); @@ -1411,7 +1414,7 @@ static void gen_continuation_enter(MacroAssembler* masm, __ bind(L_exit); - continuation_enter_cleanup(masm); + __ continuation_enter_cleanup(); __ pop(rbp); __ ret(0); @@ -1419,7 +1422,7 @@ static void gen_continuation_enter(MacroAssembler* masm, exception_offset = __ pc() - start; - continuation_enter_cleanup(masm); + __ continuation_enter_cleanup(); __ pop(rbp); __ movptr(c_rarg0, r15_thread); @@ -1694,7 +1697,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, Label hit; Label exception_pending; - assert_different_registers(ic_reg, receiver, rscratch1); + assert_different_registers(ic_reg, receiver, rscratch1, rscratch2); __ verify_oop(receiver); __ load_klass(rscratch1, receiver, rscratch2); __ cmpq(ic_reg, rscratch1); @@ -1754,15 +1757,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, } #ifdef ASSERT - { - Label L; - __ mov(rax, rsp); - __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI) - __ cmpptr(rax, rsp); - __ jcc(Assembler::equal, L); - __ stop("improperly aligned stack"); - __ bind(L); - } + __ check_stack_alignment(rsp, "improperly aligned stack"); #endif /* ASSERT */ @@ -1911,14 +1906,14 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, intptr_t the_pc = (intptr_t) __ pc(); oop_maps->add_gc_map(the_pc - start, map); - __ set_last_Java_frame(rsp, noreg, (address)the_pc); + __ set_last_Java_frame(rsp, noreg, (address)the_pc, rscratch1); // We have all of the arguments setup at this point. We must not touch any register // argument registers at this point (what if we save/restore them there are no oop? { - SkipIfEqual skip(masm, &DTraceMethodProbes, false); + SkipIfEqual skip(masm, &DTraceMethodProbes, false, rscratch1); // protect the args we've loaded save_args(masm, total_c_args, c_arg, out_regs); __ mov_metadata(c_rarg1, method()); @@ -2020,7 +2015,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, __ call(RuntimeAddress(native_func)); // Verify or restore cpu control state after JNI call - __ restore_cpu_control_state_after_jni(); + __ restore_cpu_control_state_after_jni(rscratch1); // Unpack native results. switch (ret_type) { @@ -2149,7 +2144,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, __ bind(fast_done); } { - SkipIfEqual skip(masm, &DTraceMethodProbes, false); + SkipIfEqual skip(masm, &DTraceMethodProbes, false, rscratch1); save_native_result(masm, ret_type, stack_slots); __ mov_metadata(c_rarg1, method()); __ call_VM_leaf( @@ -2423,7 +2418,7 @@ void SharedRuntime::generate_deopt_blob() { // Save everything in sight. RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, /*save_wide_vectors*/ true); // fetch_unroll_info needs to call last_java_frame() - __ set_last_Java_frame(noreg, noreg, NULL); + __ set_last_Java_frame(noreg, noreg, NULL, rscratch1); __ movl(c_rarg1, Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset()))); __ movl(Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())), -1); @@ -2505,7 +2500,7 @@ void SharedRuntime::generate_deopt_blob() { // fetch_unroll_info needs to call last_java_frame(). 
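A recurring pattern in this change: x86_64 MacroAssembler helpers that may touch a far-away address (set_last_Java_frame, incrementl(ExternalAddress(...)), cmp32, ldmxcsr, ...) now take their scratch register explicitly instead of silently clobbering rscratch1 — the removed comment "This can destroy rscratch1 if counter is far from the code cache" names the hazard. The underlying constraint is that a RIP-relative disp32 operand only reaches ±2GB. A rough sketch of that reachability test (an approximation of the assembler's signed-32-bit displacement check, not the exact HotSpot code):

#include <cstdint>
#include <cstdio>

// A RIP-relative memory operand encodes a signed 32-bit displacement
// from the end of the instruction; anything farther must first be
// materialized in a scratch GPR with a 64-bit move.
static bool reachable_rip_relative(uint64_t insn_end, uint64_t target) {
  int64_t disp = (int64_t)(target - insn_end);
  return disp == (int64_t)(int32_t)disp;  // survives a 32-bit round trip?
}

int main() {
  uint64_t pc = 0x00007f0000000000ULL;
  printf("%d\n", reachable_rip_relative(pc, pc + 0x1000));        // 1: near, no scratch needed
  printf("%d\n", reachable_rip_relative(pc, pc + (5ULL << 30)));  // 0: far, needs mov64 + indirect
  return 0;
}

When the test fails, the assembler materializes the target in the caller-supplied scratch register first, which is why the register is now part of each helper's signature and visible at every call site.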
- __ set_last_Java_frame(noreg, noreg, NULL); + __ set_last_Java_frame(noreg, noreg, NULL, rscratch1); #ifdef ASSERT { Label L; __ cmpptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD); @@ -2653,7 +2648,7 @@ void SharedRuntime::generate_deopt_blob() { // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP. // Don't need the precise return PC here, just precise enough to point into this code blob. address the_pc = __ pc(); - __ set_last_Java_frame(noreg, rbp, the_pc); + __ set_last_Java_frame(noreg, rbp, the_pc, rscratch1); __ andptr(rsp, -(StackAlignmentInBytes)); // Fix stack alignment as required by ABI __ mov(c_rarg0, r15_thread); @@ -2724,7 +2719,7 @@ void SharedRuntime::generate_uncommon_trap_blob() { // runtime expects it. __ movl(c_rarg1, j_rarg0); - __ set_last_Java_frame(noreg, noreg, NULL); + __ set_last_Java_frame(noreg, noreg, NULL, rscratch1); // Call C code. Need thread but NOT official VM entry // crud. We cannot block on this call, no GC can happen. Call should @@ -2841,7 +2836,7 @@ void SharedRuntime::generate_uncommon_trap_blob() { // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP. // Don't need the precise return PC here, just precise enough to point into this code blob. address the_pc = __ pc(); - __ set_last_Java_frame(noreg, rbp, the_pc); + __ set_last_Java_frame(noreg, rbp, the_pc, rscratch1); // Call C code. Need thread but NOT official VM entry // crud. We cannot block on this call, no GC can happen. Call should @@ -2918,7 +2913,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t // address of the call in order to generate an oopmap. Hence, we do all the // work ourselves. - __ set_last_Java_frame(noreg, noreg, NULL); // JavaFrameAnchor::capture_last_Java_pc() will get the pc from the return address, which we store next: + __ set_last_Java_frame(noreg, noreg, NULL, rscratch1); // JavaFrameAnchor::capture_last_Java_pc() will get the pc from the return address, which we store next: // The return address must always be correct so that frame constructor never // sees an invalid pc. @@ -3054,7 +3049,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha ResourceMark rm; CodeBuffer buffer(name, 1200, 512); - MacroAssembler* masm = new MacroAssembler(&buffer); + MacroAssembler* masm = new MacroAssembler(&buffer); int frame_size_in_words; @@ -3068,7 +3063,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha int frame_complete = __ offset(); - __ set_last_Java_frame(noreg, noreg, NULL); + __ set_last_Java_frame(noreg, noreg, NULL, rscratch1); __ mov(c_rarg0, r15_thread); @@ -3464,7 +3459,7 @@ void OptoRuntime::generate_exception_blob() { // At a method handle call, the stack may not be properly aligned // when returning with an exception. 
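The `andptr(rsp, -(StackAlignmentInBytes))` idiom recurring around these call sites rounds the stack pointer down to the ABI's 16-byte boundary before calling into C. Since the stack grows downward, masking the low bits can only release extra space, never clobber live data. A tiny sketch with illustrative values:

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t StackAlignmentInBytes = 16;      // x86_64 ABI
  intptr_t rsp = (intptr_t)0x7ffdeadbee8ULL;      // misaligned by 8
  rsp &= -StackAlignmentInBytes;                  // -16 == ~0xF: round down
  assert((rsp & (StackAlignmentInBytes - 1)) == 0);
  return 0;
}

This rounded-down property is also what the new check_stack_alignment helper asserts in debug builds, replacing the hand-rolled mov/and/cmp/jcc/stop sequence deleted above.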
address the_pc = __ pc(); - __ set_last_Java_frame(noreg, noreg, the_pc); + __ set_last_Java_frame(noreg, noreg, the_pc, rscratch1); __ mov(c_rarg0, r15_thread); __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C))); diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp index 6952e9539439b..e60a086770351 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp @@ -343,10 +343,10 @@ class StubGenerator: public StubCodeGenerator { #endif // set pending exception __ verify_oop(rax); - __ movptr(Address(rcx, Thread::pending_exception_offset()), rax ); - __ lea(Address(rcx, Thread::exception_file_offset ()), - ExternalAddress((address)__FILE__)); - __ movl(Address(rcx, Thread::exception_line_offset ()), __LINE__ ); + __ movptr(Address(rcx, Thread::pending_exception_offset()), rax); + __ lea(Address(rcx, Thread::exception_file_offset()), + ExternalAddress((address)__FILE__), noreg); + __ movl(Address(rcx, Thread::exception_line_offset()), __LINE__ ); // complete return to VM assert(StubRoutines::_call_stub_return_address != NULL, "_call_stub_return_address must have been generated before"); __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address)); @@ -3846,7 +3846,7 @@ class StubGenerator: public StubCodeGenerator { } // Set up last_Java_sp and last_Java_fp - __ set_last_Java_frame(java_thread, rsp, rbp, NULL); + __ set_last_Java_frame(java_thread, rsp, rbp, NULL, noreg); // Call runtime BLOCK_COMMENT("call runtime_entry"); @@ -3930,7 +3930,7 @@ class StubGenerator: public StubCodeGenerator { static void jfr_prologue(address the_pc, MacroAssembler* masm) { Register java_thread = rdi; __ get_thread(java_thread); - __ set_last_Java_frame(java_thread, rsp, rbp, the_pc); + __ set_last_Java_frame(java_thread, rsp, rbp, the_pc, noreg); __ movptr(Address(rsp, 0), java_thread); } diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp index 572b9d8b67eb3..62674da92bc7f 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp @@ -79,25 +79,20 @@ #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") const int MXCSR_MASK = 0xFFC0; // Mask out any pending exceptions -OopMap* continuation_enter_setup(MacroAssembler* masm, int& stack_slots); -void fill_continuation_entry(MacroAssembler* masm); -void continuation_enter_cleanup(MacroAssembler* masm); - // Stub Code definitions class StubGenerator: public StubCodeGenerator { private: #ifdef PRODUCT -#define inc_counter_np(counter) ((void)0) +#define INC_COUNTER_NP(counter, rscratch) ((void)0) #else - void inc_counter_np_(int& counter) { - // This can destroy rscratch1 if counter is far from the code cache - __ incrementl(ExternalAddress((address)&counter)); + void inc_counter_np(int& counter, Register rscratch) { + __ incrementl(ExternalAddress((address)&counter), rscratch); } -#define inc_counter_np(counter) \ +#define INC_COUNTER_NP(counter, rscratch) \ BLOCK_COMMENT("inc_counter " #counter); \ - inc_counter_np_(counter); + inc_counter_np(counter, rscratch); #endif // Call stubs are used to call Java from C @@ -301,9 +296,9 @@ class StubGenerator: public StubCodeGenerator { __ movl(rax, mxcsr_save); __ andl(rax, MXCSR_MASK); // Only check control and mask bits ExternalAddress mxcsr_std(StubRoutines::x86::addr_mxcsr_std()); - __ cmp32(rax, mxcsr_std); + __ 
cmp32(rax, mxcsr_std, rscratch1); __ jcc(Assembler::equal, skip_ldmx); - __ ldmxcsr(mxcsr_std); + __ ldmxcsr(mxcsr_std, rscratch1); __ bind(skip_ldmx); } #endif @@ -621,12 +616,12 @@ class StubGenerator: public StubCodeGenerator { __ stmxcsr(mxcsr_save); __ movl(rax, mxcsr_save); __ andl(rax, MXCSR_MASK); // Only check control and mask bits - __ cmp32(rax, mxcsr_std); + __ cmp32(rax, mxcsr_std, rscratch1); __ jcc(Assembler::equal, ok_ret); __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall"); - __ ldmxcsr(mxcsr_std); + __ ldmxcsr(mxcsr_std, rscratch1); __ bind(ok_ret); __ addptr(rsp, wordSize); @@ -1042,7 +1037,7 @@ class StubGenerator: public StubCodeGenerator { Label exit, error; __ pushf(); - __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr())); + __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()), rscratch1); __ push(r12); @@ -1674,7 +1669,7 @@ class StubGenerator: public StubCodeGenerator { } bs->arraycopy_epilogue(_masm, decorators, type, from, to, count); restore_argument_regs(type); - inc_counter_np(get_profile_ctr(shift)); // Update counter after rscratch1 is free + INC_COUNTER_NP(get_profile_ctr(shift), rscratch1); // Update counter after rscratch1 is free __ xorptr(rax, rax); // return 0 __ vzeroupper(); __ leave(); // required for proper stackwalking of RuntimeStub frame @@ -1849,7 +1844,7 @@ class StubGenerator: public StubCodeGenerator { } bs->arraycopy_epilogue(_masm, decorators, type, from, to, count); restore_argument_regs(type); - inc_counter_np(get_profile_ctr(shift)); // Update counter after rscratch1 is free + INC_COUNTER_NP(get_profile_ctr(shift), rscratch1); // Update counter after rscratch1 is free __ xorptr(rax, rax); // return 0 __ vzeroupper(); __ leave(); // required for proper stackwalking of RuntimeStub frame @@ -1963,7 +1958,7 @@ class StubGenerator: public StubCodeGenerator { __ BIND(L_exit); address ucme_exit_pc = __ pc(); restore_arg_regs(); - inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free + INC_COUNTER_NP(SharedRuntime::_jbyte_array_copy_ctr, rscratch1); // Update counter after rscratch1 is free __ xorptr(rax, rax); // return 0 __ vzeroupper(); __ leave(); // required for proper stackwalking of RuntimeStub frame @@ -2064,7 +2059,7 @@ class StubGenerator: public StubCodeGenerator { __ jcc(Assembler::notZero, L_copy_8_bytes); } restore_arg_regs(); - inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free + INC_COUNTER_NP(SharedRuntime::_jbyte_array_copy_ctr, rscratch1); // Update counter after rscratch1 is free __ xorptr(rax, rax); // return 0 __ vzeroupper(); __ leave(); // required for proper stackwalking of RuntimeStub frame @@ -2077,7 +2072,7 @@ class StubGenerator: public StubCodeGenerator { copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); } restore_arg_regs(); - inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free + INC_COUNTER_NP(SharedRuntime::_jbyte_array_copy_ctr, rscratch1); // Update counter after rscratch1 is free __ xorptr(rax, rax); // return 0 __ vzeroupper(); __ leave(); // required for proper stackwalking of RuntimeStub frame @@ -2183,7 +2178,7 @@ class StubGenerator: public StubCodeGenerator { __ BIND(L_exit); address ucme_exit_pc = __ pc(); restore_arg_regs(); - inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free + 
INC_COUNTER_NP(SharedRuntime::_jshort_array_copy_ctr, rscratch1); // Update counter after rscratch1 is free __ xorptr(rax, rax); // return 0 __ vzeroupper(); __ leave(); // required for proper stackwalking of RuntimeStub frame @@ -2299,7 +2294,7 @@ class StubGenerator: public StubCodeGenerator { __ jcc(Assembler::notZero, L_copy_8_bytes); } restore_arg_regs(); - inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free + INC_COUNTER_NP(SharedRuntime::_jshort_array_copy_ctr, rscratch1); // Update counter after rscratch1 is free __ xorptr(rax, rax); // return 0 __ vzeroupper(); __ leave(); // required for proper stackwalking of RuntimeStub frame @@ -2312,7 +2307,7 @@ class StubGenerator: public StubCodeGenerator { copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); } restore_arg_regs(); - inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free + INC_COUNTER_NP(SharedRuntime::_jshort_array_copy_ctr, rscratch1); // Update counter after rscratch1 is free __ xorptr(rax, rax); // return 0 __ vzeroupper(); __ leave(); // required for proper stackwalking of RuntimeStub frame @@ -2419,7 +2414,7 @@ class StubGenerator: public StubCodeGenerator { address ucme_exit_pc = __ pc(); bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count); restore_arg_regs_using_thread(); - inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free + INC_COUNTER_NP(SharedRuntime::_jint_array_copy_ctr, rscratch1); // Update counter after rscratch1 is free __ vzeroupper(); __ xorptr(rax, rax); // return 0 __ leave(); // required for proper stackwalking of RuntimeStub frame @@ -2524,7 +2519,7 @@ class StubGenerator: public StubCodeGenerator { __ jmp(L_exit); } restore_arg_regs_using_thread(); - inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free + INC_COUNTER_NP(SharedRuntime::_jint_array_copy_ctr, rscratch1); // Update counter after rscratch1 is free __ xorptr(rax, rax); // return 0 __ vzeroupper(); __ leave(); // required for proper stackwalking of RuntimeStub frame @@ -2540,7 +2535,7 @@ class StubGenerator: public StubCodeGenerator { __ BIND(L_exit); bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count); restore_arg_regs_using_thread(); - inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free + INC_COUNTER_NP(SharedRuntime::_jint_array_copy_ctr, rscratch1); // Update counter after rscratch1 is free __ xorptr(rax, rax); // return 0 __ vzeroupper(); __ leave(); // required for proper stackwalking of RuntimeStub frame @@ -2632,7 +2627,7 @@ class StubGenerator: public StubCodeGenerator { __ jmp(L_exit); } else { restore_arg_regs_using_thread(); - inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free + INC_COUNTER_NP(SharedRuntime::_jlong_array_copy_ctr, rscratch1); // Update counter after rscratch1 is free __ xorptr(rax, rax); // return 0 __ vzeroupper(); __ leave(); // required for proper stackwalking of RuntimeStub frame @@ -2649,11 +2644,9 @@ class StubGenerator: public StubCodeGenerator { __ BIND(L_exit); bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count); restore_arg_regs_using_thread(); - if (is_oop) { - inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free - } else { - inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free - } 
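The rename from inc_counter_np to INC_COUNTER_NP follows the convention that ALL_CAPS marks a macro, and the added parameter makes the clobbered scratch register explicit at each call site. Reduced to plain C++ (Register is a stand-in type; the real body emits incrementl(ExternalAddress((address)&counter), rscratch)):

#include <cstdio>

using Register = const char*;  // stand-in for HotSpot's Register

#ifdef PRODUCT
#define INC_COUNTER_NP(counter, rscratch) ((void)0)   // compiles away
#else
static void inc_counter_np(int& counter, Register rscratch) {
  ++counter;  // generated code: __ incrementl(ExternalAddress(&counter), rscratch);
  printf("bumped via scratch %s -> %d\n", rscratch, counter);
}
#define INC_COUNTER_NP(counter, rscratch) inc_counter_np(counter, rscratch)
#endif

int main() {
  int jbyte_array_copy_ctr = 0;
  INC_COUNTER_NP(jbyte_array_copy_ctr, "rscratch1");
  return 0;
}

The "Update counter after rscratch1 is free" comments carried through the diff make the same point: the bump is placed where the named scratch register holds no live value.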
+ INC_COUNTER_NP(is_oop ? SharedRuntime::_oop_array_copy_ctr : + SharedRuntime::_jlong_array_copy_ctr, + rscratch1); // Update counter after rscratch1 is free __ vzeroupper(); __ xorptr(rax, rax); // return 0 __ leave(); // required for proper stackwalking of RuntimeStub frame @@ -2734,7 +2727,7 @@ class StubGenerator: public StubCodeGenerator { __ jmp(L_exit); } else { restore_arg_regs_using_thread(); - inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free + INC_COUNTER_NP(SharedRuntime::_jlong_array_copy_ctr, rscratch1); // Update counter after rscratch1 is free __ xorptr(rax, rax); // return 0 __ vzeroupper(); __ leave(); // required for proper stackwalking of RuntimeStub frame @@ -2750,11 +2743,9 @@ class StubGenerator: public StubCodeGenerator { __ BIND(L_exit); bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count); restore_arg_regs_using_thread(); - if (is_oop) { - inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free - } else { - inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free - } + INC_COUNTER_NP(is_oop ? SharedRuntime::_oop_array_copy_ctr : + SharedRuntime::_jlong_array_copy_ctr, + rscratch1); // Update counter after rscratch1 is free __ vzeroupper(); __ xorptr(rax, rax); // return 0 __ leave(); // required for proper stackwalking of RuntimeStub frame @@ -2974,7 +2965,7 @@ class StubGenerator: public StubCodeGenerator { __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); __ movptr(r10, Address(rsp, saved_r10_offset * wordSize)); restore_arg_regs(); - inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free + INC_COUNTER_NP(SharedRuntime::_checkcast_array_copy_ctr, rscratch1); // Update counter after rscratch1 is free __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); @@ -3015,7 +3006,7 @@ class StubGenerator: public StubCodeGenerator { __ enter(); // required for proper stackwalking of RuntimeStub frame // bump this on entry, not on exit: - inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr); + INC_COUNTER_NP(SharedRuntime::_unsafe_array_copy_ctr, rscratch1); __ mov(bits, from); __ orptr(bits, to); @@ -3138,7 +3129,7 @@ class StubGenerator: public StubCodeGenerator { #endif // bump this on entry, not on exit: - inc_counter_np(SharedRuntime::_generic_array_copy_ctr); + INC_COUNTER_NP(SharedRuntime::_generic_array_copy_ctr, rscratch1); //----------------------------------------------------------------------- // Assembler stub will be used for this call to arraycopy @@ -7438,7 +7429,7 @@ address generate_avx_ghash_processBlocks() { OopMap* map = new OopMap(framesize, 1); oop_maps->add_gc_map(frame_complete, map); - __ set_last_Java_frame(rsp, rbp, the_pc); + __ set_last_Java_frame(rsp, rbp, the_pc, rscratch1); __ movptr(c_rarg0, r15_thread); __ movptr(c_rarg1, rsp); __ call_VM_leaf(Continuation::freeze_entry(), 2); @@ -7450,7 +7441,7 @@ address generate_avx_ghash_processBlocks() { __ jcc(Assembler::notZero, L_pinned); __ movptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset())); - continuation_enter_cleanup(_masm); + __ continuation_enter_cleanup(); __ pop(rbp); __ ret(0); @@ -7630,7 +7621,7 @@ address generate_avx_ghash_processBlocks() { int frame_complete = the_pc - start; - __ set_last_Java_frame(rsp, rbp, the_pc); + __ set_last_Java_frame(rsp, rbp, the_pc, rscratch1); __ movptr(c_rarg0, r15_thread); __ call_VM_leaf(CAST_FROM_FN_PTR(address, 
JfrIntrinsicSupport::write_checkpoint), 1); __ reset_last_Java_frame(true); @@ -7723,7 +7714,7 @@ address generate_avx_ghash_processBlocks() { // Set up last_Java_sp and last_Java_fp address the_pc = __ pc(); - __ set_last_Java_frame(rsp, rbp, the_pc); + __ set_last_Java_frame(rsp, rbp, the_pc, rscratch1); __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack // Call runtime @@ -8166,99 +8157,3 @@ void StubGenerator_generate(CodeBuffer* code, int phase) { } #undef __ -#define __ masm-> - -//---------------------------- continuation_enter_setup --------------------------- -// -// Arguments: -// None. -// -// Results: -// rsp: pointer to blank ContinuationEntry -// -// Kills: -// rax -// -OopMap* continuation_enter_setup(MacroAssembler* masm, int& stack_slots) { - assert(ContinuationEntry::size() % VMRegImpl::stack_slot_size == 0, ""); - assert(in_bytes(ContinuationEntry::cont_offset()) % VMRegImpl::stack_slot_size == 0, ""); - assert(in_bytes(ContinuationEntry::chunk_offset()) % VMRegImpl::stack_slot_size == 0, ""); - - stack_slots += checked_cast(ContinuationEntry::size()) / wordSize; - __ subptr(rsp, checked_cast(ContinuationEntry::size())); - - int frame_size = (checked_cast(ContinuationEntry::size()) + wordSize) / VMRegImpl::stack_slot_size; - OopMap* map = new OopMap(frame_size, 0); - ContinuationEntry::setup_oopmap(map); - - __ movptr(rax, Address(r15_thread, JavaThread::cont_entry_offset())); - __ movptr(Address(rsp, ContinuationEntry::parent_offset()), rax); - __ movptr(Address(r15_thread, JavaThread::cont_entry_offset()), rsp); - - return map; -} - -//---------------------------- fill_continuation_entry --------------------------- -// -// Arguments: -// rsp: pointer to blank Continuation entry -// reg_cont_obj: pointer to the continuation -// reg_flags: flags -// -// Results: -// rsp: pointer to filled out ContinuationEntry -// -// Kills: -// rax -// -void fill_continuation_entry(MacroAssembler* masm, Register reg_cont_obj, Register reg_flags) { - assert_different_registers(rax, reg_cont_obj, reg_flags); - - DEBUG_ONLY(__ movl(Address(rsp, ContinuationEntry::cookie_offset()), ContinuationEntry::cookie_value());) - - __ movptr(Address(rsp, ContinuationEntry::cont_offset()), reg_cont_obj); - __ movl (Address(rsp, ContinuationEntry::flags_offset()), reg_flags); - __ movptr(Address(rsp, ContinuationEntry::chunk_offset()), 0); - __ movl(Address(rsp, ContinuationEntry::argsize_offset()), 0); - __ movl(Address(rsp, ContinuationEntry::pin_count_offset()), 0); - - __ movptr(rax, Address(r15_thread, JavaThread::cont_fastpath_offset())); - __ movptr(Address(rsp, ContinuationEntry::parent_cont_fastpath_offset()), rax); - __ movq(rax, Address(r15_thread, JavaThread::held_monitor_count_offset())); - __ movq(Address(rsp, ContinuationEntry::parent_held_monitor_count_offset()), rax); - - __ movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), 0); - __ movq(Address(r15_thread, JavaThread::held_monitor_count_offset()), 0); -} - -//---------------------------- continuation_enter_cleanup --------------------------- -// -// Arguments: -// rsp: pointer to the ContinuationEntry -// -// Results: -// rsp: pointer to the spilled rbp in the entry frame -// -// Kills: -// rbx -// -void continuation_enter_cleanup(MacroAssembler* masm) { -#ifdef ASSERT - Label L_good_sp; - __ cmpptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset())); - __ jcc(Assembler::equal, L_good_sp); - __ stop("Incorrect rsp at continuation_enter_cleanup"); - __ bind(L_good_sp); -#endif - - __ movptr(rbx, Address(rsp, 
ContinuationEntry::parent_cont_fastpath_offset())); - __ movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), rbx); - __ movq(rbx, Address(rsp, ContinuationEntry::parent_held_monitor_count_offset())); - __ movq(Address(r15_thread, JavaThread::held_monitor_count_offset()), rbx); - - __ movptr(rbx, Address(rsp, ContinuationEntry::parent_offset())); - __ movptr(Address(r15_thread, JavaThread::cont_entry_offset()), rbx); - __ addptr(rsp, checked_cast(ContinuationEntry::size())); -} - -#undef __ diff --git a/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp b/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp index 940c2e4038d10..d49fdcf610147 100644 --- a/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp +++ b/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp @@ -581,7 +581,7 @@ void TemplateInterpreterGenerator::lock_method() { // get receiver (assume this is frequent case) __ movptr(rax, Address(rlocals, Interpreter::local_offset_in_bytes(0))); __ jcc(Assembler::zero, done); - __ load_mirror(rax, rbx); + __ load_mirror(rax, rbx, rscratch2); #ifdef ASSERT { @@ -625,7 +625,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) { __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase __ push(rbx); // save Method* // Get mirror and store it in the frame as GC root for this Method* - __ load_mirror(rdx, rbx); + __ load_mirror(rdx, rbx, rscratch2); __ push(rdx); if (ProfileInterpreter) { Label method_data_continue; @@ -790,7 +790,7 @@ void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) { // native method than the typical interpreter frame setup. address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { // determine code generation flags - bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods; + bool inc_counter = UseCompiler || CountCompiledCalls; // rbx: Method* // rbcp: sender sp @@ -1000,7 +1000,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { Label L; __ movptr(rax, Address(method, Method::native_function_offset())); ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry()); - __ cmpptr(rax, unsatisfied.addr()); + __ cmpptr(rax, unsatisfied.addr(), rscratch1); __ jcc(Assembler::notEqual, L); __ call_VM(noreg, CAST_FROM_FN_PTR(address, @@ -1020,13 +1020,13 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { // set_last_Java_frame_before_call // It is enough that the pc() // points into the right code segment. It does not have to be the correct return pc. - __ set_last_Java_frame(thread, noreg, rbp, __ pc()); + __ set_last_Java_frame(thread, noreg, rbp, __ pc(), noreg); #else __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset())); // It is enough that the pc() points into the right code // segment. It does not have to be the correct return pc. - __ set_last_Java_frame(rsp, rbp, (address) __ pc()); + __ set_last_Java_frame(rsp, rbp, (address) __ pc(), rscratch1); #endif // _LP64 // change thread state @@ -1052,7 +1052,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { // 64: result potentially in rax or xmm0 // Verify or restore cpu control state after JNI call - __ restore_cpu_control_state_after_jni(); + __ restore_cpu_control_state_after_jni(rscratch1); // NOTE: The order of these pushes is known to frame::interpreter_frame_result // in order to extract the result of a method call. 
If the order of these @@ -1075,10 +1075,10 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT)); ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE)); __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize), - float_handler.addr()); + float_handler.addr(), noreg); __ jcc(Assembler::equal, push_double); __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize), - double_handler.addr()); + double_handler.addr(), noreg); __ jcc(Assembler::notEqual, L); __ bind(push_double); __ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0). @@ -1327,7 +1327,7 @@ address TemplateInterpreterGenerator::generate_abstract_entry(void) { // address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) { // determine code generation flags - bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods; + bool inc_counter = UseCompiler || CountCompiledCalls; // ebx: Method* // rbcp: sender sp (set in InterpreterMacroAssembler::prepare_to_jump_from_interpreted / generate_call_stub) @@ -1619,14 +1619,14 @@ void TemplateInterpreterGenerator::generate_throw_exception() { __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); __ get_thread(thread); // PC must point into interpreter here - __ set_last_Java_frame(thread, noreg, rbp, __ pc()); + __ set_last_Java_frame(thread, noreg, rbp, __ pc(), noreg); __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx); __ get_thread(thread); #else __ mov(c_rarg1, rsp); __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); // PC must point into interpreter here - __ set_last_Java_frame(noreg, rbp, __ pc()); + __ set_last_Java_frame(noreg, rbp, __ pc(), rscratch1); __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2); #endif __ reset_last_Java_frame(thread, true); @@ -1822,11 +1822,11 @@ address TemplateInterpreterGenerator::generate_trace_code(TosState state) { } void TemplateInterpreterGenerator::count_bytecode() { - __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value)); + __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value), rscratch1); } void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { - __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()])); + __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]), rscratch1); } void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { @@ -1835,7 +1835,7 @@ void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { __ orl(rbx, ((int) t->bytecode()) << BytecodePairHistogram::log2_number_of_codes); - __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx); + __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx, rscratch1); __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters)); __ incrementl(Address(rscratch1, rbx, Address::times_4)); } @@ -1863,7 +1863,8 @@ void TemplateInterpreterGenerator::trace_bytecode(Template* t) { void TemplateInterpreterGenerator::stop_interpreter_at() { Label L; __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value), - StopInterpreterAt); + StopInterpreterAt, 
+ rscratch1); __ jcc(Assembler::notEqual, L); __ int3(); __ bind(L); diff --git a/src/hotspot/cpu/x86/templateTable_x86.cpp b/src/hotspot/cpu/x86/templateTable_x86.cpp index bae37ecb5f40d..8566c680fd726 100644 --- a/src/hotspot/cpu/x86/templateTable_x86.cpp +++ b/src/hotspot/cpu/x86/templateTable_x86.cpp @@ -289,10 +289,10 @@ void TemplateTable::fconst(int value) { __ xorps(xmm0, xmm0); break; case 1: - __ movflt(xmm0, ExternalAddress((address) &one)); + __ movflt(xmm0, ExternalAddress((address) &one), rscratch1); break; case 2: - __ movflt(xmm0, ExternalAddress((address) &two)); + __ movflt(xmm0, ExternalAddress((address) &two), rscratch1); break; default: ShouldNotReachHere(); @@ -320,7 +320,7 @@ void TemplateTable::dconst(int value) { __ xorpd(xmm0, xmm0); break; case 1: - __ movdbl(xmm0, ExternalAddress((address) &one)); + __ movdbl(xmm0, ExternalAddress((address) &one), rscratch1); break; default: ShouldNotReachHere(); @@ -446,7 +446,7 @@ void TemplateTable::fast_aldc(bool wide) { Label notNull; ExternalAddress null_sentinel((address)Universe::the_null_sentinel_addr()); __ movptr(tmp, null_sentinel); - __ resolve_oop_handle(tmp); + __ resolve_oop_handle(tmp, rscratch2); __ cmpoop(tmp, result); __ jccb(Assembler::notEqual, notNull); __ xorptr(result, result); // NULL object reference @@ -1127,11 +1127,10 @@ void TemplateTable::aastore() { __ testptr(rax, rax); __ jcc(Assembler::zero, is_null); - Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); // Move subklass into rbx - __ load_klass(rbx, rax, tmp_load_klass); + __ load_klass(rbx, rax, rscratch1); // Move superklass into rax - __ load_klass(rax, rdx, tmp_load_klass); + __ load_klass(rax, rdx, rscratch1); __ movptr(rax, Address(rax, ObjArrayKlass::element_klass_offset())); @@ -1174,8 +1173,7 @@ void TemplateTable::bastore() { index_check(rdx, rbx); // prefer index in rbx // Need to check whether array is boolean or byte // since both types share the bastore bytecode. 
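For context on the bastore check touched here: T_BOOLEAN and T_BYTE arrays share the bastore bytecode, and a bit in the klass's layout helper distinguishes them so boolean stores can be masked to 0/1. A behavioral sketch (the diffbit position below is made up for illustration, not Klass::layout_helper_boolean_diffbit()'s real encoding):

#include <cassert>

int main() {
  const int diffbit = 1 << 7;        // hypothetical boolean marker bit
  int layout_helper_bool = diffbit;  // boolean[] case
  int layout_helper_byte = 0;        // byte[] case

  int value = 0x55;                  // arbitrary jint being stored
  int stored_bool = (layout_helper_bool & diffbit) ? (value & 1) : value;
  int stored_byte = (layout_helper_byte & diffbit) ? (value & 1) : value;

  assert(stored_bool == 1);          // boolean[]: only the low bit survives
  assert(stored_byte == 0x55);       // byte[]: stored unchanged
  return 0;
}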
- Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); - __ load_klass(rcx, rdx, tmp_load_klass); + __ load_klass(rcx, rdx, rscratch1); __ movl(rcx, Address(rcx, Klass::layout_helper_offset())); int diffbit = Klass::layout_helper_boolean_diffbit(); __ testl(rcx, diffbit); @@ -1546,7 +1544,7 @@ void TemplateTable::fop2(Operation op) { __ movflt(xmm1, xmm0); __ pop_f(xmm0); __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2); -#else +#else // !_LP64 __ push_f(xmm0); __ pop_f(); __ fld_s(at_rsp()); @@ -1555,7 +1553,7 @@ void TemplateTable::fop2(Operation op) { __ pop(rax); // pop second operand off the stack __ push_f(); __ pop_f(xmm0); -#endif +#endif // _LP64 break; default: ShouldNotReachHere(); @@ -1564,7 +1562,7 @@ void TemplateTable::fop2(Operation op) { } else { #ifdef _LP64 ShouldNotReachHere(); -#else +#else // !_LP64 switch (op) { case add: __ fadd_s (at_rsp()); break; case sub: __ fsubr_s(at_rsp()); break; @@ -1609,7 +1607,7 @@ void TemplateTable::dop2(Operation op) { __ movdbl(xmm1, xmm0); __ pop_d(xmm0); __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2); -#else +#else // !_LP64 __ push_d(xmm0); __ pop_d(); __ fld_d(at_rsp()); @@ -1619,7 +1617,7 @@ void TemplateTable::dop2(Operation op) { __ pop(rdx); __ push_d(); __ pop_d(xmm0); -#endif +#endif // _LP64 break; default: ShouldNotReachHere(); @@ -1628,7 +1626,7 @@ void TemplateTable::dop2(Operation op) { } else { #ifdef _LP64 ShouldNotReachHere(); -#else +#else // !_LP64 switch (op) { case add: __ fadd_d (at_rsp()); break; case sub: __ fsubr_d(at_rsp()); break; @@ -1657,7 +1655,7 @@ void TemplateTable::dop2(Operation op) { // Pop double precision number from rsp. __ pop(rax); __ pop(rdx); -#endif +#endif // _LP64 } } @@ -1691,7 +1689,7 @@ void TemplateTable::fneg() { transition(ftos, ftos); if (UseSSE >= 1) { static jlong *float_signflip = double_quadword(&float_signflip_pool[1], CONST64(0x8000000080000000), CONST64(0x8000000080000000)); - __ xorps(xmm0, ExternalAddress((address) float_signflip)); + __ xorps(xmm0, ExternalAddress((address) float_signflip), rscratch1); } else { LP64_ONLY(ShouldNotReachHere()); NOT_LP64(__ fchs()); @@ -1703,7 +1701,7 @@ void TemplateTable::dneg() { if (UseSSE >= 2) { static jlong *double_signflip = double_quadword(&double_signflip_pool[1], CONST64(0x8000000000000000), CONST64(0x8000000000000000)); - __ xorpd(xmm0, ExternalAddress((address) double_signflip)); + __ xorpd(xmm0, ExternalAddress((address) double_signflip), rscratch1); } else { #ifdef _LP64 ShouldNotReachHere(); @@ -1824,7 +1822,7 @@ void TemplateTable::convert() { Label L; __ cvttss2siq(rax, xmm0); // NaN or overflow/underflow? - __ cmp64(rax, ExternalAddress((address) &is_nan)); + __ cmp64(rax, ExternalAddress((address) &is_nan), rscratch1); __ jcc(Assembler::notEqual, L); __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1); __ bind(L); @@ -1848,7 +1846,7 @@ void TemplateTable::convert() { Label L; __ cvttsd2siq(rax, xmm0); // NaN or overflow/underflow? 
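Around the convert() changes: cvttss2siq/cvttsd2siq produce the x86 "integer indefinite" value 0x8000000000000000 for NaN or out-of-range input, so the template compares the result against that sentinel (the is_nan constant) and only then falls back to SharedRuntime::f2l/d2l. A behavioral sketch of the dispatch (range guards added because the raw C++ cast would be undefined for out-of-range floats):

#include <cassert>
#include <cmath>
#include <cstdint>

// Model of the f2l fast/slow-path split. `indefinite` is what the
// hardware conversion returns for NaN and out-of-range inputs.
static int64_t f2l_model(float f, bool* took_slow_path) {
  const int64_t indefinite = INT64_MIN;  // 0x8000000000000000
  int64_t raw;
  if (std::isnan(f) || f >= 9223372036854775808.0f /* 2^63 */ ||
      f < -9223372036854775808.0f) {
    raw = indefinite;                    // what cvttss2siq yields here
  } else {
    raw = (int64_t)f;                    // in range: plain truncation
  }
  *took_slow_path = (raw == indefinite); // cmp64(rax, is_nan) in the template
  return raw;                            // slow path would call SharedRuntime::f2l
}

int main() {
  bool slow;
  assert(f2l_model(42.5f, &slow) == 42 && !slow);
  f2l_model(0.0f / 0.0f, &slow);         // NaN
  assert(slow);
  f2l_model(1e30f, &slow);               // overflow
  assert(slow);
  return 0;
}

Note that a legitimate result of exactly 0x8000000000000000 also matches the sentinel; the runtime call simply recomputes the same value, so the check stays correct.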
- __ cmp64(rax, ExternalAddress((address) &is_nan)); + __ cmp64(rax, ExternalAddress((address) &is_nan), rscratch1); __ jcc(Assembler::notEqual, L); __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1); __ bind(L); @@ -1860,7 +1858,7 @@ void TemplateTable::convert() { default: ShouldNotReachHere(); } -#else +#else // !_LP64 // Checking #ifdef ASSERT { TosState tos_in = ilgl; @@ -2051,7 +2049,7 @@ void TemplateTable::convert() { default : ShouldNotReachHere(); } -#endif +#endif // _LP64 } void TemplateTable::lcmp() { @@ -2105,7 +2103,7 @@ void TemplateTable::float_cmp(bool is_float, int unordered_result) { } else { #ifdef _LP64 ShouldNotReachHere(); -#else +#else // !_LP64 if (is_float) { __ fld_s(at_rsp()); } else { @@ -2568,8 +2566,7 @@ void TemplateTable::_return(TosState state) { assert(state == vtos, "only valid state"); Register robj = LP64_ONLY(c_rarg1) NOT_LP64(rax); __ movptr(robj, aaddress(0)); - Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); - __ load_klass(rdi, robj, tmp_load_klass); + __ load_klass(rdi, robj, rscratch1); __ movl(rdi, Address(rdi, Klass::access_flags_offset())); __ testl(rdi, JVM_ACC_HAS_FINALIZER); Label skip_register_finalizer; @@ -2717,7 +2714,7 @@ void TemplateTable::load_field_cp_cache_entry(Register obj, ConstantPoolCacheEntry::f1_offset()))); const int mirror_offset = in_bytes(Klass::java_mirror_offset()); __ movptr(obj, Address(obj, mirror_offset)); - __ resolve_oop_handle(obj); + __ resolve_oop_handle(obj, rscratch2); } } @@ -3609,9 +3606,12 @@ void TemplateTable::prepare_invoke(int byte_no, { const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code); ExternalAddress table(table_addr); - LP64_ONLY(__ lea(rscratch1, table)); - LP64_ONLY(__ movptr(flags, Address(rscratch1, flags, Address::times_ptr))); - NOT_LP64(__ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr)))); +#ifdef _LP64 + __ lea(rscratch1, table); + __ movptr(flags, Address(rscratch1, flags, Address::times_ptr)); +#else + __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr))); +#endif // _LP64 } // push return address @@ -3659,8 +3659,7 @@ void TemplateTable::invokevirtual_helper(Register index, // get receiver klass __ null_check(recv, oopDesc::klass_offset_in_bytes()); - Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); - __ load_klass(rax, recv, tmp_load_klass); + __ load_klass(rax, recv, rscratch1); // profile this call __ profile_virtual_call(rax, rlocals, rdx); @@ -3752,8 +3751,7 @@ void TemplateTable::invokeinterface(int byte_no) { // Get receiver klass into rlocals - also a null check __ null_check(rcx, oopDesc::klass_offset_in_bytes()); - Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); - __ load_klass(rlocals, rcx, tmp_load_klass); + __ load_klass(rlocals, rcx, rscratch1); Label subtype; __ check_klass_subtype(rlocals, rax, rbcp, subtype); @@ -3776,7 +3774,7 @@ void TemplateTable::invokeinterface(int byte_no) { // Get receiver klass into rdx - also a null check __ restore_locals(); // restore r14 __ null_check(rcx, oopDesc::klass_offset_in_bytes()); - __ load_klass(rdx, rcx, tmp_load_klass); + __ load_klass(rdx, rcx, rscratch1); Label no_such_method; @@ -4005,11 +4003,10 @@ void TemplateTable::_new() { __ xorl(rsi, rsi); // use zero reg to clear memory (shorter code) __ store_klass_gap(rax, rsi); // zero klass gap for compressed oops #endif - Register tmp_store_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); - __ store_klass(rax, rcx, 
tmp_store_klass); // klass + __ store_klass(rax, rcx, rscratch1); // klass { - SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0); + SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0, rscratch1); // Trigger dtrace event for fastpath __ push(atos); __ call_VM_leaf( @@ -4100,8 +4097,7 @@ void TemplateTable::checkcast() { __ load_resolved_klass_at_index(rax, rcx, rbx); __ bind(resolved); - Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); - __ load_klass(rbx, rdx, tmp_load_klass); + __ load_klass(rbx, rdx, rscratch1); // Generate subtype check. Blows rcx, rdi. Object in rdx. // Superklass in rax. Subklass in rbx. @@ -4158,13 +4154,12 @@ void TemplateTable::instanceof() { __ pop_ptr(rdx); // restore receiver __ verify_oop(rdx); - Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); - __ load_klass(rdx, rdx, tmp_load_klass); + __ load_klass(rdx, rdx, rscratch1); __ jmpb(resolved); // Get superklass in rax and subklass in rdx __ bind(quicked); - __ load_klass(rdx, rax, tmp_load_klass); + __ load_klass(rdx, rax, rscratch1); __ load_resolved_klass_at_index(rax, rcx, rbx); __ bind(resolved); @@ -4403,7 +4398,7 @@ void TemplateTable::wide() { transition(vtos, vtos); __ load_unsigned_byte(rbx, at_bcp(1)); ExternalAddress wtable((address)Interpreter::_wentry_point); - __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr))); + __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr)), rscratch1); // Note: the rbcp increment step is part of the individual wide bytecode implementations } diff --git a/src/hotspot/cpu/x86/upcallLinker_x86_64.cpp b/src/hotspot/cpu/x86/upcallLinker_x86_64.cpp index 45002227372e0..c2aa7f0b29640 100644 --- a/src/hotspot/cpu/x86/upcallLinker_x86_64.cpp +++ b/src/hotspot/cpu/x86/upcallLinker_x86_64.cpp @@ -115,9 +115,9 @@ static void preserve_callee_saved_registers(MacroAssembler* _masm, const ABIDesc __ movl(rax, mxcsr_save); __ andl(rax, MXCSR_MASK); // Only check control and mask bits ExternalAddress mxcsr_std(StubRoutines::x86::addr_mxcsr_std()); - __ cmp32(rax, mxcsr_std); + __ cmp32(rax, mxcsr_std, rscratch1); __ jcc(Assembler::equal, skip_ldmx); - __ ldmxcsr(mxcsr_std); + __ ldmxcsr(mxcsr_std, rscratch1); __ bind(skip_ldmx); } #endif diff --git a/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp b/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp index e54b34708d1ea..4c5d4762beedf 100644 --- a/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp +++ b/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp @@ -48,7 +48,6 @@ extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int VtableStub* VtableStubs::create_vtable_stub(int vtable_index) { // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing. const int stub_code_length = code_size_limit(true); - Register tmp_load_klass = rscratch1; VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index); // Can be NULL if there is no free space in the code cache. 
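For context on the vtable stub being trimmed in the next hunks: conceptually it loads the receiver's klass, indexes into the klass's vtable, and jumps to the method entry, with rscratch1 used only as the (possibly compressed) klass-load temporary — hence the removal of the tmp_load_klass alias. In plain C++ terms (the layout and types are stand-ins, not HotSpot's real Klass/oopDesc):

#include <cassert>
#include <cstdio>

typedef const char* (*method_entry)();

static const char* entry_toString() { return "toString"; }
static const char* entry_hashCode() { return "hashCode"; }

// Stand-ins for the metadata the stub walks.
struct Klass   { method_entry vtable[2]; };  // real vtable is embedded in the Klass
struct oopDesc { Klass* klass; };            // real header may hold a compressed pointer

static method_entry resolve_vtable_call(oopDesc* receiver, int vtable_index) {
  Klass* k = receiver->klass;      // load_klass(rax, j_rarg0, rscratch1)
  return k->vtable[vtable_index];  // movptr + jmp to the Method entry point
}

int main() {
  Klass k = { { entry_toString, entry_hashCode } };
  oopDesc obj = { &k };
  assert(resolve_vtable_call(&obj, 1) == entry_hashCode);
  printf("%s\n", resolve_vtable_call(&obj, 0)());
  return 0;
}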
if (s == NULL) { @@ -70,7 +69,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) { #if (!defined(PRODUCT) && defined(COMPILER2)) if (CountCompiledCalls) { - __ incrementq(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr())); + __ incrementq(ExternalAddress(SharedRuntime::nof_megamorphic_calls_addr()), rscratch1); } #endif @@ -81,7 +80,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) { // get receiver klass address npe_addr = __ pc(); - __ load_klass(rax, j_rarg0, tmp_load_klass); + __ load_klass(rax, j_rarg0, rscratch1); #ifndef PRODUCT if (DebugVtables) { @@ -164,7 +163,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) { #if (!defined(PRODUCT) && defined(COMPILER2)) if (CountCompiledCalls) { - __ incrementq(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr())); + __ incrementq(ExternalAddress(SharedRuntime::nof_megamorphic_calls_addr()), rscratch1); } #endif // PRODUCT diff --git a/src/hotspot/cpu/x86/x86.ad b/src/hotspot/cpu/x86/x86.ad index bdbbd57435649..418ede29a286c 100644 --- a/src/hotspot/cpu/x86/x86.ad +++ b/src/hotspot/cpu/x86/x86.ad @@ -1343,7 +1343,7 @@ int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) { __ subptr(Address(rsp, 0), __ offset() - offset); #else InternalAddress here(__ pc()); - __ pushptr(here.addr()); + __ pushptr(here.addr(), noreg); #endif __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack())); @@ -1392,8 +1392,6 @@ Assembler::Width widthForType(BasicType bt) { static address vector_long_shufflemask() { return StubRoutines::x86::vector_long_shuffle_mask(); } static address vector_32_bit_mask() { return StubRoutines::x86::vector_32_bit_mask(); } static address vector_64_bit_mask() { return StubRoutines::x86::vector_64_bit_mask(); } - static address vector_float_signflip() { return StubRoutines::x86::vector_float_sign_flip();} - static address vector_double_signflip() { return StubRoutines::x86::vector_double_sign_flip();} //============================================================================= const bool Matcher::match_rule_supported(int opcode) { @@ -7352,9 +7350,9 @@ instruct castFtoI_reg_avx(vec dst, vec src, vec xtmp1, vec xtmp2, vec xtmp3, vec format %{ "vector_cast_f2i $dst,$src\t! using $xtmp1, $xtmp2, $xtmp3, $xtmp4 as TEMP" %} ins_encode %{ int vlen_enc = vector_length_encoding(this); - __ vector_castF2I_avx($dst$$XMMRegister, $src$$XMMRegister, $xtmp1$$XMMRegister, - $xtmp2$$XMMRegister, $xtmp3$$XMMRegister, $xtmp4$$XMMRegister, - ExternalAddress(vector_float_signflip()), noreg, vlen_enc); + __ vector_castF2I_avx($dst$$XMMRegister, $src$$XMMRegister, + ExternalAddress(StubRoutines::x86::vector_float_sign_flip()), vlen_enc, + $xtmp1$$XMMRegister, $xtmp2$$XMMRegister, $xtmp3$$XMMRegister, $xtmp4$$XMMRegister); %} ins_pipe( pipe_slow ); %} @@ -7368,9 +7366,9 @@ instruct castFtoI_reg_evex(vec dst, vec src, vec xtmp1, vec xtmp2, kReg ktmp1, k format %{ "vector_cast_f2i $dst,$src\t! 
using $xtmp1, $xtmp2, $ktmp1, $ktmp2 as TEMP" %} ins_encode %{ int vlen_enc = vector_length_encoding(this); - __ vector_castF2I_evex($dst$$XMMRegister, $src$$XMMRegister, $xtmp1$$XMMRegister, - $xtmp2$$XMMRegister, $ktmp1$$KRegister, $ktmp2$$KRegister, - ExternalAddress(vector_float_signflip()), noreg, vlen_enc); + __ vector_castF2I_evex($dst$$XMMRegister, $src$$XMMRegister, + ExternalAddress(StubRoutines::x86::vector_float_sign_flip()), vlen_enc, + $xtmp1$$XMMRegister, $xtmp2$$XMMRegister, $ktmp1$$KRegister, $ktmp2$$KRegister); %} ins_pipe( pipe_slow ); %} @@ -7387,14 +7385,14 @@ instruct castFtoX_reg_evex(vec dst, vec src, vec xtmp1, vec xtmp2, kReg ktmp1, k BasicType to_elem_bt = Matcher::vector_element_basic_type(this); if (to_elem_bt == T_LONG) { int vlen_enc = vector_length_encoding(this); - __ vector_castF2L_evex($dst$$XMMRegister, $src$$XMMRegister, $xtmp1$$XMMRegister, - $xtmp2$$XMMRegister, $ktmp1$$KRegister, $ktmp2$$KRegister, - ExternalAddress(vector_double_signflip()), noreg, vlen_enc); + __ vector_castF2L_evex($dst$$XMMRegister, $src$$XMMRegister, + ExternalAddress(StubRoutines::x86::vector_double_sign_flip()), vlen_enc, + $xtmp1$$XMMRegister, $xtmp2$$XMMRegister, $ktmp1$$KRegister, $ktmp2$$KRegister); } else { int vlen_enc = vector_length_encoding(this, $src); - __ vector_castF2I_evex($dst$$XMMRegister, $src$$XMMRegister, $xtmp1$$XMMRegister, - $xtmp2$$XMMRegister, $ktmp1$$KRegister, $ktmp2$$KRegister, - ExternalAddress(vector_float_signflip()), noreg, vlen_enc); + __ vector_castF2I_evex($dst$$XMMRegister, $src$$XMMRegister, + ExternalAddress(StubRoutines::x86::vector_float_sign_flip()), vlen_enc, + $xtmp1$$XMMRegister, $xtmp2$$XMMRegister, $ktmp1$$KRegister, $ktmp2$$KRegister); if (to_elem_bt == T_SHORT) { __ evpmovdw($dst$$XMMRegister, $dst$$XMMRegister, vlen_enc); } else { @@ -7425,9 +7423,9 @@ instruct castDtoX_reg_evex(vec dst, vec src, vec xtmp1, vec xtmp2, kReg ktmp1, k ins_encode %{ int vlen_enc = vector_length_encoding(this, $src); BasicType to_elem_bt = Matcher::vector_element_basic_type(this); - __ vector_castD2X_evex(to_elem_bt, $dst$$XMMRegister, $src$$XMMRegister, $xtmp1$$XMMRegister, - $xtmp2$$XMMRegister, $ktmp1$$KRegister, $ktmp2$$KRegister, - ExternalAddress(vector_double_signflip()), noreg, vlen_enc); + __ vector_castD2X_evex(to_elem_bt, $dst$$XMMRegister, $src$$XMMRegister, + ExternalAddress(StubRoutines::x86::vector_double_sign_flip()), vlen_enc, + $xtmp1$$XMMRegister, $xtmp2$$XMMRegister, $ktmp1$$KRegister, $ktmp2$$KRegister); %} ins_pipe( pipe_slow ); %} @@ -7449,55 +7447,57 @@ instruct vucast(vec dst, vec src) %{ %} #ifdef _LP64 -instruct vround_float_avx(vec dst, vec src, vec xtmp1, vec xtmp2, vec xtmp3, vec xtmp4, rRegP scratch, rFlagsReg cr) %{ +instruct vround_float_avx(vec dst, vec src, rRegP tmp, vec xtmp1, vec xtmp2, vec xtmp3, vec xtmp4, rFlagsReg cr) %{ predicate(!VM_Version::supports_avx512vl() && Matcher::vector_length_in_bytes(n) < 64 && Matcher::vector_element_basic_type(n) == T_INT); match(Set dst (RoundVF src)); - effect(TEMP dst, TEMP xtmp1, TEMP xtmp2, TEMP xtmp3, TEMP xtmp4, TEMP scratch, KILL cr); - format %{ "vector_round_float $dst,$src\t! using $xtmp1, $xtmp2, $xtmp3, $xtmp4 and $scratch as TEMP" %} + effect(TEMP dst, TEMP tmp, TEMP xtmp1, TEMP xtmp2, TEMP xtmp3, TEMP xtmp4, KILL cr); + format %{ "vector_round_float $dst,$src\t! 
using $tmp, $xtmp1, $xtmp2, $xtmp3, $xtmp4 as TEMP" %} ins_encode %{ int vlen_enc = vector_length_encoding(this); InternalAddress new_mxcsr = $constantaddress((jint)0x3F80); - __ vector_round_float_avx($dst$$XMMRegister, $src$$XMMRegister, $xtmp1$$XMMRegister, - $xtmp2$$XMMRegister, $xtmp3$$XMMRegister, $xtmp4$$XMMRegister, - ExternalAddress(vector_float_signflip()), new_mxcsr, $scratch$$Register, vlen_enc); + __ vector_round_float_avx($dst$$XMMRegister, $src$$XMMRegister, + ExternalAddress(StubRoutines::x86::vector_float_sign_flip()), new_mxcsr, vlen_enc, + $tmp$$Register, $xtmp1$$XMMRegister, $xtmp2$$XMMRegister, $xtmp3$$XMMRegister, $xtmp4$$XMMRegister); %} ins_pipe( pipe_slow ); %} -instruct vround_float_evex(vec dst, vec src, vec xtmp1, vec xtmp2, kReg ktmp1, kReg ktmp2, rRegP scratch, rFlagsReg cr) %{ +instruct vround_float_evex(vec dst, vec src, rRegP tmp, vec xtmp1, vec xtmp2, kReg ktmp1, kReg ktmp2, rFlagsReg cr) %{ predicate((VM_Version::supports_avx512vl() || Matcher::vector_length_in_bytes(n) == 64) && Matcher::vector_element_basic_type(n) == T_INT); match(Set dst (RoundVF src)); - effect(TEMP dst, TEMP xtmp1, TEMP xtmp2, TEMP ktmp1, TEMP ktmp2, TEMP scratch, KILL cr); - format %{ "vector_round_float $dst,$src\t! using $xtmp1, $xtmp2, $ktmp1, $ktmp2 and $scratch as TEMP" %} + effect(TEMP dst, TEMP tmp, TEMP xtmp1, TEMP xtmp2, TEMP ktmp1, TEMP ktmp2, KILL cr); + format %{ "vector_round_float $dst,$src\t! using $tmp, $xtmp1, $xtmp2, $ktmp1, $ktmp2 as TEMP" %} ins_encode %{ int vlen_enc = vector_length_encoding(this); InternalAddress new_mxcsr = $constantaddress((jint)0x3F80); - __ vector_round_float_evex($dst$$XMMRegister, $src$$XMMRegister, $xtmp1$$XMMRegister, - $xtmp2$$XMMRegister, $ktmp1$$KRegister, $ktmp2$$KRegister, - ExternalAddress(vector_float_signflip()), new_mxcsr, $scratch$$Register, vlen_enc); + __ vector_round_float_evex($dst$$XMMRegister, $src$$XMMRegister, + ExternalAddress(StubRoutines::x86::vector_float_sign_flip()), new_mxcsr, vlen_enc, + $tmp$$Register, $xtmp1$$XMMRegister, $xtmp2$$XMMRegister, $ktmp1$$KRegister, $ktmp2$$KRegister); %} ins_pipe( pipe_slow ); %} -instruct vround_reg_evex(vec dst, vec src, vec xtmp1, vec xtmp2, kReg ktmp1, kReg ktmp2, rRegP scratch, rFlagsReg cr) %{ +instruct vround_reg_evex(vec dst, vec src, rRegP tmp, vec xtmp1, vec xtmp2, kReg ktmp1, kReg ktmp2, rFlagsReg cr) %{ predicate(Matcher::vector_element_basic_type(n) == T_LONG); match(Set dst (RoundVD src)); - effect(TEMP dst, TEMP xtmp1, TEMP xtmp2, TEMP ktmp1, TEMP ktmp2, TEMP scratch, KILL cr); - format %{ "vector_round_long $dst,$src\t! using $xtmp1, $xtmp2, $ktmp1, $ktmp2 and $scratch as TEMP" %} + effect(TEMP dst, TEMP tmp, TEMP xtmp1, TEMP xtmp2, TEMP ktmp1, TEMP ktmp2, KILL cr); + format %{ "vector_round_long $dst,$src\t! 
using $tmp, $xtmp1, $xtmp2, $ktmp1, $ktmp2 as TEMP" %} ins_encode %{ int vlen_enc = vector_length_encoding(this); InternalAddress new_mxcsr = $constantaddress((jint)0x3F80); - __ vector_round_double_evex($dst$$XMMRegister, $src$$XMMRegister, $xtmp1$$XMMRegister, - $xtmp2$$XMMRegister, $ktmp1$$KRegister, $ktmp2$$KRegister, - ExternalAddress(vector_double_signflip()), new_mxcsr, $scratch$$Register, vlen_enc); + __ vector_round_double_evex($dst$$XMMRegister, $src$$XMMRegister, + ExternalAddress(StubRoutines::x86::vector_double_sign_flip()), new_mxcsr, vlen_enc, + $tmp$$Register, $xtmp1$$XMMRegister, $xtmp2$$XMMRegister, $ktmp1$$KRegister, $ktmp2$$KRegister); %} ins_pipe( pipe_slow ); %} -#endif + +#endif // _LP64 + // --------------------------------- VectorMaskCmp -------------------------------------- instruct vcmpFD(legVec dst, legVec src1, legVec src2, immI8 cond) %{ @@ -9390,8 +9390,8 @@ instruct vreverse_reg_gfni(vec dst, vec src, vec xtmp) %{ int vec_enc = vector_length_encoding(this); BasicType bt = Matcher::vector_element_basic_type(this); InternalAddress addr = $constantaddress(T_LONG, vreplicate_imm(T_LONG, 0x8040201008040201L, 1)); - __ vector_reverse_bit_gfni(bt, $dst$$XMMRegister, $src$$XMMRegister, $xtmp$$XMMRegister, - addr, noreg, vec_enc); + __ vector_reverse_bit_gfni(bt, $dst$$XMMRegister, $src$$XMMRegister, addr, vec_enc, + $xtmp$$XMMRegister); %} ins_pipe( pipe_slow ); %} diff --git a/src/hotspot/cpu/x86/x86_32.ad b/src/hotspot/cpu/x86/x86_32.ad index 795250c9e366f..9468b882dc177 100644 --- a/src/hotspot/cpu/x86/x86_32.ad +++ b/src/hotspot/cpu/x86/x86_32.ad @@ -12629,7 +12629,7 @@ instruct jumpXtnd(rRegI switch_val) %{ ins_encode %{ // Jump to Address(table_base + switch_reg) Address index(noreg, $switch_val$$Register, Address::times_1); - __ jump(ArrayAddress($constantaddress, index)); + __ jump(ArrayAddress($constantaddress, index), noreg); %} ins_pipe(pipe_jmp); %} diff --git a/src/hotspot/cpu/zero/nativeInst_zero.cpp b/src/hotspot/cpu/zero/nativeInst_zero.cpp index c26a4245c2c21..53f6fcef83019 100644 --- a/src/hotspot/cpu/zero/nativeInst_zero.cpp +++ b/src/hotspot/cpu/zero/nativeInst_zero.cpp @@ -30,7 +30,7 @@ #include "nativeInst_zero.hpp" #include "runtime/sharedRuntime.hpp" -// This method is called by nmethod::make_not_entrant_or_zombie to +// This method is called by nmethod::make_not_entrant to // insert a jump to SharedRuntime::get_handle_wrong_method_stub() // (dest) at the start of a compiled method (verified_entry) to avoid // a race where a method is invoked while being made non-entrant. diff --git a/src/hotspot/os/linux/cgroupSubsystem_linux.cpp b/src/hotspot/os/linux/cgroupSubsystem_linux.cpp index 6eb1f118744ea..baa7a40a9146a 100644 --- a/src/hotspot/os/linux/cgroupSubsystem_linux.cpp +++ b/src/hotspot/os/linux/cgroupSubsystem_linux.cpp @@ -528,7 +528,30 @@ jlong CgroupSubsystem::memory_limit_in_bytes() { if (!memory_limit->should_check_metric()) { return memory_limit->value(); } + jlong phys_mem = os::Linux::physical_memory(); + log_trace(os, container)("total physical memory: " JLONG_FORMAT, phys_mem); jlong mem_limit = read_memory_limit_in_bytes(); + + if (mem_limit <= 0 || mem_limit >= phys_mem) { + jlong read_mem_limit = mem_limit; + const char *reason; + if (mem_limit >= phys_mem) { + // Exceeding physical memory is treated as unlimited. Cg v1's implementation + // of read_memory_limit_in_bytes() caps this at phys_mem since Cg v1 has no + // value to represent 'max'. Cg v2 may return a value >= phys_mem if e.g. 
the + // container engine was started with a memory flag exceeding it. + reason = "ignored"; + mem_limit = -1; + } else if (OSCONTAINER_ERROR == mem_limit) { + reason = "failed"; + } else { + assert(mem_limit == -1, "Expected unlimited"); + reason = "unlimited"; + } + log_debug(os, container)("container memory limit %s: " JLONG_FORMAT ", using host value " JLONG_FORMAT, + reason, read_mem_limit, phys_mem); + } + // Update cached metric to avoid re-reading container settings too often memory_limit->set_value(mem_limit, OSCONTAINER_CACHE_TIMEOUT); return mem_limit; diff --git a/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp b/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp index bfd81b340593d..e62dcf4f75975 100644 --- a/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp +++ b/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp @@ -31,6 +31,7 @@ #include "runtime/globals.hpp" #include "runtime/os.hpp" #include "utilities/globalDefinitions.hpp" +#include "os_linux.hpp" /* * Set directory to subsystem specific files based @@ -91,7 +92,7 @@ jlong CgroupV1Subsystem::read_memory_limit_in_bytes() { GET_CONTAINER_INFO(julong, _memory->controller(), "/memory.limit_in_bytes", "Memory Limit is: " JULONG_FORMAT, JULONG_FORMAT, memlimit); - if (memlimit >= _unlimited_memory) { + if (memlimit >= os::Linux::physical_memory()) { log_trace(os, container)("Non-Hierarchical Memory Limit is: Unlimited"); CgroupV1MemoryController* mem_controller = reinterpret_cast(_memory->controller()); if (mem_controller->is_hierarchical()) { @@ -99,7 +100,7 @@ jlong CgroupV1Subsystem::read_memory_limit_in_bytes() { const char* format = "%s " JULONG_FORMAT; GET_CONTAINER_INFO_LINE(julong, _memory->controller(), "/memory.stat", matchline, "Hierarchical Memory Limit is: " JULONG_FORMAT, format, hier_memlimit) - if (hier_memlimit >= _unlimited_memory) { + if (hier_memlimit >= os::Linux::physical_memory()) { log_trace(os, container)("Hierarchical Memory Limit is: Unlimited"); } else { return (jlong)hier_memlimit; @@ -113,9 +114,11 @@ jlong CgroupV1Subsystem::read_memory_limit_in_bytes() { } jlong CgroupV1Subsystem::memory_and_swap_limit_in_bytes() { + julong host_total_memsw; GET_CONTAINER_INFO(julong, _memory->controller(), "/memory.memsw.limit_in_bytes", "Memory and Swap Limit is: " JULONG_FORMAT, JULONG_FORMAT, memswlimit); - if (memswlimit >= _unlimited_memory) { + host_total_memsw = os::Linux::host_swap() + os::Linux::physical_memory(); + if (memswlimit >= host_total_memsw) { log_trace(os, container)("Non-Hierarchical Memory and Swap Limit is: Unlimited"); CgroupV1MemoryController* mem_controller = reinterpret_cast(_memory->controller()); if (mem_controller->is_hierarchical()) { @@ -123,7 +126,7 @@ jlong CgroupV1Subsystem::memory_and_swap_limit_in_bytes() { const char* format = "%s " JULONG_FORMAT; GET_CONTAINER_INFO_LINE(julong, _memory->controller(), "/memory.stat", matchline, "Hierarchical Memory and Swap Limit is : " JULONG_FORMAT, format, hier_memswlimit) - if (hier_memswlimit >= _unlimited_memory) { + if (hier_memswlimit >= host_total_memsw) { log_trace(os, container)("Hierarchical Memory and Swap Limit is: Unlimited"); } else { jlong swappiness = read_mem_swappiness(); @@ -158,7 +161,7 @@ jlong CgroupV1Subsystem::read_mem_swappiness() { jlong CgroupV1Subsystem::memory_soft_limit_in_bytes() { GET_CONTAINER_INFO(julong, _memory->controller(), "/memory.soft_limit_in_bytes", "Memory Soft Limit is: " JULONG_FORMAT, JULONG_FORMAT, memsoftlimit); - if (memsoftlimit >= _unlimited_memory) { + if (memsoftlimit >= 
os::Linux::physical_memory()) { log_trace(os, container)("Memory Soft Limit is: Unlimited"); return (jlong)-1; } else { @@ -205,7 +208,7 @@ jlong CgroupV1Subsystem::kernel_memory_usage_in_bytes() { jlong CgroupV1Subsystem::kernel_memory_limit_in_bytes() { GET_CONTAINER_INFO(julong, _memory->controller(), "/memory.kmem.limit_in_bytes", "Kernel Memory Limit is: " JULONG_FORMAT, JULONG_FORMAT, kmem_limit); - if (kmem_limit >= _unlimited_memory) { + if (kmem_limit >= os::Linux::physical_memory()) { return (jlong)-1; } return (jlong)kmem_limit; diff --git a/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp b/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp index 04a18524608b9..07fac4a9461cd 100644 --- a/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp +++ b/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp @@ -104,8 +104,6 @@ class CgroupV1Subsystem: public CgroupSubsystem { CachingCgroupController * cpu_controller() { return _cpu; } private: - julong _unlimited_memory; - /* controllers */ CachingCgroupController* _memory = NULL; CgroupV1Controller* _cpuset = NULL; @@ -128,7 +126,6 @@ class CgroupV1Subsystem: public CgroupSubsystem { _cpuacct = cpuacct; _pids = pids; _memory = new CachingCgroupController(memory); - _unlimited_memory = (LONG_MAX / os::vm_page_size()) * os::vm_page_size(); } }; diff --git a/src/hotspot/os/linux/osContainer_linux.cpp b/src/hotspot/os/linux/osContainer_linux.cpp index d6f0d627fdffa..7802b595f5c13 100644 --- a/src/hotspot/os/linux/osContainer_linux.cpp +++ b/src/hotspot/os/linux/osContainer_linux.cpp @@ -43,8 +43,6 @@ CgroupSubsystem* cgroup_subsystem; * we are running under cgroup control. */ void OSContainer::init() { - jlong mem_limit; - assert(!_is_initialized, "Initializing OSContainer more than once"); _is_initialized = true; @@ -60,15 +58,8 @@ void OSContainer::init() { if (cgroup_subsystem == NULL) { return; // Required subsystem files not found or other error } - // We need to update the amount of physical memory now that - // cgroup subsystem files have been processed. - if ((mem_limit = cgroup_subsystem->memory_limit_in_bytes()) > 0) { - os::Linux::set_physical_memory(mem_limit); - log_info(os, container)("Memory Limit is: " JLONG_FORMAT, mem_limit); - } _is_containerized = true; - } const char * OSContainer::container_type() { diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp index 68f3bf704a11e..b2bd876a63ceb 100644 --- a/src/hotspot/os/linux/os_linux.cpp +++ b/src/hotspot/os/linux/os_linux.cpp @@ -200,15 +200,12 @@ julong os::Linux::available_memory() { julong avail_mem; if (OSContainer::is_containerized()) { - jlong mem_limit, mem_usage; - if ((mem_limit = OSContainer::memory_limit_in_bytes()) < 1) { - log_debug(os, container)("container memory limit %s: " JLONG_FORMAT ", using host value", - mem_limit == OSCONTAINER_ERROR ? "failed" : "unlimited", mem_limit); - } + jlong mem_limit = OSContainer::memory_limit_in_bytes(); + jlong mem_usage; if (mem_limit > 0 && (mem_usage = OSContainer::memory_usage_in_bytes()) < 1) { log_debug(os, container)("container memory usage failed: " JLONG_FORMAT ", using host value", mem_usage); } - if (mem_limit > 0 && mem_usage > 0 ) { + if (mem_limit > 0 && mem_usage > 0) { avail_mem = mem_limit > mem_usage ? 
(julong)mem_limit - (julong)mem_usage : 0; log_trace(os)("available container memory: " JULONG_FORMAT, avail_mem); return avail_mem; @@ -229,8 +226,6 @@ julong os::physical_memory() { log_trace(os)("total container memory: " JLONG_FORMAT, mem_limit); return mem_limit; } - log_debug(os, container)("container memory limit %s: " JLONG_FORMAT ", using host value", - mem_limit == OSCONTAINER_ERROR ? "failed" : "unlimited", mem_limit); } phys_mem = Linux::physical_memory(); @@ -346,6 +341,14 @@ pid_t os::Linux::gettid() { return (pid_t)rslt; } +// Returns the amount of swap currently configured, in bytes. +// This can change at any time. +julong os::Linux::host_swap() { + struct sysinfo si; + sysinfo(&si); + return (julong)si.totalswap; +} + // Most versions of linux have a bug where the number of processors are // determined by looking at the /proc file system. In a chroot environment, // the system call returns 1. diff --git a/src/hotspot/os/linux/os_linux.hpp b/src/hotspot/os/linux/os_linux.hpp index 9159509377ae5..95e604967619e 100644 --- a/src/hotspot/os/linux/os_linux.hpp +++ b/src/hotspot/os/linux/os_linux.hpp @@ -57,8 +57,6 @@ class os::Linux { static pthread_t _main_thread; static julong available_memory(); - static julong physical_memory() { return _physical_memory; } - static void set_physical_memory(julong phys_mem) { _physical_memory = phys_mem; } static int active_processor_count(); static void initialize_system_info(); @@ -131,6 +129,9 @@ class os::Linux { static address initial_thread_stack_bottom(void) { return _initial_thread_stack_bottom; } static uintptr_t initial_thread_stack_size(void) { return _initial_thread_stack_size; } + static julong physical_memory() { return _physical_memory; } + static julong host_swap(); + static intptr_t* ucontext_get_sp(const ucontext_t* uc); static intptr_t* ucontext_get_fp(const ucontext_t* uc); diff --git a/src/hotspot/os/posix/signals_posix.cpp b/src/hotspot/os/posix/signals_posix.cpp index 188a7adf02956..eb6307819b8ef 100644 --- a/src/hotspot/os/posix/signals_posix.cpp +++ b/src/hotspot/os/posix/signals_posix.cpp @@ -635,7 +635,7 @@ int JVM_HANDLE_XXX_SIGNAL(int sig, siginfo_t* info, address pc = os::Posix::ucontext_get_pc(uc); assert(pc != NULL, ""); if (NativeDeoptInstruction::is_deopt_at(pc)) { - CodeBlob* cb = CodeCache::find_blob_unsafe(pc); + CodeBlob* cb = CodeCache::find_blob(pc); if (cb != NULL && cb->is_compiled()) { MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, t);) // can call PcDescCache::add_pc_desc CompiledMethod* cm = cb->as_compiled_method(); diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp index 80a96e6abd425..df75dc140f000 100644 --- a/src/hotspot/os/windows/os_windows.cpp +++ b/src/hotspot/os/windows/os_windows.cpp @@ -2679,7 +2679,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { if (exception_code == EXCEPTION_IN_PAGE_ERROR) { CompiledMethod* nm = NULL; if (in_java) { - CodeBlob* cb = CodeCache::find_blob_unsafe(pc); + CodeBlob* cb = CodeCache::find_blob(pc); nm = (cb != NULL) ? 
cb->as_compiled_method_or_null() : NULL; } @@ -2698,9 +2698,9 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { if (in_java && (exception_code == EXCEPTION_ILLEGAL_INSTRUCTION || exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) { - if (nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) { + if (nativeInstruction_at(pc)->is_sigill_not_entrant()) { if (TraceTraps) { - tty->print_cr("trap: zombie_not_entrant"); + tty->print_cr("trap: not_entrant"); } return Handle_Exception(exceptionInfo, SharedRuntime::get_handle_wrong_method_stub()); } @@ -2729,7 +2729,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { // Check for UD trap caused by NOP patching. // If it is, patch return address to be deopt handler. if (NativeDeoptInstruction::is_deopt_at(pc)) { - CodeBlob* cb = CodeCache::find_blob_unsafe(pc); + CodeBlob* cb = CodeCache::find_blob(pc); if (cb != NULL && cb->is_compiled()) { CompiledMethod* cm = cb->as_compiled_method(); frame fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord); diff --git a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp index 6e2204aa6d046..39331116af22f 100644 --- a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp +++ b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp @@ -193,7 +193,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, // // SIGILL: the compiler generates illegal opcodes // at places where it wishes to interrupt the VM: - // Safepoints, Unreachable Code, Entry points of Zombie methods, + // Safepoints, Unreachable Code, Entry points of not entrant nmethods, // This results in a SIGILL with (*pc) == inserted illegal instruction. // // (so, SIGILLs with a pc inside the zero page are real errors) @@ -202,7 +202,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, // The ppc trap instruction raises a SIGTRAP and is very efficient if it // does not trap. It is used for conditional branches that are expected // to be never taken. These are: - // - zombie methods + // - not entrant nmethods // - IC (inline cache) misses. // - null checks leading to UncommonTraps. // - range checks leading to Uncommon Traps. @@ -225,9 +225,9 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, CodeBlob *cb = NULL; int stop_type = -1; // Handle signal from NativeJump::patch_verified_entry(). - if (sig == SIGILL && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) { + if (sig == SIGILL && nativeInstruction_at(pc)->is_sigill_not_entrant()) { if (TraceTraps) { - tty->print_cr("trap: zombie_not_entrant"); + tty->print_cr("trap: not_entrant"); } stub = SharedRuntime::get_handle_wrong_method_stub(); goto run_stub; @@ -341,7 +341,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, else if (sig == SIGBUS) { // BugId 4454115: A read from a MappedByteBuffer can fault here if the // underlying file has been truncated. Do not crash the VM in such a case. - CodeBlob* cb = CodeCache::find_blob_unsafe(pc); + CodeBlob* cb = CodeCache::find_blob(pc); CompiledMethod* nm = cb ? 
cb->as_compiled_method_or_null() : NULL; bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc)); if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) { diff --git a/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp index da0dd1795d696..f7aad7cbd493f 100644 --- a/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp +++ b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp @@ -246,9 +246,9 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, // Handle signal from NativeJump::patch_verified_entry(). if ((sig == SIGILL) - && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) { + && nativeInstruction_at(pc)->is_sigill_not_entrant()) { if (TraceTraps) { - tty->print_cr("trap: zombie_not_entrant"); + tty->print_cr("trap: not_entrant"); } stub = SharedRuntime::get_handle_wrong_method_stub(); } else if ((sig == SIGSEGV || sig == SIGBUS) && SafepointMechanism::is_poll_address((address)info->si_addr)) { @@ -265,7 +265,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, // BugId 4454115: A read from a MappedByteBuffer can fault // here if the underlying file has been truncated. // Do not crash the VM in such a case. - CodeBlob* cb = CodeCache::find_blob_unsafe(pc); + CodeBlob* cb = CodeCache::find_blob(pc); CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL; bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc)); if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) { diff --git a/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp b/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp index e8d4edd3bbff0..5dc80f2d9e78b 100644 --- a/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp +++ b/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp @@ -440,7 +440,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, // BugId 4454115: A read from a MappedByteBuffer can fault // here if the underlying file has been truncated. // Do not crash the VM in such a case. - CodeBlob* cb = CodeCache::find_blob_unsafe(pc); + CodeBlob* cb = CodeCache::find_blob(pc); CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL; bool is_unsafe_arraycopy = thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc); if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) { diff --git a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp index 349420c65dc3c..a09e3d3e13361 100644 --- a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp +++ b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp @@ -207,9 +207,9 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, // Handle signal from NativeJump::patch_verified_entry(). if ((sig == SIGILL || sig == SIGTRAP) - && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) { + && nativeInstruction_at(pc)->is_sigill_not_entrant()) { if (TraceTraps) { - tty->print_cr("trap: zombie_not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL"); + tty->print_cr("trap: not_entrant (%s)", (sig == SIGTRAP) ? 
"SIGTRAP" : "SIGILL"); } stub = SharedRuntime::get_handle_wrong_method_stub(); } else if (sig == SIGSEGV && SafepointMechanism::is_poll_address((address)info->si_addr)) { @@ -218,7 +218,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, // BugId 4454115: A read from a MappedByteBuffer can fault // here if the underlying file has been truncated. // Do not crash the VM in such a case. - CodeBlob* cb = CodeCache::find_blob_unsafe(pc); + CodeBlob* cb = CodeCache::find_blob(pc); CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL; bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc)); if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) { diff --git a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp index 61dcf9b702f43..7434c0f678789 100644 --- a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp +++ b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp @@ -323,7 +323,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, // BugId 4454115: A read from a MappedByteBuffer can fault // here if the underlying file has been truncated. // Do not crash the VM in such a case. - CodeBlob* cb = CodeCache::find_blob_unsafe(pc); + CodeBlob* cb = CodeCache::find_blob(pc); CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL; if ((nm != NULL && nm->has_unsafe_access()) || (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc))) { unsafe_access = true; @@ -331,12 +331,12 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, } else if (sig == SIGSEGV && MacroAssembler::uses_implicit_null_check(info->si_addr)) { // Determination of interpreter/vtable stub/compiled code null exception - CodeBlob* cb = CodeCache::find_blob_unsafe(pc); + CodeBlob* cb = CodeCache::find_blob(pc); if (cb != NULL) { stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); } - } else if (sig == SIGILL && *(int *)pc == NativeInstruction::zombie_illegal_instruction) { - // Zombie + } else if (sig == SIGILL && *(int *)pc == NativeInstruction::not_entrant_illegal_instruction) { + // Not entrant stub = SharedRuntime::get_handle_wrong_method_stub(); } } else if ((thread->thread_state() == _thread_in_vm || diff --git a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp index b52b4a9f9a3e4..b00c38637c8da 100644 --- a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp +++ b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp @@ -248,9 +248,9 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, CodeBlob *cb = NULL; int stop_type = -1; // Handle signal from NativeJump::patch_verified_entry(). - if (sig == SIGILL && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) { + if (sig == SIGILL && nativeInstruction_at(pc)->is_sigill_not_entrant()) { if (TraceTraps) { - tty->print_cr("trap: zombie_not_entrant"); + tty->print_cr("trap: not_entrant"); } stub = SharedRuntime::get_handle_wrong_method_stub(); } @@ -356,7 +356,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, else if (sig == SIGBUS) { // BugId 4454115: A read from a MappedByteBuffer can fault here if the // underlying file has been truncated. Do not crash the VM in such a case. - CodeBlob* cb = CodeCache::find_blob_unsafe(pc); + CodeBlob* cb = CodeCache::find_blob(pc); CompiledMethod* nm = (cb != NULL) ? 
cb->as_compiled_method_or_null() : NULL; bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc)); if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) { diff --git a/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp b/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp index eeeca1a7dd64a..7285cbcab5b41 100644 --- a/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp +++ b/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp @@ -208,9 +208,9 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, // Handle signal from NativeJump::patch_verified_entry(). if ((sig == SIGILL || sig == SIGTRAP) - && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) { + && nativeInstruction_at(pc)->is_sigill_not_entrant()) { if (TraceTraps) { - tty->print_cr("trap: zombie_not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL"); + tty->print_cr("trap: not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL"); } stub = SharedRuntime::get_handle_wrong_method_stub(); } else if (sig == SIGSEGV && SafepointMechanism::is_poll_address((address)info->si_addr)) { @@ -219,7 +219,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, // BugId 4454115: A read from a MappedByteBuffer can fault // here if the underlying file has been truncated. // Do not crash the VM in such a case. - CodeBlob* cb = CodeCache::find_blob_unsafe(pc); + CodeBlob* cb = CodeCache::find_blob(pc); CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL; bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc)); if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) { diff --git a/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp b/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp index 4dc15a07921dd..8783fedd2f03a 100644 --- a/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp +++ b/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp @@ -242,9 +242,9 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, // a fault inside compiled code, the interpreter, or a stub // Handle signal from NativeJump::patch_verified_entry(). - if (sig == SIGILL && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) { + if (sig == SIGILL && nativeInstruction_at(pc)->is_sigill_not_entrant()) { if (TraceTraps) { - tty->print_cr("trap: zombie_not_entrant (SIGILL)"); + tty->print_cr("trap: not_entrant (SIGILL)"); } stub = SharedRuntime::get_handle_wrong_method_stub(); } @@ -302,7 +302,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, else if (sig == SIGBUS) { // BugId 4454115: A read from a MappedByteBuffer can fault here if the // underlying file has been truncated. Do not crash the VM in such a case. - CodeBlob* cb = CodeCache::find_blob_unsafe(pc); + CodeBlob* cb = CodeCache::find_blob(pc); CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL; if (nm != NULL && nm->has_unsafe_access()) { // We don't really need a stub here! Just set the pending exception and diff --git a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp index b178de941e8a5..156566440d871 100644 --- a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp +++ b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp @@ -257,7 +257,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, // BugId 4454115: A read from a MappedByteBuffer can fault // here if the underlying file has been truncated. // Do not crash the VM in such a case. 
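      // Note: the repeated find_blob_unsafe -> find_blob switch in these
      // handlers is safe because this patch retires zombie nmethods; a blob
      // found at a faulting pc no longer needs the lookup variant that
      // tolerated partially torn-down code.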
- CodeBlob* cb = CodeCache::find_blob_unsafe(pc); + CodeBlob* cb = CodeCache::find_blob(pc); CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL; bool is_unsafe_arraycopy = thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc); if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) { diff --git a/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp b/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp index 5a4c0a1c3e2f1..70d63b32f9dc8 100644 --- a/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp +++ b/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp @@ -181,7 +181,7 @@ bool os::win32::register_code_area(char *low, char *high) { MacroAssembler* masm = new MacroAssembler(&cb); pDCD = (pDynamicCodeData) masm->pc(); - masm->jump(ExternalAddress((address)&HandleExceptionFromCodeCache)); + masm->jump(ExternalAddress((address)&HandleExceptionFromCodeCache), rscratch1); masm->flush(); // Create an Unwind Structure specifying no unwind info diff --git a/src/hotspot/share/c1/c1_Canonicalizer.cpp b/src/hotspot/share/c1/c1_Canonicalizer.cpp index a8a29797404b6..a19acff4d14d3 100644 --- a/src/hotspot/share/c1/c1_Canonicalizer.cpp +++ b/src/hotspot/share/c1/c1_Canonicalizer.cpp @@ -790,7 +790,7 @@ void Canonicalizer::do_If(If* x) { else if (lss_sux == gtr_sux) { cond = If::neq; tsux = lss_sux; fsux = eql_sux; } else if (eql_sux == gtr_sux) { cond = If::geq; tsux = eql_sux; fsux = lss_sux; } else { ShouldNotReachHere(); } - If* canon = new If(cmp->x(), cond, nan_sux == tsux, cmp->y(), tsux, fsux, cmp->state_before(), x->is_safepoint()); + If* canon = new If(cmp->x(), cond, nan_sux == tsux, cmp->y(), tsux, fsux, x->state_before(), x->is_safepoint()); if (cmp->x() == cmp->y()) { do_If(canon); } else { diff --git a/src/hotspot/share/c1/c1_Compilation.cpp b/src/hotspot/share/c1/c1_Compilation.cpp index 07317c7b881d9..155e297abbe8f 100644 --- a/src/hotspot/share/c1/c1_Compilation.cpp +++ b/src/hotspot/share/c1/c1_Compilation.cpp @@ -716,31 +716,6 @@ void CompilationResourceObj::print_on(outputStream* st) const { st->print_cr("CompilationResourceObj(" INTPTR_FORMAT ")", p2i(this)); } -void Compilation::compile_only_this_method() { - ResourceMark rm; - fileStream stream(os::fopen("c1_compile_only", "wt")); - stream.print_cr("# c1 compile only directives"); - compile_only_this_scope(&stream, hir()->top_scope()); -} - -void Compilation::compile_only_this_scope(outputStream* st, IRScope* scope) { - st->print("CompileOnly="); - scope->method()->holder()->name()->print_symbol_on(st); - st->print("."); - scope->method()->name()->print_symbol_on(st); - st->cr(); -} - -void Compilation::exclude_this_method() { - fileStream stream(os::fopen(".hotspot_compiler", "at")); - stream.print("exclude "); - method()->holder()->name()->print_symbol_on(&stream); - stream.print(" "); - method()->name()->print_symbol_on(&stream); - stream.cr(); - stream.cr(); -} - // Called from debugger to get the interval with 'reg_num' during register allocation. Interval* find_interval(int reg_num) { return Compilation::current()->allocator()->find_interval_at(reg_num); diff --git a/src/hotspot/share/c1/c1_Compilation.hpp b/src/hotspot/share/c1/c1_Compilation.hpp index 3d9b689f96520..6b8093879305f 100644 --- a/src/hotspot/share/c1/c1_Compilation.hpp +++ b/src/hotspot/share/c1/c1_Compilation.hpp @@ -220,16 +220,6 @@ class Compilation: public StackObj { // timers static void print_timers(); -#ifndef PRODUCT - // debugging support. 
- // produces a file named c1compileonly in the current directory with - // directives to compile only the current method and it's inlines. - // The file can be passed to the command line option -XX:Flags= - void compile_only_this_method(); - void compile_only_this_scope(outputStream* st, IRScope* scope); - void exclude_this_method(); -#endif // PRODUCT - bool is_profiling() { return env()->comp_level() == CompLevel_full_profile || env()->comp_level() == CompLevel_limited_profile; @@ -263,9 +253,6 @@ class Compilation: public StackObj { return env()->comp_level() == CompLevel_full_profile && C1UpdateMethodData && MethodData::profile_return(); } - bool age_code() const { - return _method->profile_aging(); - } // will compilation make optimistic assumptions that might lead to // deoptimization and that the runtime will account for? diff --git a/src/hotspot/share/c1/c1_LIRGenerator.cpp b/src/hotspot/share/c1/c1_LIRGenerator.cpp index d940280579bb4..a787ca1cfcd0f 100644 --- a/src/hotspot/share/c1/c1_LIRGenerator.cpp +++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp @@ -2683,10 +2683,6 @@ void LIRGenerator::do_Base(Base* x) { __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL); } } - if (compilation()->age_code()) { - CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), NULL, false); - decrement_age(info); - } // increment invocation counters if needed if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting. profile_parameters(x); @@ -3253,27 +3249,6 @@ void LIRGenerator::increment_event_counter(CodeEmitInfo* info, LIR_Opr step, int increment_event_counter_impl(info, info->scope()->method(), step, right_n_bits(freq_log), bci, backedge, true); } -void LIRGenerator::decrement_age(CodeEmitInfo* info) { - ciMethod* method = info->scope()->method(); - MethodCounters* mc_adr = method->ensure_method_counters(); - if (mc_adr != NULL) { - LIR_Opr mc = new_pointer_register(); - __ move(LIR_OprFact::intptrConst(mc_adr), mc); - int offset = in_bytes(MethodCounters::nmethod_age_offset()); - LIR_Address* counter = new LIR_Address(mc, offset, T_INT); - LIR_Opr result = new_register(T_INT); - __ load(counter, result); - __ sub(result, LIR_OprFact::intConst(1), result); - __ store(result, counter); - // DeoptimizeStub will reexecute from the current state in code info. 
- CodeStub* deopt = new DeoptimizeStub(info, Deoptimization::Reason_tenured, - Deoptimization::Action_make_not_entrant); - __ cmp(lir_cond_lessEqual, result, LIR_OprFact::intConst(0)); - __ branch(lir_cond_lessEqual, deopt); - } -} - - void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info, ciMethod *method, LIR_Opr step, int frequency, int bci, bool backedge, bool notify) { diff --git a/src/hotspot/share/c1/c1_LIRGenerator.hpp b/src/hotspot/share/c1/c1_LIRGenerator.hpp index 55f24a4169d78..9642b72ffebf1 100644 --- a/src/hotspot/share/c1/c1_LIRGenerator.hpp +++ b/src/hotspot/share/c1/c1_LIRGenerator.hpp @@ -418,7 +418,6 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure { increment_event_counter(info, step, bci, true); } } - void decrement_age(CodeEmitInfo* info); CodeEmitInfo* state_for(Instruction* x, ValueStack* state, bool ignore_xhandler = false); CodeEmitInfo* state_for(Instruction* x); diff --git a/src/hotspot/share/ci/ciEnv.cpp b/src/hotspot/share/ci/ciEnv.cpp index 82b61756c825f..5c58f512d73e2 100644 --- a/src/hotspot/share/ci/ciEnv.cpp +++ b/src/hotspot/share/ci/ciEnv.cpp @@ -42,6 +42,7 @@ #include "classfile/vmSymbols.hpp" #include "code/codeCache.hpp" #include "code/scopeDesc.hpp" +#include "compiler/compilationLog.hpp" #include "compiler/compilationPolicy.hpp" #include "compiler/compileBroker.hpp" #include "compiler/compilerEvent.hpp" @@ -375,7 +376,7 @@ ciInstance* ciEnv::get_or_create_exception(jobject& handle, Symbol* name) { VM_ENTRY_MARK; if (handle == NULL) { // Cf. universe.cpp, creation of Universe::_null_ptr_exception_instance. - InstanceKlass* ik = SystemDictionary::find_instance_klass(name, Handle(), Handle()); + InstanceKlass* ik = SystemDictionary::find_instance_klass(THREAD, name, Handle(), Handle()); jobject objh = NULL; if (ik != NULL) { oop obj = ik->allocate_instance(THREAD); @@ -528,7 +529,7 @@ ciKlass* ciEnv::get_klass_by_name_impl(ciKlass* accessing_klass, if (!require_local) { kls = SystemDictionary::find_constrained_instance_or_array_klass(current, sym, loader); } else { - kls = SystemDictionary::find_instance_or_array_klass(sym, loader, domain); + kls = SystemDictionary::find_instance_or_array_klass(current, sym, loader, domain); } found_klass = kls; } @@ -1067,6 +1068,9 @@ void ciEnv::register_method(ciMethod* target, return; } + // Check if memory should be freed before allocation + CodeCache::gc_on_allocation(); + // To prevent compile queue updates. MutexLocker locker(THREAD, MethodCompileQueue_lock); @@ -1158,12 +1162,6 @@ void ciEnv::register_method(ciMethod* target, nm->set_rtm_state(rtm_state); #endif - // Record successful registration. - // (Put nm into the task handle *before* publishing to the Java heap.) - if (task() != NULL) { - task()->set_code(nm); - } - if (entry_bci == InvocationEntryBci) { if (TieredCompilation) { // If there is an old version we're done with it @@ -1204,15 +1202,19 @@ void ciEnv::register_method(ciMethod* target, } } } - } // safepoints are allowed again + } + NoSafepointVerifier nsv; if (nm != NULL) { - // JVMTI -- compiled method notification (must be done outside lock) - nm->post_compiled_method_load_event(); + // Compilation succeeded, post what we know about it + nm->post_compiled_method(task()); + task()->set_num_inlined_bytecodes(num_inlined_bytecodes()); } else { // The CodeCache is full. 
record_failure("code cache is full"); } + + // safepoints are allowed again } // ------------------------------------------------------------------ diff --git a/src/hotspot/share/ci/ciEnv.hpp b/src/hotspot/share/ci/ciEnv.hpp index fb7591f53c314..59172b9ca57e0 100644 --- a/src/hotspot/share/ci/ciEnv.hpp +++ b/src/hotspot/share/ci/ciEnv.hpp @@ -387,7 +387,6 @@ class ciEnv : StackObj { int immediate_oops_patched, RTMState rtm_state = NoRTM); - // Access to certain well known ciObjects. #define VM_CLASS_FUNC(name, ignore_s) \ ciInstanceKlass* name() { \ diff --git a/src/hotspot/share/ci/ciMethod.cpp b/src/hotspot/share/ci/ciMethod.cpp index b8c9fe6efdaf6..245446804fd19 100644 --- a/src/hotspot/share/ci/ciMethod.cpp +++ b/src/hotspot/share/ci/ciMethod.cpp @@ -73,9 +73,6 @@ ciMethod::ciMethod(const methodHandle& h_m, ciInstanceKlass* holder) : assert(h_m() != NULL, "no null method"); assert(_holder->get_instanceKlass() == h_m->method_holder(), ""); - if (LogTouchedMethods) { - h_m->log_touched(Thread::current()); - } // These fields are always filled in in loaded methods. _flags = ciFlags(h_m->access_flags()); @@ -142,7 +139,6 @@ ciMethod::ciMethod(const methodHandle& h_m, ciInstanceKlass* holder) : constantPoolHandle cpool(Thread::current(), h_m->constants()); _signature = new (env->arena()) ciSignature(_holder, cpool, sig_symbol); _method_data = NULL; - _nmethod_age = h_m->nmethod_age(); // Take a snapshot of these values, so they will be commensurate with the MDO. if (ProfileInterpreter || CompilerConfig::is_c1_profiling()) { int invcnt = h_m->interpreter_invocation_count(); @@ -1208,15 +1204,6 @@ bool ciMethod::check_call(int refinfo_index, bool is_static) const { } return false; } - -// ------------------------------------------------------------------ -// ciMethod::profile_aging -// -// Should the method be compiled with an age counter? -bool ciMethod::profile_aging() const { - return UseCodeAging && (!MethodCounters::is_nmethod_hot(nmethod_age()) && - !MethodCounters::is_nmethod_age_unset(nmethod_age())); -} // ------------------------------------------------------------------ // ciMethod::print_codes // diff --git a/src/hotspot/share/ci/ciMethod.hpp b/src/hotspot/share/ci/ciMethod.hpp index 011276964989c..4bc4cb1961c24 100644 --- a/src/hotspot/share/ci/ciMethod.hpp +++ b/src/hotspot/share/ci/ciMethod.hpp @@ -80,7 +80,6 @@ class ciMethod : public ciMetadata { int _max_locals; vmIntrinsicID _intrinsic_id; int _handler_count; - int _nmethod_age; int _interpreter_invocation_count; int _interpreter_throwout_count; int _instructions_size; @@ -191,10 +190,6 @@ class ciMethod : public ciMetadata { int interpreter_invocation_count() const { check_is_loaded(); return _interpreter_invocation_count; } int interpreter_throwout_count() const { check_is_loaded(); return _interpreter_throwout_count; } int size_of_parameters() const { check_is_loaded(); return _size_of_parameters; } - int nmethod_age() const { check_is_loaded(); return _nmethod_age; } - - // Should the method be compiled with an age counter? - bool profile_aging() const; // Code size for inlining decisions. 
int code_size_for_inlining(); diff --git a/src/hotspot/share/classfile/classLoaderDataGraph.cpp b/src/hotspot/share/classfile/classLoaderDataGraph.cpp index 1767e616eed43..95c8b805b5fc2 100644 --- a/src/hotspot/share/classfile/classLoaderDataGraph.cpp +++ b/src/hotspot/share/classfile/classLoaderDataGraph.cpp @@ -550,10 +550,12 @@ void ClassLoaderDataGraph::purge(bool at_safepoint) { delete purge_me; classes_unloaded = true; } + + Metaspace::purge(classes_unloaded); if (classes_unloaded) { - Metaspace::purge(); set_metaspace_oom(false); } + DependencyContext::purge_dependency_contexts(); // If we're purging metadata at a safepoint, clean remaining @@ -572,18 +574,6 @@ void ClassLoaderDataGraph::purge(bool at_safepoint) { } } -int ClassLoaderDataGraph::resize_dictionaries() { - assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!"); - int resized = 0; - assert (Dictionary::does_any_dictionary_needs_resizing(), "some dictionary should need resizing"); - FOR_ALL_DICTIONARY(cld) { - if (cld->dictionary()->resize_if_needed()) { - resized++; - } - } - return resized; -} - ClassLoaderDataGraphKlassIteratorAtomic::ClassLoaderDataGraphKlassIteratorAtomic() : _next_klass(NULL) { assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!"); diff --git a/src/hotspot/share/classfile/classLoaderDataGraph.hpp b/src/hotspot/share/classfile/classLoaderDataGraph.hpp index a3e23c32a9eae..73a22fca8a419 100644 --- a/src/hotspot/share/classfile/classLoaderDataGraph.hpp +++ b/src/hotspot/share/classfile/classLoaderDataGraph.hpp @@ -104,8 +104,6 @@ class ClassLoaderDataGraph : public AllStatic { static void print_dictionary(outputStream* st); static void print_table_statistics(outputStream* st); - static int resize_dictionaries(); - static bool has_metaspace_oom() { return _metaspace_oom; } static void set_metaspace_oom(bool value) { _metaspace_oom = value; } diff --git a/src/hotspot/share/classfile/dictionary.cpp b/src/hotspot/share/classfile/dictionary.cpp index a78c553a0757e..c08adac51a608 100644 --- a/src/hotspot/share/classfile/dictionary.cpp +++ b/src/hotspot/share/classfile/dictionary.cpp @@ -44,94 +44,69 @@ #include "runtime/javaCalls.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/safepointVerifiers.hpp" +#include "utilities/concurrentHashTable.inline.hpp" #include "utilities/growableArray.hpp" -#include "utilities/hashtable.inline.hpp" +#include "utilities/tableStatistics.hpp" -// Optimization: if any dictionary needs resizing, we set this flag, -// so that we don't have to walk all dictionaries to check if any actually -// needs resizing, which is costly to do at Safepoint. -bool Dictionary::_some_dictionary_needs_resizing = false; - -Dictionary::Dictionary(ClassLoaderData* loader_data, int table_size, bool resizable) - : Hashtable(table_size, (int)sizeof(DictionaryEntry)), - _resizable(resizable), _needs_resizing(false), _loader_data(loader_data) { -}; +// 2^24 is max size, like StringTable. 
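+// (END_SIZE is a log2 exponent: the ConcurrentHashTable is created with
+// log2 sizes, so growth stops once the bucket count reaches 1 << END_SIZE.)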
+const size_t END_SIZE = 24; +// If a chain gets to 100 something might be wrong +const size_t REHASH_LEN = 100; +Dictionary::Dictionary(ClassLoaderData* loader_data, size_t table_size, bool resizable) + : _resizable(resizable), _number_of_entries(0), _loader_data(loader_data) { -Dictionary::Dictionary(ClassLoaderData* loader_data, - int table_size, HashtableBucket* t, - int number_of_entries, bool resizable) - : Hashtable(table_size, (int)sizeof(DictionaryEntry), t, number_of_entries), - _resizable(resizable), _needs_resizing(false), _loader_data(loader_data) { -}; + size_t start_size_log_2 = MAX2(ceil_log2(table_size), (size_t)2); // 2 is minimum size even though some dictionaries only have one entry + size_t current_size = ((size_t)1) << start_size_log_2; + log_info(class, loader, data)("Dictionary start size: " SIZE_FORMAT " (" SIZE_FORMAT ")", + current_size, start_size_log_2); + _table = new ConcurrentTable(start_size_log_2, END_SIZE, REHASH_LEN); +} Dictionary::~Dictionary() { - DictionaryEntry* probe = NULL; - for (int index = 0; index < table_size(); index++) { - for (DictionaryEntry** p = bucket_addr(index); *p != NULL; ) { - probe = *p; - *p = probe->next(); - free_entry(probe); - } - } - assert(number_of_entries() == 0, "should have removed all entries"); + // This deletes the table and all the nodes, by calling free_node in Config. + delete _table; +} + +uintx Dictionary::Config::get_hash(Value const& value, bool* is_dead) { + return value->instance_klass()->name()->identity_hash(); } -DictionaryEntry* Dictionary::new_entry(unsigned int hash, InstanceKlass* klass) { - DictionaryEntry* entry = (DictionaryEntry*)Hashtable::new_entry(hash, klass); - entry->release_set_pd_set(NULL); - assert(klass->is_instance_klass(), "Must be"); - return entry; +void* Dictionary::Config::allocate_node(void* context, size_t size, Value const& value) { + return AllocateHeap(size, mtClass); } -void Dictionary::free_entry(DictionaryEntry* entry) { +void Dictionary::Config::free_node(void* context, void* memory, Value const& value) { + delete value; // Call DictionaryEntry destructor + FreeHeap(memory); +} + +DictionaryEntry::DictionaryEntry(InstanceKlass* klass) : _instance_klass(klass) { + release_set_pd_set(nullptr); +} + +DictionaryEntry::~DictionaryEntry() { // avoid recursion when deleting linked list // pd_set is accessed during a safepoint. // This doesn't require a lock because nothing is reading this // entry anymore. The ClassLoader is dead. 
- while (entry->pd_set_acquire() != NULL) { - ProtectionDomainEntry* to_delete = entry->pd_set_acquire(); - entry->release_set_pd_set(to_delete->next_acquire()); + while (pd_set_acquire() != NULL) { + ProtectionDomainEntry* to_delete = pd_set_acquire(); + release_set_pd_set(to_delete->next_acquire()); delete to_delete; } - BasicHashtable::free_entry(entry); } const int _resize_load_trigger = 5; // load factor that will trigger the resize -bool Dictionary::does_any_dictionary_needs_resizing() { - return Dictionary::_some_dictionary_needs_resizing; +int Dictionary::table_size() const { + return 1 << _table->get_size_log2(Thread::current()); } -void Dictionary::check_if_needs_resize() { - if (_resizable == true) { - if (number_of_entries() > (_resize_load_trigger*table_size())) { - _needs_resizing = true; - Dictionary::_some_dictionary_needs_resizing = true; - } - } -} - -bool Dictionary::resize_if_needed() { - assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); - int desired_size = 0; - if (_needs_resizing == true) { - desired_size = calculate_resize(false); - assert(desired_size != 0, "bug in calculate_resize"); - if (desired_size == table_size()) { - _resizable = false; // hit max - } else { - if (!resize(desired_size)) { - // Something went wrong, turn resizing off - _resizable = false; - } - } - } - - _needs_resizing = false; - Dictionary::_some_dictionary_needs_resizing = false; - - return (desired_size != 0); +bool Dictionary::check_if_needs_resize() { + return (_resizable && + (_number_of_entries > (_resize_load_trigger * table_size())) && + !_table->is_max_size_reached()); } bool DictionaryEntry::is_valid_protection_domain(Handle protection_domain) { @@ -213,78 +188,85 @@ void DictionaryEntry::add_protection_domain(ClassLoaderData* loader_data, Handle // Just the classes from defining class loaders void Dictionary::classes_do(void f(InstanceKlass*)) { - for (int index = 0; index < table_size(); index++) { - for (DictionaryEntry* probe = bucket(index); - probe != NULL; - probe = probe->next()) { - InstanceKlass* k = probe->instance_klass(); - if (loader_data() == k->class_loader_data()) { - f(k); - } + auto doit = [&] (DictionaryEntry** value) { + InstanceKlass* k = (*value)->instance_klass(); + if (loader_data() == k->class_loader_data()) { + f(k); } - } -} + return true; + }; -// Added for initialize_itable_for_klass to handle exceptions -// Just the classes from defining class loaders -void Dictionary::classes_do(void f(InstanceKlass*, TRAPS), TRAPS) { - for (int index = 0; index < table_size(); index++) { - for (DictionaryEntry* probe = bucket(index); - probe != NULL; - probe = probe->next()) { - InstanceKlass* k = probe->instance_klass(); - if (loader_data() == k->class_loader_data()) { - f(k, CHECK); - } - } - } + _table->do_scan(Thread::current(), doit); } // All classes, and their class loaders, including initiating class loaders void Dictionary::all_entries_do(KlassClosure* closure) { - for (int index = 0; index < table_size(); index++) { - for (DictionaryEntry* probe = bucket(index); - probe != NULL; - probe = probe->next()) { - InstanceKlass* k = probe->instance_klass(); - closure->do_klass(k); - } - } + auto all_doit = [&] (DictionaryEntry** value) { + InstanceKlass* k = (*value)->instance_klass(); + closure->do_klass(k); + return true; + }; + + _table->do_scan(Thread::current(), all_doit); } // Used to scan and relocate the classes during CDS archive dump. 
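// (MetaspaceClosure::push records the address of each InstanceKlass* slot so
// the dump code can relocate the pointer itself, hence instance_klass_addr().)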
void Dictionary::classes_do(MetaspaceClosure* it) { Arguments::assert_is_dumping_archive(); - for (int index = 0; index < table_size(); index++) { - for (DictionaryEntry* probe = bucket(index); - probe != NULL; - probe = probe->next()) { - it->push(probe->klass_addr()); - } - } -} + auto push = [&] (DictionaryEntry** value) { + InstanceKlass** k = (*value)->instance_klass_addr(); + it->push(k); + return true; + }; + _table->do_scan(Thread::current(), push); +} +class DictionaryLookup : StackObj { +private: + Symbol* _name; +public: + DictionaryLookup(Symbol* name) : _name(name) { } + uintx get_hash() const { + return _name->identity_hash(); + } + bool equals(DictionaryEntry** value, bool* is_dead) { + DictionaryEntry *entry = *value; + *is_dead = false; + return (entry->instance_klass()->name() == _name); + } +}; // Add a loaded class to the dictionary. -// Readers of the SystemDictionary aren't always locked, so _buckets -// is volatile. The store of the next field in the constructor is -// also cast to volatile; we do this to ensure store order is maintained -// by the compilers. - -void Dictionary::add_klass(unsigned int hash, Symbol* class_name, +void Dictionary::add_klass(JavaThread* current, Symbol* class_name, InstanceKlass* obj) { - assert_locked_or_safepoint(SystemDictionary_lock); + assert_locked_or_safepoint(SystemDictionary_lock); // doesn't matter now assert(obj != NULL, "adding NULL obj"); assert(obj->name() == class_name, "sanity check on name"); - DictionaryEntry* entry = new_entry(hash, obj); - int index = hash_to_index(hash); - add_entry(index, entry); - check_if_needs_resize(); + DictionaryEntry* entry = new DictionaryEntry(obj); + DictionaryLookup lookup(class_name); + bool needs_rehashing, clean_hint; + bool created = _table->insert(current, lookup, entry, &needs_rehashing, &clean_hint); + assert(created, "should be because we have a lock"); + assert (!needs_rehashing, "should never need rehashing"); + assert(!clean_hint, "no class should be unloaded"); + _number_of_entries++; // still locked + // This table can be resized while another thread is reading it. + if (check_if_needs_resize()) { + _table->grow(current); + + // It would be nice to have a JFR event here, add some logging. + LogTarget(Info, class, loader, data) lt; + if (lt.is_enabled()) { + ResourceMark rm; + LogStream ls(lt); + ls.print("Dictionary resized to %d entries %d for ", table_size(), _number_of_entries); + loader_data()->print_value_on(&ls); + } + } } - // This routine does not lock the dictionary. // // Since readers don't hold a lock, we must make sure that system @@ -293,27 +275,28 @@ void Dictionary::add_klass(unsigned int hash, Symbol* class_name, // be updated in an MT-safe manner). // // Callers should be aware that an entry could be added just after -// _buckets[index] is read here, so the caller will not see the new entry. +// the table is read here, so the caller will not see the new entry. +// The entry may be accessed by the VM thread in verification. 
+DictionaryEntry* Dictionary::get_entry(Thread* current, Symbol* class_name) { - for (DictionaryEntry* entry = bucket(index); - entry != NULL; - entry = entry->next()) { - if (entry->hash() == hash && entry->instance_klass()->name() == class_name) { - return entry; - } - } - return NULL; + DictionaryLookup lookup(class_name); + DictionaryEntry* result = nullptr; + auto get = [&] (DictionaryEntry** value) { + // function called if value is found so is never null + result = (*value); + }; + bool needs_rehashing = false; + _table->get(current, lookup, get, &needs_rehashing); + assert (!needs_rehashing, "should never need rehashing"); + return result; } - -InstanceKlass* Dictionary::find(unsigned int hash, Symbol* name, +InstanceKlass* Dictionary::find(Thread* current, Symbol* name, Handle protection_domain) { NoSafepointVerifier nsv; - int index = hash_to_index(hash); - DictionaryEntry* entry = get_entry(index, hash, name); + DictionaryEntry* entry = get_entry(current, name); if (entry != NULL && entry->is_valid_protection_domain(protection_domain)) { return entry->instance_klass(); } else { @@ -321,23 +304,19 @@ InstanceKlass* Dictionary::find(unsigned int hash, Symbol* name, } } -InstanceKlass* Dictionary::find_class(unsigned int hash, +InstanceKlass* Dictionary::find_class(Thread* current, Symbol* name) { assert_locked_or_safepoint(SystemDictionary_lock); - - int index = hash_to_index(hash); - assert (index == index_for(name), "incorrect index?"); - - DictionaryEntry* entry = get_entry(index, hash, name); + DictionaryEntry* entry = get_entry(current, name); return (entry != NULL) ? entry->instance_klass() : NULL; } -void Dictionary::add_protection_domain(int index, unsigned int hash, +void Dictionary::add_protection_domain(JavaThread* current, InstanceKlass* klass, Handle protection_domain) { assert(java_lang_System::allow_security_manager(), "only needed if security manager allowed"); Symbol* klass_name = klass->name(); - DictionaryEntry* entry = get_entry(index, hash, klass_name); + DictionaryEntry* entry = get_entry(current, klass_name); assert(entry != NULL,"entry must be present, we just created it"); assert(protection_domain() != NULL, @@ -354,16 +333,14 @@ void Dictionary::add_protection_domain(int index, unsigned int hash, } -inline bool Dictionary::is_valid_protection_domain(unsigned int hash, +inline bool Dictionary::is_valid_protection_domain(JavaThread* current, Symbol* name, Handle protection_domain) { - int index = hash_to_index(hash); - DictionaryEntry* entry = get_entry(index, hash, name); + DictionaryEntry* entry = get_entry(current, name); return entry->is_valid_protection_domain(protection_domain); } -void Dictionary::validate_protection_domain(unsigned int name_hash, - InstanceKlass* klass, +void Dictionary::validate_protection_domain(InstanceKlass* klass, Handle class_loader, Handle protection_domain, TRAPS) { @@ -372,7 +349,7 @@ void Dictionary::validate_protection_domain(unsigned int name_hash, assert(protection_domain() != NULL, "Should not call this"); if (!java_lang_System::allow_security_manager() || - is_valid_protection_domain(name_hash, klass->name(), protection_domain)) { + is_valid_protection_domain(THREAD, klass->name(), protection_domain)) { return; } @@ -424,8 +401,7 @@ void Dictionary::validate_protection_domain(unsigned int name_hash, // and protection domain are expected to succeed. 
{ MutexLocker mu(THREAD, SystemDictionary_lock); - int d_index = hash_to_index(name_hash); - add_protection_domain(d_index, name_hash, klass, + add_protection_domain(THREAD, klass, protection_domain); } } @@ -442,10 +418,8 @@ void Dictionary::clean_cached_protection_domains(GrowableArraynext()) { + auto clean_entries = [&] (DictionaryEntry** value) { + DictionaryEntry* probe = *value; Klass* e = probe->instance_klass(); ProtectionDomainEntry* current = probe->pd_set_acquire(); @@ -458,7 +432,7 @@ void Dictionary::clean_cached_protection_domains(GrowableArrayclass_loader()->print_value_on(&ls); + ls.print("class loader: "); _loader_data->class_loader()->print_value_on(&ls); ls.print(" loading: "); probe->instance_klass()->print_value_on(&ls); ls.cr(); } @@ -477,8 +451,10 @@ void Dictionary::clean_cached_protection_domains(GrowableArraynext_acquire(); } } - } - } + return true; + }; + + _table->do_scan(Thread::current(), clean_entries); } void DictionaryEntry::verify_protection_domain_set() { @@ -505,7 +481,7 @@ void DictionaryEntry::print_count(outputStream *st) { void Dictionary::print_size(outputStream* st) const { st->print_cr("Java dictionary (table_size=%d, classes=%d, resizable=%s)", - table_size(), number_of_entries(), BOOL_TO_STR(_resizable)); + table_size(), _number_of_entries, BOOL_TO_STR(_resizable)); } void Dictionary::print_on(outputStream* st) const { @@ -516,25 +492,29 @@ void Dictionary::print_on(outputStream* st) const { print_size(st); st->print_cr("^ indicates that initiating loader is different from defining loader"); - for (int index = 0; index < table_size(); index++) { - for (DictionaryEntry* probe = bucket(index); - probe != NULL; - probe = probe->next()) { - Klass* e = probe->instance_klass(); - bool is_defining_class = - (loader_data() == e->class_loader_data()); - st->print("%4d: %s%s", index, is_defining_class ? " " : "^", e->external_name()); - ClassLoaderData* cld = e->class_loader_data(); - if (!loader_data()->is_the_null_class_loader_data()) { - // Class loader output for the dictionary for the null class loader data is - // redundant and obvious. - st->print(", "); - cld->print_value_on(st); - st->print(", "); - probe->print_count(st); - } - st->cr(); + auto printer = [&] (DictionaryEntry** entry) { + DictionaryEntry* probe = *entry; + Klass* e = probe->instance_klass(); + bool is_defining_class = + (_loader_data == e->class_loader_data()); + st->print(" %s%s", is_defining_class ? " " : "^", e->external_name()); + ClassLoaderData* cld = e->class_loader_data(); + if (!_loader_data->is_the_null_class_loader_data()) { + // Class loader output for the dictionary for the null class loader data is + // redundant and obvious. 
+ st->print(", "); + cld->print_value_on(st); + st->print(", "); + probe->print_count(st); } + st->cr(); + return true; + }; + + if (SafepointSynchronize::is_at_safepoint()) { + _table->do_safepoint_scan(printer); + } else { + _table->do_scan(Thread::current(), printer); } tty->cr(); } @@ -548,7 +528,7 @@ void DictionaryEntry::verify() { } void Dictionary::verify() { - guarantee(number_of_entries() >= 0, "Verify of dictionary failed"); + guarantee(_number_of_entries >= 0, "Verify of dictionary failed"); ClassLoaderData* cld = loader_data(); // class loader must be present; a null class loader is the @@ -557,8 +537,19 @@ void Dictionary::verify() { (cld->is_the_null_class_loader_data() || cld->class_loader_no_keepalive()->is_instance()), "checking type of class_loader"); - ResourceMark rm; - stringStream tempst; - tempst.print("System Dictionary for %s class loader", cld->loader_name_and_id()); - verify_table(tempst.as_string()); + auto verifier = [&] (DictionaryEntry** val) { + (*val)->verify(); + return true; + }; + + _table->do_safepoint_scan(verifier); +} + +void Dictionary::print_table_statistics(outputStream* st, const char* table_name) { + static TableStatistics ts; + auto sz = [&] (DictionaryEntry** val) { + return sizeof(**val); + }; + ts = _table->statistics_get(Thread::current(), sz, ts); + ts.print(st, table_name); } diff --git a/src/hotspot/share/classfile/dictionary.hpp b/src/hotspot/share/classfile/dictionary.hpp index ef77bacb1f31c..4066be0b5d2d5 100644 --- a/src/hotspot/share/classfile/dictionary.hpp +++ b/src/hotspot/share/classfile/dictionary.hpp @@ -28,7 +28,7 @@ #include "oops/instanceKlass.hpp" #include "oops/oop.hpp" #include "oops/oopHandle.hpp" -#include "utilities/hashtable.hpp" +#include "utilities/concurrentHashTable.hpp" #include "utilities/ostream.hpp" class DictionaryEntry; @@ -38,77 +38,68 @@ template class GrowableArray; //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // The data structure for the class loader data dictionaries. 
-class Dictionary : public Hashtable<InstanceKlass*, mtClass> { - friend class VMStructs; +class DictionaryEntry; - static bool _some_dictionary_needs_resizing; +class Dictionary : public CHeapObj<mtClass> { bool _resizable; - bool _needs_resizing; - void check_if_needs_resize(); + int _number_of_entries; + + class Config { + public: + using Value = DictionaryEntry*; + static uintx get_hash(Value const& value, bool* is_dead); + static void* allocate_node(void* context, size_t size, Value const& value); + static void free_node(void* context, void* memory, Value const& value); + }; + + using ConcurrentTable = ConcurrentHashTable<Config, mtClass>; + ConcurrentTable* _table; ClassLoaderData* _loader_data; // backpointer to owning loader ClassLoaderData* loader_data() const { return _loader_data; } - DictionaryEntry* get_entry(int index, unsigned int hash, Symbol* name); + DictionaryEntry* get_entry(Thread* current, Symbol* name); + bool check_if_needs_resize(); + int table_size() const; public: - Dictionary(ClassLoaderData* loader_data, int table_size, bool resizable = false); - Dictionary(ClassLoaderData* loader_data, int table_size, HashtableBucket<mtClass>* t, int number_of_entries, bool resizable = false); + Dictionary(ClassLoaderData* loader_data, size_t table_size, bool resizable = false); ~Dictionary(); - static bool does_any_dictionary_needs_resizing(); - bool resize_if_needed(); + void add_klass(JavaThread* current, Symbol* class_name, InstanceKlass* obj); - void add_klass(unsigned int hash, Symbol* class_name, InstanceKlass* obj); - - InstanceKlass* find_class(unsigned int hash, Symbol* name); + InstanceKlass* find_class(Thread* current, Symbol* name); void classes_do(void f(InstanceKlass*)); - void classes_do(void f(InstanceKlass*, TRAPS), TRAPS); void all_entries_do(KlassClosure* closure); void classes_do(MetaspaceClosure* it); void clean_cached_protection_domains(GrowableArray<ProtectionDomainEntry*>* delete_list); // Protection domains - InstanceKlass* find(unsigned int hash, Symbol* name, Handle protection_domain); - void validate_protection_domain(unsigned int name_hash, - InstanceKlass* klass, + InstanceKlass* find(Thread* current, Symbol* name, Handle protection_domain); + void validate_protection_domain(InstanceKlass* klass, Handle class_loader, Handle protection_domain, TRAPS); + void print_table_statistics(outputStream* st, const char* table_name); + void print_on(outputStream* st) const; void print_size(outputStream* st) const; void verify(); private: - DictionaryEntry* new_entry(unsigned int hash, InstanceKlass* klass); - - DictionaryEntry* bucket(int i) const { - return (DictionaryEntry*)Hashtable<InstanceKlass*, mtClass>::bucket(i); - } - - // The following method is not MT-safe and must be done under lock. - DictionaryEntry** bucket_addr(int i) { - return (DictionaryEntry**)Hashtable<InstanceKlass*, mtClass>::bucket_addr(i); - } - - void free_entry(DictionaryEntry* entry); - - bool is_valid_protection_domain(unsigned int hash, - Symbol* name, + bool is_valid_protection_domain(JavaThread* current, Symbol* name, Handle protection_domain); - void add_protection_domain(int index, unsigned int hash, - InstanceKlass* klass, + void add_protection_domain(JavaThread* current, InstanceKlass* klass, Handle protection_domain); }; // An entry in the class loader data dictionaries, this describes a class as -// { InstanceKlass*, protection_domain }. +// { InstanceKlass*, protection_domain_set }. -class DictionaryEntry : public HashtableEntry<InstanceKlass*, mtClass> { - friend class VMStructs; +class DictionaryEntry : public CHeapObj<mtClass> { private: // Contains the set of approved protection domains that can access // this dictionary entry. 
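For orientation before the DictionaryEntry hunk: the new Dictionary drives HotSpot's ConcurrentHashTable through a small stack-allocated lookup object (get_hash()/equals()) plus a callback run only on a match, as in get_entry() and add_klass() above. A rough standalone analogue of that protocol, with every name hypothetical and a plain map standing in for the lock-free table:

#include <cstddef>
#include <cstdio>
#include <string>
#include <unordered_map>

// All names here are hypothetical; std::unordered_map stands in for the
// lock-free ConcurrentHashTable, which this sketch does not reproduce.
struct Entry { std::string name; };

struct Lookup {                          // plays the role of DictionaryLookup
  std::string name;
  size_t get_hash() const { return std::hash<std::string>{}(name); }
  bool equals(const Entry* e) const { return e->name == name; }
};

class TinyTable {
  std::unordered_map<size_t, Entry*> _map;
public:
  // Caller passes a lookup object plus a callback run only on a match,
  // mirroring how Dictionary::get_entry() drives the table's get().
  template <typename FOUND>
  bool get(const Lookup& lookup, FOUND found) {
    auto it = _map.find(lookup.get_hash());
    if (it == _map.end() || !lookup.equals(it->second)) return false;
    found(it->second);
    return true;
  }
  bool insert(const Lookup& lookup, Entry* e) {
    return _map.emplace(lookup.get_hash(), e).second;  // like _table->insert()
  }
};

int main() {
  TinyTable table;
  Entry e{"java/lang/String"};
  table.insert(Lookup{e.name}, &e);
  Entry* result = nullptr;                     // filled in by the callback
  table.get(Lookup{"java/lang/String"}, [&](Entry* v) { result = v; });
  std::printf("found: %s\n", result != nullptr ? result->name.c_str() : "(none)");
  return 0;
}

The callback style lets the table control locking and entry liveness around the access instead of handing out raw iterators to concurrent readers.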
@@ -123,24 +114,20 @@ class DictionaryEntry : public HashtableEntry<InstanceKlass*, mtClass> { // It is essentially a cache to avoid repeated Java up-calls to // ClassLoader.checkPackageAccess(). // + InstanceKlass* _instance_klass; ProtectionDomainEntry* volatile _pd_set; public: + DictionaryEntry(InstanceKlass* instance_klass); + ~DictionaryEntry(); + // Tells whether a protection is in the approved set. bool contains_protection_domain(oop protection_domain) const; // Adds a protection domain to the approved set. void add_protection_domain(ClassLoaderData* loader_data, Handle protection_domain); - InstanceKlass* instance_klass() const { return literal(); } - InstanceKlass** klass_addr() { return (InstanceKlass**)literal_addr(); } - - DictionaryEntry* next() const { - return (DictionaryEntry*)HashtableEntry<InstanceKlass*, mtClass>::next(); - } - - DictionaryEntry** next_addr() { - return (DictionaryEntry**)HashtableEntry<InstanceKlass*, mtClass>::next_addr(); - } + InstanceKlass* instance_klass() const { return _instance_klass; } + InstanceKlass** instance_klass_addr() { return &_instance_klass; } ProtectionDomainEntry* pd_set_acquire() const { return Atomic::load_acquire(&_pd_set); } void release_set_pd_set(ProtectionDomainEntry* entry) { Atomic::release_store(&_pd_set, entry); } diff --git a/src/hotspot/share/classfile/loaderConstraints.cpp b/src/hotspot/share/classfile/loaderConstraints.cpp index 033b53fce6fa6..7a0bc547c19b1 100644 --- a/src/hotspot/share/classfile/loaderConstraints.cpp +++ b/src/hotspot/share/classfile/loaderConstraints.cpp @@ -454,6 +454,7 @@ void LoaderConstraintTable::merge_loader_constraints(Symbol* class_name, } void LoaderConstraintTable::verify() { + Thread* thread = Thread::current(); auto check = [&] (Symbol*& key, ConstraintSet& set) { // foreach constraint in the set, check the klass is in the dictionary or placeholder table. int len = set.num_constraints(); @@ -465,8 +466,7 @@ void LoaderConstraintTable::verify() { Symbol* name = ik->name(); ClassLoaderData* loader_data = ik->class_loader_data(); Dictionary* dictionary = loader_data->dictionary(); - unsigned int name_hash = dictionary->compute_hash(name); - InstanceKlass* k = dictionary->find_class(name_hash, name); + InstanceKlass* k = dictionary->find_class(thread, name); if (k != NULL) { // We found the class in the dictionary, so we should // make sure that the Klass* matches what we already have.
diff --git a/src/hotspot/share/classfile/stringTable.cpp b/src/hotspot/share/classfile/stringTable.cpp index b6ab8871282db..d7a7dad0a5026 100644 --- a/src/hotspot/share/classfile/stringTable.cpp +++ b/src/hotspot/share/classfile/stringTable.cpp @@ -212,12 +212,6 @@ class StringTableLookupOop : public StackObj { } }; -static size_t ceil_log2(size_t val) { - size_t ret; - for (ret = 1; ((size_t)1 << ret) < val; ++ret); - return ret; -} - void StringTable::create_table() { size_t start_size_log_2 = ceil_log2(StringTableSize); _current_size = ((size_t)1) << start_size_log_2; diff --git a/src/hotspot/share/classfile/symbolTable.cpp b/src/hotspot/share/classfile/symbolTable.cpp index bd015d0844dbd..4e93843d01ab5 100644 --- a/src/hotspot/share/classfile/symbolTable.cpp +++ b/src/hotspot/share/classfile/symbolTable.cpp @@ -161,12 +161,6 @@ class SymbolTableConfig : public AllStatic { } }; -static size_t ceil_log2(size_t value) { - size_t ret; - for (ret = 1; ((size_t)1 << ret) < value; ++ret); - return ret; -} - void SymbolTable::create_table () { size_t start_size_log_2 = ceil_log2(SymbolTableSize); _current_size = ((size_t)1) << start_size_log_2; diff --git a/src/hotspot/share/classfile/systemDictionary.cpp b/src/hotspot/share/classfile/systemDictionary.cpp index 72368328ae1b8..be6f3d8877cde 100644 --- a/src/hotspot/share/classfile/systemDictionary.cpp +++ b/src/hotspot/share/classfile/systemDictionary.cpp @@ -198,15 +198,14 @@ ClassLoaderData* SystemDictionary::register_loader(Handle class_loader, bool cre } void SystemDictionary::set_system_loader(ClassLoaderData *cld) { - if (_java_system_loader.is_empty()) { - _java_system_loader = cld->class_loader_handle(); - } + assert(_java_system_loader.is_empty(), "already set!"); + _java_system_loader = cld->class_loader_handle(); + } void SystemDictionary::set_platform_loader(ClassLoaderData *cld) { - if (_java_platform_loader.is_empty()) { - _java_platform_loader = cld->class_loader_handle(); - } + assert(_java_platform_loader.is_empty(), "already set!"); + _java_platform_loader = cld->class_loader_handle(); } // ---------------------------------------------------------------------------- @@ -285,8 +284,7 @@ void verify_dictionary_entry(Symbol* class_name, InstanceKlass* k) { ClassLoaderData* loader_data = k->class_loader_data(); Dictionary* dictionary = loader_data->dictionary(); assert(class_name == k->name(), "Must be the same"); - unsigned int name_hash = dictionary->compute_hash(class_name); - InstanceKlass* kk = dictionary->find_class(name_hash, class_name); + InstanceKlass* kk = dictionary->find_class(JavaThread::current(), class_name); assert(kk == k, "should be present in dictionary"); } #endif @@ -432,13 +430,12 @@ InstanceKlass* SystemDictionary::resolve_super_or_fail(Symbol* class_name, ClassLoaderData* loader_data = class_loader_data(class_loader); Dictionary* dictionary = loader_data->dictionary(); - unsigned int name_hash = dictionary->compute_hash(class_name); // can't throw error holding a lock bool throw_circularity_error = false; { MutexLocker mu(THREAD, SystemDictionary_lock); - InstanceKlass* klassk = dictionary->find_class(name_hash, class_name); + InstanceKlass* klassk = dictionary->find_class(THREAD, class_name); InstanceKlass* quicksuperk; // To support parallel loading: if class is done loading, just return the superclass // if the super_name matches class->super()->name() and if the class loaders match. 
@@ -563,7 +560,6 @@ static bool should_wait_for_loading(Handle class_loader) { // For bootstrap and non-parallelCapable class loaders, check and wait for // another thread to complete loading this class. InstanceKlass* SystemDictionary::handle_parallel_loading(JavaThread* current, - unsigned int name_hash, Symbol* name, ClassLoaderData* loader_data, Handle lockObject, @@ -603,7 +599,7 @@ InstanceKlass* SystemDictionary::handle_parallel_loading(JavaThread* current, } // Check if classloading completed while we were waiting - InstanceKlass* check = loader_data->dictionary()->find_class(name_hash, name); + InstanceKlass* check = loader_data->dictionary()->find_class(current, name); if (check != NULL) { // Klass is already loaded, so just return it return check; @@ -647,14 +643,13 @@ InstanceKlass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, class_loader = Handle(THREAD, java_lang_ClassLoader::non_reflection_class_loader(class_loader())); ClassLoaderData* loader_data = register_loader(class_loader); Dictionary* dictionary = loader_data->dictionary(); - unsigned int name_hash = dictionary->compute_hash(name); // Do lookup to see if class already exists and the protection domain // has the right access. // This call uses find which checks protection domain already matches // All subsequent calls use find_class, and set loaded_class so that // before we return a result, we call out to java to check for valid protection domain. - InstanceKlass* probe = dictionary->find(name_hash, name, protection_domain); + InstanceKlass* probe = dictionary->find(THREAD, name, protection_domain); if (probe != NULL) return probe; // Non-bootstrap class loaders will call out to class loader and @@ -681,7 +676,7 @@ InstanceKlass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, // Check again (after locking) if the class already exists in SystemDictionary { MutexLocker mu(THREAD, SystemDictionary_lock); - InstanceKlass* check = dictionary->find_class(name_hash, name); + InstanceKlass* check = dictionary->find_class(THREAD, name); if (check != NULL) { // InstanceKlass is already loaded, but we still need to check protection domain below. loaded_class = check; @@ -729,7 +724,6 @@ InstanceKlass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, MutexLocker mu(THREAD, SystemDictionary_lock); if (should_wait_for_loading(class_loader)) { loaded_class = handle_parallel_loading(THREAD, - name_hash, name, loader_data, lockObject, @@ -739,7 +733,7 @@ InstanceKlass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, // Recheck if the class has been loaded for all class loader cases and // add a LOAD_INSTANCE placeholder while holding the SystemDictionary_lock. 
if (!throw_circularity_error && loaded_class == NULL) { - InstanceKlass* check = dictionary->find_class(name_hash, name); + InstanceKlass* check = dictionary->find_class(THREAD, name); if (check != NULL) { loaded_class = check; } else if (should_wait_for_loading(class_loader)) { @@ -767,7 +761,7 @@ InstanceKlass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, if (loaded_class == NULL) { // Do actual loading - loaded_class = load_instance_class(name_hash, name, class_loader, THREAD); + loaded_class = load_instance_class(name, class_loader, THREAD); } if (load_placeholder_added) { @@ -794,7 +788,7 @@ InstanceKlass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, // Check if the protection domain is present it has the right access if (protection_domain() != NULL) { // Verify protection domain. If it fails an exception is thrown - dictionary->validate_protection_domain(name_hash, loaded_class, class_loader, protection_domain, CHECK_NULL); + dictionary->validate_protection_domain(loaded_class, class_loader, protection_domain, CHECK_NULL); } return loaded_class; @@ -809,10 +803,11 @@ InstanceKlass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, // unloading, when this class loader is no longer referenced. // // Callers should be aware that an entry could be added just after -// _dictionary->bucket(index) is read here, so the caller will not see +// Dictionary is read here, so the caller will not see // the new entry. -InstanceKlass* SystemDictionary::find_instance_klass(Symbol* class_name, +InstanceKlass* SystemDictionary::find_instance_klass(Thread* current, + Symbol* class_name, Handle class_loader, Handle protection_domain) { @@ -829,13 +824,13 @@ InstanceKlass* SystemDictionary::find_instance_klass(Symbol* class_name, } Dictionary* dictionary = loader_data->dictionary(); - unsigned int name_hash = dictionary->compute_hash(class_name); - return dictionary->find(name_hash, class_name, protection_domain); + return dictionary->find(current, class_name, protection_domain); } // Look for a loaded instance or array klass by name. Do not do any loading. // return NULL in case of error. 
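The find_instance_or_array_klass hunk just below strips array descriptors down to a dimension count and an element type before looking up the element class, then rebuilds the array klass with array_klass_or_null(ndims). A standalone sketch of that descriptor parsing (mirroring SignatureStream only in spirit):

```cpp
#include <cstdio>
#include <string>

struct ArrayInfo { int ndims; bool is_primitive; std::string element; };

// Reduce a JVM array descriptor such as "[[Ljava/lang/String;" to its
// dimension count and element type.
static ArrayInfo parse_descriptor(const std::string& desc) {
  ArrayInfo info{0, false, ""};
  size_t i = 0;
  while (i < desc.size() && desc[i] == '[') { info.ndims++; i++; }
  if (i < desc.size() && desc[i] == 'L' && desc.back() == ';') {
    info.element = desc.substr(i + 1, desc.size() - i - 2);  // object element class
  } else {
    info.is_primitive = true;      // e.g. "I": served by the shared type-array klass
    info.element = desc.substr(i);
  }
  return info;
}

int main() {
  ArrayInfo a = parse_descriptor("[[Ljava/lang/String;");
  std::printf("ndims=%d element=%s\n", a.ndims, a.element.c_str());
  ArrayInfo b = parse_descriptor("[I");
  std::printf("ndims=%d primitive element=%s\n", b.ndims, b.element.c_str());
  return 0;
}
```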
-Klass* SystemDictionary::find_instance_or_array_klass(Symbol* class_name, +Klass* SystemDictionary::find_instance_or_array_klass(Thread* current, + Symbol* class_name, Handle class_loader, Handle protection_domain) { Klass* k = NULL; @@ -851,13 +846,13 @@ Klass* SystemDictionary::find_instance_or_array_klass(Symbol* class_name, if (t != T_OBJECT) { k = Universe::typeArrayKlassObj(t); } else { - k = SystemDictionary::find_instance_klass(ss.as_symbol(), class_loader, protection_domain); + k = SystemDictionary::find_instance_klass(current, ss.as_symbol(), class_loader, protection_domain); } if (k != NULL) { k = k->array_klass_or_null(ndims); } } else { - k = find_instance_klass(class_name, class_loader, protection_domain); + k = find_instance_klass(current, class_name, class_loader, protection_domain); } return k; } @@ -1111,7 +1106,7 @@ bool SystemDictionary::check_shared_class_super_type(InstanceKlass* klass, Insta if (!super_type->is_shared_unregistered_class() && super_type->class_loader_data() != NULL) { // Check if the superclass is loaded by the current class_loader Symbol* name = super_type->name(); - InstanceKlass* check = find_instance_klass(name, class_loader, protection_domain); + InstanceKlass* check = find_instance_klass(THREAD, name, class_loader, protection_domain); if (check == super_type) { return true; } @@ -1402,8 +1397,7 @@ InstanceKlass* SystemDictionary::load_instance_class_impl(Symbol* class_name, Ha } } -InstanceKlass* SystemDictionary::load_instance_class(unsigned int name_hash, - Symbol* name, +InstanceKlass* SystemDictionary::load_instance_class(Symbol* name, Handle class_loader, TRAPS) { @@ -1414,7 +1408,7 @@ InstanceKlass* SystemDictionary::load_instance_class(unsigned int name_hash, if (loaded_class != NULL && loaded_class->class_loader() != class_loader()) { - check_constraints(name_hash, loaded_class, class_loader, false, CHECK_NULL); + check_constraints(loaded_class, class_loader, false, CHECK_NULL); // Record dependency for non-parent delegation. // This recording keeps the defining class loader of the klass (loaded_class) found @@ -1427,7 +1421,7 @@ InstanceKlass* SystemDictionary::load_instance_class(unsigned int name_hash, { // Grabbing the Compile_lock prevents systemDictionary updates // during compilations. MutexLocker mu(THREAD, Compile_lock); - update_dictionary(name_hash, loaded_class, class_loader); + update_dictionary(THREAD, loaded_class, class_loader); } if (JvmtiExport::should_post_class_load()) { @@ -1473,8 +1467,7 @@ void SystemDictionary::define_instance_class(InstanceKlass* k, Handle class_load // which will require a token to perform the define class Symbol* name_h = k->name(); Dictionary* dictionary = loader_data->dictionary(); - unsigned int name_hash = dictionary->compute_hash(name_h); - check_constraints(name_hash, k, class_loader, true, CHECK); + check_constraints(k, class_loader, true, CHECK); // Register class just loaded with class loader (placed in ArrayList) // Note we do this before updating the dictionary, as this can @@ -1498,7 +1491,7 @@ void SystemDictionary::define_instance_class(InstanceKlass* k, Handle class_load // Add to systemDictionary - so other classes can see it. 
// Grabs and releases SystemDictionary_lock - update_dictionary(name_hash, k, class_loader); + update_dictionary(THREAD, k, class_loader); } // notify jvmti @@ -1535,14 +1528,12 @@ InstanceKlass* SystemDictionary::find_or_define_helper(Symbol* class_name, Handl ClassLoaderData* loader_data = class_loader_data(class_loader); Dictionary* dictionary = loader_data->dictionary(); - unsigned int name_hash = dictionary->compute_hash(name_h); - // Hold SD lock around find_class and placeholder creation for DEFINE_CLASS { MutexLocker mu(THREAD, SystemDictionary_lock); // First check if class already defined if (is_parallelDefine(class_loader)) { - InstanceKlass* check = dictionary->find_class(name_hash, name_h); + InstanceKlass* check = dictionary->find_class(THREAD, name_h); if (check != NULL) { return check; } @@ -1566,7 +1557,7 @@ InstanceKlass* SystemDictionary::find_or_define_helper(Symbol* class_name, Handl PlaceholderTable::find_and_remove(name_h, loader_data, PlaceholderTable::DEFINE_CLASS, THREAD); SystemDictionary_lock->notify_all(); #ifdef ASSERT - InstanceKlass* check = dictionary->find_class(name_hash, name_h); + InstanceKlass* check = dictionary->find_class(THREAD, name_h); assert(check != NULL, "definer missed recording success"); #endif return ik; @@ -1732,8 +1723,7 @@ void SystemDictionary::initialize(TRAPS) { // if defining is true, then LinkageError if already in dictionary // if initiating loader, then ok if InstanceKlass matches existing entry -void SystemDictionary::check_constraints(unsigned int name_hash, - InstanceKlass* k, +void SystemDictionary::check_constraints(InstanceKlass* k, Handle class_loader, bool defining, TRAPS) { @@ -1747,7 +1737,7 @@ void SystemDictionary::check_constraints(unsigned int name_hash, MutexLocker mu(THREAD, SystemDictionary_lock); - InstanceKlass* check = loader_data->dictionary()->find_class(name_hash, name); + InstanceKlass* check = loader_data->dictionary()->find_class(THREAD, name); if (check != NULL) { // If different InstanceKlass - duplicate class definition, // else - ok, class loaded by a different thread in parallel. @@ -1791,7 +1781,7 @@ void SystemDictionary::check_constraints(unsigned int name_hash, // Update class loader data dictionary - done after check_constraint and add_to_hierarchy // have been called. -void SystemDictionary::update_dictionary(unsigned int hash, +void SystemDictionary::update_dictionary(JavaThread* current, InstanceKlass* k, Handle class_loader) { // Compile_lock prevents systemDictionary updates during compilations @@ -1804,9 +1794,9 @@ void SystemDictionary::update_dictionary(unsigned int hash, // Make a new dictionary entry. Dictionary* dictionary = loader_data->dictionary(); - InstanceKlass* sd_check = dictionary->find_class(hash, name); + InstanceKlass* sd_check = dictionary->find_class(current, name); if (sd_check == NULL) { - dictionary->add_klass(hash, name, k); + dictionary->add_klass(current, name, k); } SystemDictionary_lock->notify_all(); } @@ -1822,7 +1812,7 @@ Klass* SystemDictionary::find_constrained_instance_or_array_klass( // First see if it has been loaded directly. // Force the protection domain to be null. (This removes protection checks.) 
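The define paths above (find_or_define_helper, check_constraints, update_dictionary) all share one shape: take SystemDictionary_lock, probe the loader's dictionary, and only define on a miss, notifying waiters so parallel loaders re-check instead of defining twice. A simplified standalone model of that check-then-define pattern, with std::mutex standing in for the VM mutex:

```cpp
#include <condition_variable>
#include <cstdio>
#include <map>
#include <mutex>
#include <string>
#include <thread>

// Check-then-define under one lock: the first definer inserts and notifies;
// racing callers observe the existing entry on their own locked re-check.
static std::mutex dict_lock;                    // stands in for SystemDictionary_lock
static std::condition_variable dict_cv;
static std::map<std::string, int> dictionary;   // name -> "class"

int find_or_define(const std::string& name, int candidate) {
  std::unique_lock<std::mutex> ml(dict_lock);
  auto it = dictionary.find(name);              // re-check under the lock
  if (it != dictionary.end()) {
    return it->second;                          // lost the race: use existing
  }
  dictionary.emplace(name, candidate);          // we are the definer
  dict_cv.notify_all();                         // wake any parallel loaders
  return candidate;
}

int main() {
  std::thread t1([] { std::printf("t1 -> %d\n", find_or_define("Foo", 1)); });
  std::thread t2([] { std::printf("t2 -> %d\n", find_or_define("Foo", 2)); });
  t1.join();
  t2.join();
  return 0;
}
```

Both threads print the same value, whichever definer won, which is exactly the invariant the locked find_class re-checks above preserve.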
Handle no_protection_domain; - Klass* klass = find_instance_or_array_klass(class_name, class_loader, + Klass* klass = find_instance_or_array_klass(current, class_name, class_loader, no_protection_domain); if (klass != NULL) return klass; @@ -1882,17 +1872,15 @@ bool SystemDictionary::add_loader_constraint(Symbol* class_name, } Dictionary* dictionary1 = loader_data1->dictionary(); - unsigned int name_hash1 = dictionary1->compute_hash(constraint_name); - Dictionary* dictionary2 = loader_data2->dictionary(); - unsigned int name_hash2 = dictionary2->compute_hash(constraint_name); + JavaThread* current = JavaThread::current(); { MutexLocker mu_s(SystemDictionary_lock); - InstanceKlass* klass1 = dictionary1->find_class(name_hash1, constraint_name); - InstanceKlass* klass2 = dictionary2->find_class(name_hash2, constraint_name); + InstanceKlass* klass1 = dictionary1->find_class(current, constraint_name); + InstanceKlass* klass2 = dictionary2->find_class(current, constraint_name); bool result = LoaderConstraintTable::add_entry(constraint_name, klass1, class_loader1, - klass2, class_loader2); + klass2, class_loader2); #if INCLUDE_CDS if (Arguments::is_dumping_archive() && klass_being_linked != NULL && !klass_being_linked->is_shared()) { diff --git a/src/hotspot/share/classfile/systemDictionary.hpp b/src/hotspot/share/classfile/systemDictionary.hpp index 6b1bb6a6a74a9..5f45aebc6b277 100644 --- a/src/hotspot/share/classfile/systemDictionary.hpp +++ b/src/hotspot/share/classfile/systemDictionary.hpp @@ -144,12 +144,13 @@ class SystemDictionary : AllStatic { TRAPS); // Lookup an already loaded class. If not found NULL is returned. - static InstanceKlass* find_instance_klass(Symbol* class_name, Handle class_loader, Handle protection_domain); + static InstanceKlass* find_instance_klass(Thread* current, Symbol* class_name, + Handle class_loader, Handle protection_domain); // Lookup an already loaded instance or array class. // Do not make any queries to class loaders; consult only the cache. // If not found NULL is returned. 
- static Klass* find_instance_or_array_klass(Symbol* class_name, + static Klass* find_instance_or_array_klass(Thread* current, Symbol* class_name, Handle class_loader, Handle protection_domain); @@ -324,7 +325,6 @@ class SystemDictionary : AllStatic { Handle class_loader, Handle protection_domain, TRAPS); static InstanceKlass* handle_parallel_loading(JavaThread* current, - unsigned int name_hash, Symbol* name, ClassLoaderData* loader_data, Handle lockObject, @@ -335,8 +335,7 @@ class SystemDictionary : AllStatic { Handle class_loader, InstanceKlass* k, TRAPS); static InstanceKlass* load_instance_class_impl(Symbol* class_name, Handle class_loader, TRAPS); - static InstanceKlass* load_instance_class(unsigned int name_hash, - Symbol* class_name, + static InstanceKlass* load_instance_class(Symbol* class_name, Handle class_loader, TRAPS); static bool is_shared_class_visible(Symbol* class_name, InstanceKlass* ik, @@ -400,11 +399,9 @@ class SystemDictionary : AllStatic { static Symbol* find_placeholder(Symbol* name, ClassLoaderData* loader_data); // Class loader constraints - static void check_constraints(unsigned int hash, - InstanceKlass* k, Handle loader, + static void check_constraints(InstanceKlass* k, Handle loader, bool defining, TRAPS); - static void update_dictionary(unsigned int hash, - InstanceKlass* k, Handle loader); + static void update_dictionary(JavaThread* current, InstanceKlass* k, Handle loader); }; #endif // SHARE_CLASSFILE_SYSTEMDICTIONARY_HPP diff --git a/src/hotspot/share/classfile/systemDictionaryShared.cpp b/src/hotspot/share/classfile/systemDictionaryShared.cpp index 6dc822c5520c0..086a9969333df 100644 --- a/src/hotspot/share/classfile/systemDictionaryShared.cpp +++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp @@ -401,7 +401,6 @@ InstanceKlass* SystemDictionaryShared::find_or_load_shared_class( THREAD, java_lang_ClassLoader::non_reflection_class_loader(class_loader())); ClassLoaderData *loader_data = register_loader(class_loader); Dictionary* dictionary = loader_data->dictionary(); - unsigned int d_hash = dictionary->compute_hash(name); // Note: currently, find_or_load_shared_class is called only from // JVM_FindLoadedClass and used for PlatformClassLoader and AppClassLoader, @@ -409,7 +408,7 @@ InstanceKlass* SystemDictionaryShared::find_or_load_shared_class( assert(get_loader_lock_or_null(class_loader) == NULL, "ObjectLocker not required"); { MutexLocker mu(THREAD, SystemDictionary_lock); - InstanceKlass* check = dictionary->find_class(d_hash, name); + InstanceKlass* check = dictionary->find_class(THREAD, name); if (check != NULL) { return check; } diff --git a/src/hotspot/share/classfile/vmClasses.cpp b/src/hotspot/share/classfile/vmClasses.cpp index 2af3bae098449..bfbb90d57c807 100644 --- a/src/hotspot/share/classfile/vmClasses.cpp +++ b/src/hotspot/share/classfile/vmClasses.cpp @@ -247,8 +247,7 @@ void vmClasses::resolve_shared_class(InstanceKlass* klass, ClassLoaderData* load klass->restore_unshareable_info(loader_data, domain, NULL, THREAD); SystemDictionary::load_shared_class_misc(klass, loader_data); Dictionary* dictionary = loader_data->dictionary(); - unsigned int hash = dictionary->compute_hash(klass->name()); - dictionary->add_klass(hash, klass->name(), klass); + dictionary->add_klass(THREAD, klass->name(), klass); SystemDictionary::add_to_hierarchy(klass); assert(klass->is_loaded(), "Must be in at least loaded state"); } diff --git a/src/hotspot/share/code/codeBlob.cpp b/src/hotspot/share/code/codeBlob.cpp index bc92fdd645f33..134bb169a684a 
100644 --- a/src/hotspot/share/code/codeBlob.cpp +++ b/src/hotspot/share/code/codeBlob.cpp @@ -307,6 +307,8 @@ AdapterBlob::AdapterBlob(int size, CodeBuffer* cb) : AdapterBlob* AdapterBlob::create(CodeBuffer* cb) { ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock + CodeCache::gc_on_allocation(); + AdapterBlob* blob = NULL; unsigned int size = CodeBlob::allocation_size(cb, sizeof(AdapterBlob)); { diff --git a/src/hotspot/share/code/codeBlob.hpp b/src/hotspot/share/code/codeBlob.hpp index dbb0269ecf04d..6da296eb90525 100644 --- a/src/hotspot/share/code/codeBlob.hpp +++ b/src/hotspot/share/code/codeBlob.hpp @@ -211,18 +211,8 @@ class CodeBlob { code_contains(addr) && addr >= code_begin() + _frame_complete_offset; } int frame_complete_offset() const { return _frame_complete_offset; } - // CodeCache support: really only used by the nmethods, but in order to get - // asserts and certain bookkeeping to work in the CodeCache they are defined - // virtual here. - virtual bool is_zombie() const { return false; } - virtual bool is_locked_by_vm() const { return false; } - - virtual bool is_unloaded() const { return false; } virtual bool is_not_entrant() const { return false; } - // GC support - virtual bool is_alive() const = 0; - // OopMap for frame ImmutableOopMapSet* oop_maps() const { return _oop_maps; } void set_oop_maps(OopMapSet* p); @@ -384,9 +374,6 @@ class RuntimeBlob : public CodeBlob { static void free(RuntimeBlob* blob); - // GC support - virtual bool is_alive() const = 0; - void verify(); // OopMap for frame @@ -435,7 +422,6 @@ class BufferBlob: public RuntimeBlob { // GC/Verification support void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) { /* nothing to do */ } - bool is_alive() const { return true; } void verify(); void print_on(outputStream* st) const; @@ -532,7 +518,6 @@ class RuntimeStub: public RuntimeBlob { // GC/Verification support void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* nothing to do */ } - bool is_alive() const { return true; } void verify(); void print_on(outputStream* st) const; @@ -567,8 +552,6 @@ class SingletonBlob: public RuntimeBlob { address entry_point() { return code_begin(); } - bool is_alive() const { return true; } - // GC/Verification support void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* nothing to do */ } void verify(); // does nothing @@ -801,7 +784,6 @@ class UpcallStub: public RuntimeBlob { // GC/Verification support void oops_do(OopClosure* f, const frame& frame); virtual void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) override; - virtual bool is_alive() const override { return true; } virtual void verify() override; // Misc. 
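The AdapterBlob::create change above calls CodeCache::gc_on_allocation() before reserving space, so the unloading heuristics now run on the allocation path itself rather than in a dedicated sweeper thread. A toy sketch of that hook placement; the threshold and the "GC" are illustrative, not the HotSpot implementation:

```cpp
#include <cstdio>

// Shape of "maybe trigger a GC, then allocate": a cheap free-ratio check
// guards the allocation so heavy cleanup only happens under pressure.
struct ToyCodeCache {
  size_t capacity = 1000;
  size_t used = 0;

  void gc_on_allocation() {
    double free_ratio = double(capacity - used) / double(capacity);
    if (free_ratio <= 0.10) {                 // cf. StartAggressiveSweepingAt
      std::printf("requesting unloading GC (%.0f%% free)\n", free_ratio * 100.0);
      used = used / 2;                        // pretend the GC freed cold code
    }
  }

  bool allocate(size_t size) {
    gc_on_allocation();                       // heuristics run before the alloc
    if (used + size > capacity) return false;
    used += size;
    return true;
  }
};

int main() {
  ToyCodeCache cache;
  for (int i = 0; i < 30; i++) cache.allocate(40);
  std::printf("used after allocations: %zu\n", cache.used);
  return 0;
}
```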
diff --git a/src/hotspot/share/code/codeCache.cpp b/src/hotspot/share/code/codeCache.cpp index 5752ffeacf288..c5295a0ea1dd5 100644 --- a/src/hotspot/share/code/codeCache.cpp +++ b/src/hotspot/share/code/codeCache.cpp @@ -56,11 +56,11 @@ #include "runtime/globals_extension.hpp" #include "runtime/handles.inline.hpp" #include "runtime/icache.hpp" +#include "runtime/init.hpp" #include "runtime/java.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/os.inline.hpp" #include "runtime/safepointVerifiers.hpp" -#include "runtime/sweeper.hpp" #include "runtime/vmThread.hpp" #include "services/memoryService.hpp" #include "utilities/align.hpp" @@ -170,9 +170,6 @@ address CodeCache::_high_bound = 0; int CodeCache::_number_of_nmethods_with_dependencies = 0; ExceptionCache* volatile CodeCache::_exception_cache_purge_list = NULL; -int CodeCache::Sweep::_compiled_method_iterators = 0; -bool CodeCache::Sweep::_pending_sweep = false; - // Initialize arrays of CodeHeap subsets GrowableArray* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray (static_cast(CodeBlobType::All), mtCode); GrowableArray* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray (static_cast(CodeBlobType::All), mtCode); @@ -481,40 +478,6 @@ CodeHeap* CodeCache::get_code_heap(CodeBlobType code_blob_type) { return NULL; } -void CodeCache::Sweep::begin_compiled_method_iteration() { - MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag); - // Reach a state without concurrent sweeping - while (_compiled_method_iterators < 0) { - CodeCache_lock->wait_without_safepoint_check(); - } - _compiled_method_iterators++; -} - -void CodeCache::Sweep::end_compiled_method_iteration() { - MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag); - // Let the sweeper run again, if we stalled it - _compiled_method_iterators--; - if (_pending_sweep) { - CodeCache_lock->notify_all(); - } -} - -void CodeCache::Sweep::begin() { - assert_locked_or_safepoint(CodeCache_lock); - _pending_sweep = true; - while (_compiled_method_iterators > 0) { - CodeCache_lock->wait_without_safepoint_check(); - } - _pending_sweep = false; - _compiled_method_iterators = -1; -} - -void CodeCache::Sweep::end() { - assert_locked_or_safepoint(CodeCache_lock); - _compiled_method_iterators = 0; - CodeCache_lock->notify_all(); -} - CodeBlob* CodeCache::first_blob(CodeHeap* heap) { assert_locked_or_safepoint(CodeCache_lock); assert(heap != NULL, "heap is null"); @@ -543,8 +506,6 @@ CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) { * instantiating. */ CodeBlob* CodeCache::allocate(int size, CodeBlobType code_blob_type, bool handle_alloc_failure, CodeBlobType orig_code_blob_type) { - // Possibly wakes up the sweeper thread. - NMethodSweeper::report_allocation(); assert_locked_or_safepoint(CodeCache_lock); assert(size > 0, "Code cache allocation request must be > 0 but is %d", size); if (size <= 0) { @@ -568,8 +529,6 @@ CodeBlob* CodeCache::allocate(int size, CodeBlobType code_blob_type, bool handle if (SegmentedCodeCache) { // Fallback solution: Try to store code in another code heap. // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled) - // Note that in the sweeper, we check the reverse_free_ratio of the code heap - // and force stack scanning if less than 10% of the entire code cache are free. 
CodeBlobType type = code_blob_type; switch (type) { case CodeBlobType::NonNMethod: @@ -687,33 +646,14 @@ bool CodeCache::contains(nmethod *nm) { return contains((void *)nm); } -static bool is_in_asgct() { - Thread* current_thread = Thread::current_or_null_safe(); - return current_thread != NULL && current_thread->is_Java_thread() && JavaThread::cast(current_thread)->in_asgct(); -} - -// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not -// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain +// This method is safe to call without holding the CodeCache_lock. It only depends on the _segmap to contain // valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled. CodeBlob* CodeCache::find_blob(void* start) { - CodeBlob* result = find_blob_unsafe(start); - // We could potentially look up non_entrant methods - bool is_zombie = result != NULL && result->is_zombie(); - bool is_result_safe = !is_zombie || result->is_locked_by_vm() || VMError::is_error_reported(); - guarantee(is_result_safe || is_in_asgct(), "unsafe access to zombie method"); - // When in ASGCT the previous gurantee will pass for a zombie method but we still don't want that code blob returned in order - // to minimize the chance of accessing dead memory - return is_result_safe ? result : NULL; -} - -// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know -// what you are doing) -CodeBlob* CodeCache::find_blob_unsafe(void* start) { // NMT can walk the stack before code cache is created if (_heaps != NULL) { CodeHeap* heap = get_code_heap_containing(start); if (heap != NULL) { - return heap->find_blob_unsafe(start); + return heap->find_blob(start); } } return NULL; @@ -744,7 +684,7 @@ void CodeCache::nmethods_do(void f(nmethod* nm)) { void CodeCache::metadata_do(MetadataClosure* f) { assert_locked_or_safepoint(CodeCache_lock); - NMethodIterator iter(NMethodIterator::only_alive); + NMethodIterator iter(NMethodIterator::all_blobs); while(iter.next()) { iter.method()->metadata_do(f); } @@ -758,10 +698,188 @@ int CodeCache::alignment_offset() { return (int)_heaps->first()->alignment_offset(); } +// Calculate the number of GCs after which an nmethod is expected to have been +// used in order to not be classed as cold. +void CodeCache::update_cold_gc_count() { + if (!MethodFlushing || !UseCodeCacheFlushing || NmethodSweepActivity == 0) { + // No aging + return; + } + + size_t last_used = _last_unloading_used; + double last_time = _last_unloading_time; + + double time = os::elapsedTime(); + + size_t free = unallocated_capacity(); + size_t max = max_capacity(); + size_t used = max - free; + double gc_interval = time - last_time; + + _unloading_threshold_gc_requested = false; + _last_unloading_time = time; + _last_unloading_used = used; + + if (last_time == 0.0) { + // The first GC doesn't have enough information to make good + // decisions, so just keep everything afloat + log_info(codecache)("Unknown code cache pressure; don't age code"); + return; + } + + if (gc_interval <= 0.0 || last_used >= used) { + // Dodge corner cases where there is no pressure or negative pressure + // on the code cache. Just don't unload when this happens. 
+ _cold_gc_count = INT_MAX; + log_info(codecache)("No code cache pressure; don't age code"); + return; + } + + double allocation_rate = (used - last_used) / gc_interval; + + _unloading_allocation_rates.add(allocation_rate); + _unloading_gc_intervals.add(gc_interval); + + size_t aggressive_sweeping_free_threshold = StartAggressiveSweepingAt / 100.0 * max; + if (free < aggressive_sweeping_free_threshold) { + // We are already in the red zone; be very aggressive to avoid disaster + // But not more aggressive than 2. This ensures that an nmethod must + // have been unused at least between two GCs to be considered cold still. + _cold_gc_count = 2; + log_info(codecache)("Code cache critically low; use aggressive aging"); + return; + } + + // The code cache has an expected time for cold nmethods to "time out" + // when they have not been used. The time for nmethods to time out + // depends on how long we expect we can keep allocating code until + // aggressive sweeping starts, based on sampled allocation rates. + double average_gc_interval = _unloading_gc_intervals.avg(); + double average_allocation_rate = _unloading_allocation_rates.avg(); + double time_to_aggressive = ((double)(free - aggressive_sweeping_free_threshold)) / average_allocation_rate; + double cold_timeout = time_to_aggressive / NmethodSweepActivity; + + // Convert time to GC cycles, and crop at INT_MAX. The reason for + // that is that the _cold_gc_count will be added to an epoch number + // and that addition must not overflow, or we can crash the VM. + // But not more aggressive than 2. This ensures that an nmethod must + // have been unused at least between two GCs to be considered cold still. + _cold_gc_count = MAX2(MIN2((uint64_t)(cold_timeout / average_gc_interval), (uint64_t)INT_MAX), (uint64_t)2); + + double used_ratio = double(used) / double(max); + double last_used_ratio = double(last_used) / double(max); + log_info(codecache)("Allocation rate: %.3f KB/s, time to aggressive unloading: %.3f s, cold timeout: %.3f s, cold gc count: " UINT64_FORMAT + ", used: %.3f MB (%.3f%%), last used: %.3f MB (%.3f%%), gc interval: %.3f s", + average_allocation_rate / K, time_to_aggressive, cold_timeout, _cold_gc_count, + double(used) / M, used_ratio * 100.0, double(last_used) / M, last_used_ratio * 100.0, average_gc_interval); + +} + +uint64_t CodeCache::cold_gc_count() { + return _cold_gc_count; +} + +void CodeCache::gc_on_allocation() { + if (!is_init_completed()) { + // Let's not heuristically trigger GCs before the JVM is ready for GCs, no matter what + return; + } + + size_t free = unallocated_capacity(); + size_t max = max_capacity(); + size_t used = max - free; + double free_ratio = double(free) / double(max); + if (free_ratio <= StartAggressiveSweepingAt / 100.0) { + // In case the GC is concurrent, we make sure only one thread requests the GC. 
+ if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) { + log_info(codecache)("Triggering aggressive GC due to having only %.3f%% free memory", free_ratio * 100.0); + Universe::heap()->collect(GCCause::_codecache_GC_aggressive); + } + return; + } + + size_t last_used = _last_unloading_used; + if (last_used >= used) { + // No increase since last GC; no need to sweep yet + return; + } + size_t allocated_since_last = used - last_used; + double allocated_since_last_ratio = double(allocated_since_last) / double(max); + double threshold = SweeperThreshold / 100.0; + double used_ratio = double(used) / double(max); + double last_used_ratio = double(last_used) / double(max); + if (used_ratio > threshold) { + // After threshold is reached, scale it by free_ratio so that more aggressive + // GC is triggered as we approach code cache exhaustion + threshold *= free_ratio; + } + // If code cache has been allocated without any GC at all, let's make sure + // it is eventually invoked to avoid trouble. + if (allocated_since_last_ratio > threshold) { + // In case the GC is concurrent, we make sure only one thread requests the GC. + if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) { + log_info(codecache)("Triggering threshold (%.3f%%) GC due to allocating %.3f%% since last unloading (%.3f%% used -> %.3f%% used)", + threshold * 100.0, allocated_since_last_ratio * 100.0, last_used_ratio * 100.0, used_ratio * 100.0); + Universe::heap()->collect(GCCause::_codecache_GC_threshold); + } + } +} + +// We initialize the _gc_epoch to 2, because previous_completed_gc_marking_cycle +// subtracts the value by 2, and the type is unsigned. We don't want underflow. +// +// Odd values mean that marking is in progress, and even values mean that no +// marking is currently active. +uint64_t CodeCache::_gc_epoch = 2; + +// How many GCs after an nmethod has not been used, do we consider it cold? +uint64_t CodeCache::_cold_gc_count = INT_MAX; + +double CodeCache::_last_unloading_time = 0.0; +size_t CodeCache::_last_unloading_used = 0; +volatile bool CodeCache::_unloading_threshold_gc_requested = false; +TruncatedSeq CodeCache::_unloading_gc_intervals(10 /* samples */); +TruncatedSeq CodeCache::_unloading_allocation_rates(10 /* samples */); + +uint64_t CodeCache::gc_epoch() { + return _gc_epoch; +} + +bool CodeCache::is_gc_marking_cycle_active() { + // Odd means that marking is active + return (_gc_epoch % 2) == 1; +} + +uint64_t CodeCache::previous_completed_gc_marking_cycle() { + if (is_gc_marking_cycle_active()) { + return _gc_epoch - 2; + } else { + return _gc_epoch - 1; + } +} + +void CodeCache::on_gc_marking_cycle_start() { + assert(!is_gc_marking_cycle_active(), "Previous marking cycle never ended"); + ++_gc_epoch; +} + +void CodeCache::on_gc_marking_cycle_finish() { + assert(is_gc_marking_cycle_active(), "Marking cycle started before last one finished"); + ++_gc_epoch; + update_cold_gc_count(); +} + +void CodeCache::arm_all_nmethods() { + BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); + if (bs_nm != NULL) { + bs_nm->arm_all_nmethods(); + } +} + // Mark nmethods for unloading if they contain otherwise unreachable oops. 
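update_cold_gc_count above turns the sampled allocation rate into a GC-count budget: time until aggressive sweeping would begin, divided by NmethodSweepActivity, then expressed in average GC intervals and clamped to [2, INT_MAX]. A worked example of that arithmetic with made-up inputs:

```cpp
#include <algorithm>
#include <climits>
#include <cstdint>
#include <cstdio>

int main() {
  // Illustrative inputs, not measured values.
  double free_bytes = 96.0 * 1024 * 1024;            // 96 MB free
  double aggressive_threshold = 12.0 * 1024 * 1024;  // 10% of a 120 MB cache
  double avg_alloc_rate = 512.0 * 1024;              // 512 KB/s sampled rate
  double avg_gc_interval = 4.0;                      // seconds between GCs
  int nmethod_sweep_activity = 4;                    // cf. NmethodSweepActivity

  // Same arithmetic as update_cold_gc_count:
  double time_to_aggressive = (free_bytes - aggressive_threshold) / avg_alloc_rate;
  double cold_timeout = time_to_aggressive / nmethod_sweep_activity;
  uint64_t cold_gc_count =
      std::max(std::min((uint64_t)(cold_timeout / avg_gc_interval),
                        (uint64_t)INT_MAX),
               (uint64_t)2);   // never below 2: survive at least one full GC gap

  // Prints: time_to_aggressive=168.0s cold_timeout=42.0s cold_gc_count=10
  std::printf("time_to_aggressive=%.1fs cold_timeout=%.1fs cold_gc_count=%llu\n",
              time_to_aggressive, cold_timeout,
              (unsigned long long)cold_gc_count);
  return 0;
}
```

With these numbers an nmethod untouched for 10 consecutive GCs would be classed as cold; faster allocation or rarer GCs shrink that budget toward the floor of 2.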
void CodeCache::do_unloading(bool unloading_occurred) { assert_locked_or_safepoint(CodeCache_lock); - CompiledMethodIterator iter(CompiledMethodIterator::only_alive); + CompiledMethodIterator iter(CompiledMethodIterator::all_blobs); while(iter.next()) { iter.method()->do_unloading(unloading_occurred); } @@ -771,24 +889,21 @@ void CodeCache::blobs_do(CodeBlobClosure* f) { assert_locked_or_safepoint(CodeCache_lock); FOR_ALL_ALLOCABLE_HEAPS(heap) { FOR_ALL_BLOBS(cb, *heap) { - if (cb->is_alive()) { - f->do_code_blob(cb); + f->do_code_blob(cb); #ifdef ASSERT - if (cb->is_nmethod()) { - Universe::heap()->verify_nmethod((nmethod*)cb); - } -#endif //ASSERT + if (cb->is_nmethod()) { + Universe::heap()->verify_nmethod((nmethod*)cb); } +#endif //ASSERT } } } void CodeCache::verify_clean_inline_caches() { #ifdef ASSERT - NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading); + NMethodIterator iter(NMethodIterator::only_not_unloading); while(iter.next()) { nmethod* nm = iter.method(); - assert(!nm->is_unloaded(), "Tautology"); nm->verify_clean_inline_caches(); nm->verify(); } @@ -840,7 +955,50 @@ void CodeCache::purge_exception_caches() { _exception_cache_purge_list = NULL; } +// Register an is_unloading nmethod to be flushed after unlinking +void CodeCache::register_unlinked(nmethod* nm) { + assert(nm->unlinked_next() == NULL, "Only register for unloading once"); + for (;;) { + // Only need acquire when reading the head, when the next + // pointer is walked, which it is not here. + nmethod* head = Atomic::load(&_unlinked_head); + nmethod* next = head != NULL ? head : nm; // Self looped means end of list + nm->set_unlinked_next(next); + if (Atomic::cmpxchg(&_unlinked_head, head, nm) == head) { + break; + } + } +} + +// Flush all the nmethods the GC unlinked +void CodeCache::flush_unlinked_nmethods() { + nmethod* nm = _unlinked_head; + _unlinked_head = NULL; + size_t freed_memory = 0; + while (nm != NULL) { + nmethod* next = nm->unlinked_next(); + freed_memory += nm->total_size(); + nm->flush(); + if (next == nm) { + // Self looped means end of list + break; + } + nm = next; + } + + // Try to start the compiler again if we freed any memory + if (!CompileBroker::should_compile_new_jobs() && freed_memory != 0) { + CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation); + log_info(codecache)("Restarting compiler"); + EventJitRestart event; + event.set_freedMemory(freed_memory); + event.set_codeCacheMaxCapacity(CodeCache::max_capacity()); + event.commit(); + } +} + uint8_t CodeCache::_unloading_cycle = 1; +nmethod* volatile CodeCache::_unlinked_head = NULL; void CodeCache::increment_unloading_cycle() { // 2-bit value (see IsUnloadingState in nmethod.cpp for details) @@ -863,12 +1021,13 @@ CodeCache::UnloadingScope::UnloadingScope(BoolObjectClosure* is_alive) CodeCache::UnloadingScope::~UnloadingScope() { IsUnloadingBehaviour::set_current(_saved_behaviour); DependencyContext::cleaning_end(); + CodeCache::flush_unlinked_nmethods(); } void CodeCache::verify_oops() { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); VerifyOopClosure voc; - NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading); + NMethodIterator iter(NMethodIterator::only_not_unloading); while(iter.next()) { nmethod* nm = iter.method(); nm->oops_do(&voc); @@ -1057,17 +1216,18 @@ int CodeCache::number_of_nmethods_with_dependencies() { void CodeCache::clear_inline_caches() { assert_locked_or_safepoint(CodeCache_lock); - CompiledMethodIterator 
iter(CompiledMethodIterator::only_alive_and_not_unloading); + CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading); while(iter.next()) { iter.method()->clear_inline_caches(); } } -void CodeCache::cleanup_inline_caches() { +// Only used by whitebox API +void CodeCache::cleanup_inline_caches_whitebox() { assert_locked_or_safepoint(CodeCache_lock); - NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading); + NMethodIterator iter(NMethodIterator::only_not_unloading); while(iter.next()) { - iter.method()->cleanup_inline_caches(/*clean_all=*/true); + iter.method()->cleanup_inline_caches_whitebox(); } } @@ -1129,7 +1289,7 @@ static void reset_old_method_table() { } } -// Remove this method when zombied or unloaded. +// Remove this method when flushed. void CodeCache::unregister_old_nmethod(CompiledMethod* c) { assert_lock_strong(CodeCache_lock); if (old_compiled_method_table != NULL) { @@ -1147,8 +1307,8 @@ void CodeCache::old_nmethods_do(MetadataClosure* f) { length = old_compiled_method_table->length(); for (int i = 0; i < length; i++) { CompiledMethod* cm = old_compiled_method_table->at(i); - // Only walk alive nmethods, the dead ones will get removed by the sweeper or GC. - if (cm->is_alive() && !cm->is_unloading()) { + // Only walk !is_unloading nmethods, the other ones will get removed by the GC. + if (!cm->is_unloading()) { old_compiled_method_table->at(i)->metadata_do(f); } } @@ -1164,7 +1324,7 @@ int CodeCache::mark_dependents_for_evol_deoptimization() { reset_old_method_table(); int number_of_marked_CodeBlobs = 0; - CompiledMethodIterator iter(CompiledMethodIterator::only_alive); + CompiledMethodIterator iter(CompiledMethodIterator::all_blobs); while(iter.next()) { CompiledMethod* nm = iter.method(); // Walk all alive nmethods to check for old Methods. @@ -1184,7 +1344,7 @@ int CodeCache::mark_dependents_for_evol_deoptimization() { void CodeCache::mark_all_nmethods_for_evol_deoptimization() { assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!"); - CompiledMethodIterator iter(CompiledMethodIterator::only_alive); + CompiledMethodIterator iter(CompiledMethodIterator::all_blobs); while(iter.next()) { CompiledMethod* nm = iter.method(); if (!nm->method()->is_method_handle_intrinsic()) { @@ -1216,7 +1376,7 @@ void CodeCache::flush_evol_dependents() { // Mark methods for deopt (if safe or possible). 
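register_unlinked and flush_unlinked_nmethods earlier in this codeCache.cpp hunk push unlinked nmethods onto a lock-free LIFO whose last node points to *itself*, so the flush walk can tell "end of list" apart from "not enqueued" (a null next). A standalone sketch of that self-looped stack:

```cpp
#include <atomic>
#include <cstdio>

// Lock-free LIFO in the spirit of register_unlinked: the last node's next
// pointer is the node itself, so "next == this" terminates the flush walk.
struct Node {
  int id;
  Node* next = nullptr;
};

static std::atomic<Node*> unlinked_head{nullptr};

void register_unlinked(Node* n) {
  for (;;) {
    Node* head = unlinked_head.load(std::memory_order_relaxed);
    n->next = (head != nullptr) ? head : n;   // self-loop marks the list end
    if (unlinked_head.compare_exchange_weak(head, n)) break;  // retry on race
  }
}

void flush_unlinked() {
  Node* n = unlinked_head.exchange(nullptr);  // detach the whole list at once
  while (n != nullptr) {
    Node* next = n->next;
    std::printf("flushing node %d\n", n->id);
    if (next == n) break;                     // self-looped: end of list
    n = next;
  }
}

int main() {
  Node a{1}, b{2}, c{3};
  register_unlinked(&a);
  register_unlinked(&b);
  register_unlinked(&c);
  flush_unlinked();                           // prints 3, 2, 1
  return 0;
}
```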
void CodeCache::mark_all_nmethods_for_deoptimization() { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading); + CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading); while(iter.next()) { CompiledMethod* nm = iter.method(); if (!nm->is_native_method()) { @@ -1229,7 +1389,7 @@ int CodeCache::mark_for_deoptimization(Method* dependee) { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); int number_of_marked_CodeBlobs = 0; - CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading); + CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading); while(iter.next()) { CompiledMethod* nm = iter.method(); if (nm->is_dependent_on_method(dependee)) { @@ -1243,7 +1403,7 @@ int CodeCache::mark_for_deoptimization(Method* dependee) { } void CodeCache::make_marked_nmethods_deoptimized() { - SweeperBlockingCompiledMethodIterator iter(SweeperBlockingCompiledMethodIterator::only_alive_and_not_unloading); + RelaxedCompiledMethodIterator iter(RelaxedCompiledMethodIterator::only_not_unloading); while(iter.next()) { CompiledMethod* nm = iter.method(); if (nm->is_marked_for_deoptimization() && !nm->has_been_deoptimized() && nm->can_be_deoptimized()) { @@ -1298,9 +1458,7 @@ void CodeCache::verify() { FOR_ALL_HEAPS(heap) { (*heap)->verify(); FOR_ALL_BLOBS(cb, *heap) { - if (cb->is_alive()) { - cb->verify(); - } + cb->verify(); } } } @@ -1414,10 +1572,7 @@ void CodeCache::print_internals() { int uncommonTrapStubCount = 0; int bufferBlobCount = 0; int total = 0; - int nmethodAlive = 0; int nmethodNotEntrant = 0; - int nmethodZombie = 0; - int nmethodUnloaded = 0; int nmethodJava = 0; int nmethodNative = 0; int max_nm_size = 0; @@ -1437,17 +1592,12 @@ void CodeCache::print_internals() { ResourceMark rm; char *method_name = nm->method()->name_and_sig_as_C_string(); tty->print("%s", method_name); - if(nm->is_alive()) { tty->print_cr(" alive"); } if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); } - if(nm->is_zombie()) { tty->print_cr(" zombie"); } } nmethodCount++; - if(nm->is_alive()) { nmethodAlive++; } if(nm->is_not_entrant()) { nmethodNotEntrant++; } - if(nm->is_zombie()) { nmethodZombie++; } - if(nm->is_unloaded()) { nmethodUnloaded++; } if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; } if(nm->method() != NULL && nm->is_java_method()) { @@ -1484,10 +1634,7 @@ void CodeCache::print_internals() { tty->print_cr("Code Cache Entries (total of %d)",total); tty->print_cr("-------------------------------------------------"); tty->print_cr("nmethods: %d",nmethodCount); - tty->print_cr("\talive: %d",nmethodAlive); tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant); - tty->print_cr("\tzombie: %d",nmethodZombie); - tty->print_cr("\tunloaded: %d",nmethodUnloaded); tty->print_cr("\tjava: %d",nmethodJava); tty->print_cr("\tnative: %d",nmethodNative); tty->print_cr("runtime_stubs: %d",runtimeStubCount); @@ -1495,7 +1642,7 @@ void CodeCache::print_internals() { tty->print_cr("buffer blobs: %d",bufferBlobCount); tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount); tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount); - tty->print_cr("\nnmethod size distribution (non-zombie java)"); + tty->print_cr("\nnmethod size distribution"); tty->print_cr("-------------------------------------------------"); for(int i=0; iis_nmethod()) { const int level = cb->as_nmethod()->comp_level(); assert(0 <= level && level <= 
CompLevel_full_optimization, "Invalid compilation level"); - if (!cb->is_alive()) { - dead[level].add(cb); - } else { - live[level].add(cb); - } + live[level].add(cb); } else if (cb->is_runtime_stub()) { runtimeStub.add(cb); } else if (cb->is_deoptimization_stub()) { @@ -1568,7 +1710,6 @@ void CodeCache::print() { } tty->print_cr("%s:", level_name); live[i].print("live"); - dead[i].print("dead"); } struct { @@ -1595,14 +1736,12 @@ void CodeCache::print() { int map_size = 0; FOR_ALL_ALLOCABLE_HEAPS(heap) { FOR_ALL_BLOBS(cb, *heap) { - if (cb->is_alive()) { - number_of_blobs++; - code_size += cb->code_size(); - ImmutableOopMapSet* set = cb->oop_maps(); - if (set != NULL) { - number_of_oop_maps += set->count(); - map_size += set->nr_of_bytes(); - } + number_of_blobs++; + code_size += cb->code_size(); + ImmutableOopMapSet* set = cb->oop_maps(); + if (set != NULL) { + number_of_oop_maps += set->count(); + map_size += set->nr_of_bytes(); } } } @@ -1659,7 +1798,7 @@ void CodeCache::print_summary(outputStream* st, bool detailed) { void CodeCache::print_codelist(outputStream* st) { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading); + CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading); while (iter.next()) { CompiledMethod* cm = iter.method(); ResourceMark rm; @@ -1698,7 +1837,7 @@ void CodeCache::write_perf_map() { return; } - AllCodeBlobsIterator iter(AllCodeBlobsIterator::only_alive_and_not_unloading); + AllCodeBlobsIterator iter(AllCodeBlobsIterator::only_not_unloading); while (iter.next()) { CodeBlob *cb = iter.method(); ResourceMark rm; diff --git a/src/hotspot/share/code/codeCache.hpp b/src/hotspot/share/code/codeCache.hpp index f1216a9d2410c..19eba85914d03 100644 --- a/src/hotspot/share/code/codeCache.hpp +++ b/src/hotspot/share/code/codeCache.hpp @@ -33,6 +33,7 @@ #include "oops/instanceKlass.hpp" #include "oops/oopsHierarchy.hpp" #include "runtime/mutexLocker.hpp" +#include "utilities/numberSeq.hpp" // The CodeCache implements the code cache for various pieces of generated // code, e.g., compiled java methods, runtime stubs, transition frames, etc. 
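The marking-cycle bookkeeping added in codeCache.cpp above (and declared in the header hunks below) relies on epoch parity: the counter starts at 2, odd values mean marking is in progress, and each start/finish bumps it by one so previous_completed_gc_marking_cycle can subtract without underflow. A compact standalone model of that scheme:

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

// Epoch parity: start at 2 (even = idle), odd = marking active. Starting at 2
// keeps the subtractions below from underflowing the unsigned counter.
static uint64_t gc_epoch = 2;

bool is_gc_marking_cycle_active() { return (gc_epoch % 2) == 1; }

uint64_t previous_completed_gc_marking_cycle() {
  // While marking, the in-flight cycle does not count, so step back by 2.
  return is_gc_marking_cycle_active() ? gc_epoch - 2 : gc_epoch - 1;
}

void on_gc_marking_cycle_start()  { assert(!is_gc_marking_cycle_active()); ++gc_epoch; }
void on_gc_marking_cycle_finish() { assert(is_gc_marking_cycle_active());  ++gc_epoch; }

int main() {
  on_gc_marking_cycle_start();   // epoch 3: marking active
  std::printf("active=%d prev_completed=%llu\n",
              (int)is_gc_marking_cycle_active(),
              (unsigned long long)previous_completed_gc_marking_cycle());
  on_gc_marking_cycle_finish();  // epoch 4: cycle complete
  std::printf("active=%d prev_completed=%llu\n",
              (int)is_gc_marking_cycle_active(),
              (unsigned long long)previous_completed_gc_marking_cycle());
  return 0;
}
```

The asserts encode the same invariant as on_gc_marking_cycle_start/finish above: cycles must strictly alternate, or the parity encoding breaks.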
@@ -95,7 +96,16 @@ class CodeCache : AllStatic { static address _low_bound; // Lower bound of CodeHeap addresses static address _high_bound; // Upper bound of CodeHeap addresses static int _number_of_nmethods_with_dependencies; // Total number of nmethods with dependencies - static uint8_t _unloading_cycle; // Global state for recognizing old nmethods that need to be unloaded + + static uint8_t _unloading_cycle; // Global state for recognizing old nmethods that need to be unloaded + static uint64_t _gc_epoch; // Global state for tracking when nmethods were found to be on-stack + static uint64_t _cold_gc_count; // Global state for determining how many GCs are needed before an nmethod is cold + static size_t _last_unloading_used; + static double _last_unloading_time; + static TruncatedSeq _unloading_gc_intervals; + static TruncatedSeq _unloading_allocation_rates; + static volatile bool _unloading_threshold_gc_requested; + static nmethod* volatile _unlinked_head; static ExceptionCache* volatile _exception_cache_purge_list; @@ -116,21 +126,6 @@ class CodeCache : AllStatic { static CodeBlob* first_blob(CodeHeap* heap); // Returns the first CodeBlob on the given CodeHeap static CodeBlob* first_blob(CodeBlobType code_blob_type); // Returns the first CodeBlob of the given type static CodeBlob* next_blob(CodeHeap* heap, CodeBlob* cb); // Returns the next CodeBlob on the given CodeHeap - public: - - class Sweep { - friend class CodeCache; - template friend class CodeBlobIterator; - private: - static int _compiled_method_iterators; - static bool _pending_sweep; - public: - static void begin(); - static void end(); - private: - static void begin_compiled_method_iteration(); - static void end_compiled_method_iteration(); - }; private: static size_t bytes_allocated_in_freelists(); @@ -168,7 +163,6 @@ class CodeCache : AllStatic { // Lookup static CodeBlob* find_blob(void* start); // Returns the CodeBlob containing the given address - static CodeBlob* find_blob_unsafe(void* start); // Same as find_blob but does not fail if looking up a zombie method static CodeBlob* find_blob_fast(void* start); // Returns the CodeBlob containing the given address static CodeBlob* find_blob_and_oopmap(void* start, int& slot); // Returns the CodeBlob containing the given address static int find_oopmap_slot_fast(void* start); // Returns a fast oopmap slot if there is any; -1 otherwise @@ -197,6 +191,22 @@ class CodeCache : AllStatic { ~UnloadingScope(); }; + // Code cache unloading heuristics + static uint64_t cold_gc_count(); + static void update_cold_gc_count(); + static void gc_on_allocation(); + + // The GC epoch and marking_cycle code below is there to support sweeping + // nmethods in loom stack chunks. 
+ static uint64_t gc_epoch(); + static bool is_gc_marking_cycle_active(); + static uint64_t previous_completed_gc_marking_cycle(); + static void on_gc_marking_cycle_start(); + static void on_gc_marking_cycle_finish(); + static void arm_all_nmethods(); + + static void flush_unlinked_nmethods(); + static void register_unlinked(nmethod* nm); static void do_unloading(bool unloading_occurred); static uint8_t unloading_cycle() { return _unloading_cycle; } @@ -239,7 +249,7 @@ class CodeCache : AllStatic { static bool is_non_nmethod(address addr); static void clear_inline_caches(); // clear all inline caches - static void cleanup_inline_caches(); // clean unloaded/zombie nmethods from inline caches + static void cleanup_inline_caches_whitebox(); // clean bad nmethods from inline caches // Returns true if an own CodeHeap for the given CodeBlobType is available static bool heap_available(CodeBlobType code_blob_type); @@ -328,31 +338,18 @@ class CodeCache : AllStatic { // Iterator to iterate over code blobs in the CodeCache. -template class CodeBlobIterator : public StackObj { +// The relaxed iterators only hold the CodeCache_lock across next calls +template class CodeBlobIterator : public StackObj { public: - enum LivenessFilter { all_blobs, only_alive, only_alive_and_not_unloading }; + enum LivenessFilter { all_blobs, only_not_unloading }; private: CodeBlob* _code_blob; // Current CodeBlob GrowableArrayIterator _heap; GrowableArrayIterator _end; - bool _only_alive; bool _only_not_unloading; void initialize_iteration(T* nm) { - if (Filter::heaps() == NULL) { - return; - } - _heap = Filter::heaps()->begin(); - _end = Filter::heaps()->end(); - // If set to NULL, initialized by first call to next() - _code_blob = (CodeBlob*)nm; - if (nm != NULL) { - while(!(*_heap)->contains_blob(_code_blob)) { - ++_heap; - } - assert((*_heap)->contains_blob(_code_blob), "match not found"); - } } bool next_impl() { @@ -366,11 +363,6 @@ template class CodeBlobIterator continue; } - // Filter is_alive as required - if (_only_alive && !_code_blob->is_alive()) { - continue; - } - // Filter is_unloading as required if (_only_not_unloading) { CompiledMethod* cm = _code_blob->as_compiled_method_or_null(); @@ -385,26 +377,30 @@ template class CodeBlobIterator public: CodeBlobIterator(LivenessFilter filter, T* nm = NULL) - : _only_alive(filter == only_alive || filter == only_alive_and_not_unloading), - _only_not_unloading(filter == only_alive_and_not_unloading) + : _only_not_unloading(filter == only_not_unloading) { - if (is_compiled_method) { - CodeCache::Sweep::begin_compiled_method_iteration(); - initialize_iteration(nm); - } else { - initialize_iteration(nm); + if (Filter::heaps() == NULL) { + // The iterator is supposed to shortcut since we have + // _heap == _end, but make sure we do not have garbage + // in other fields as well. 
+ _code_blob = nullptr; + return; } - } - - ~CodeBlobIterator() { - if (is_compiled_method) { - CodeCache::Sweep::end_compiled_method_iteration(); + _heap = Filter::heaps()->begin(); + _end = Filter::heaps()->end(); + // If set to NULL, initialized by first call to next() + _code_blob = nm; + if (nm != NULL) { + while(!(*_heap)->contains_blob(_code_blob)) { + ++_heap; + } + assert((*_heap)->contains_blob(_code_blob), "match not found"); } } // Advance iterator to next blob bool next() { - if (is_compiled_method) { + if (is_relaxed) { MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag); return next_impl(); } else { @@ -458,10 +454,9 @@ struct AllCodeBlobsFilter { static const GrowableArray* heaps() { return CodeCache::heaps(); } }; -typedef CodeBlobIterator CompiledMethodIterator; -typedef CodeBlobIterator NMethodIterator; -typedef CodeBlobIterator AllCodeBlobsIterator; - -typedef CodeBlobIterator SweeperBlockingCompiledMethodIterator; +typedef CodeBlobIterator CompiledMethodIterator; +typedef CodeBlobIterator RelaxedCompiledMethodIterator; +typedef CodeBlobIterator NMethodIterator; +typedef CodeBlobIterator AllCodeBlobsIterator; #endif // SHARE_CODE_CODECACHE_HPP diff --git a/src/hotspot/share/code/codeHeapState.cpp b/src/hotspot/share/code/codeHeapState.cpp index 79f400f4e690d..d0411062f65af 100644 --- a/src/hotspot/share/code/codeHeapState.cpp +++ b/src/hotspot/share/code/codeHeapState.cpp @@ -27,8 +27,8 @@ #include "code/codeHeapState.hpp" #include "compiler/compileBroker.hpp" #include "oops/klass.inline.hpp" +#include "runtime/mutexLocker.hpp" #include "runtime/safepoint.hpp" -#include "runtime/sweeper.hpp" #include "utilities/powerOfTwo.hpp" // ------------------------- @@ -216,18 +216,16 @@ const char* blobTypeName[] = {"noType" , "nMethod (active)" , "nMethod (inactive)" , "nMethod (deopt)" - , "nMethod (zombie)" - , "nMethod (unloaded)" - , "runtime stub" - , "ricochet stub" - , "deopt stub" - , "uncommon trap stub" - , "exception stub" - , "safepoint stub" - , "adapter blob" - , "MH adapter blob" - , "buffer blob" - , "lastType" + , "runtime stub" + , "ricochet stub" + , "deopt stub" + , "uncommon trap stub" + , "exception stub" + , "safepoint stub" + , "adapter blob" + , "MH adapter blob" + , "buffer blob" + , "lastType" }; const char* compTypeName[] = { "none", "c1", "c2", "jvmci" }; @@ -249,8 +247,6 @@ static bool segment_granules = false; static unsigned int nBlocks_t1 = 0; // counting "in_use" nmethods only. static unsigned int nBlocks_t2 = 0; // counting "in_use" nmethods only. static unsigned int nBlocks_alive = 0; // counting "not_used" and "not_entrant" nmethods only. -static unsigned int nBlocks_dead = 0; // counting "zombie" and "unloaded" methods only. -static unsigned int nBlocks_unloaded = 0; // counting "unloaded" nmethods only. This is a transient state. static unsigned int nBlocks_stub = 0; static struct FreeBlk* FreeArray = NULL; @@ -262,11 +258,6 @@ static unsigned int used_topSizeBlocks = 0; static struct SizeDistributionElement* SizeDistributionArray = NULL; -// nMethod temperature (hotness) indicators. 
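The iterator rework above introduces "relaxed" iterators, which take CodeCache_lock only across each next() call instead of pinning it for the whole traversal. A simplified sketch of that per-step locking contract (toy types, not the real CodeBlobIterator):

```cpp
#include <cstdio>
#include <mutex>
#include <vector>

// Per-step locking: the lock is held only while advancing, so other threads
// can interleave with a long walk.
static std::mutex cache_lock;                 // stands in for CodeCache_lock
static std::vector<int> blobs = {10, 20, 30};

class RelaxedIterator {
  size_t _pos = 0;
 public:
  bool next(int* out) {
    std::lock_guard<std::mutex> ml(cache_lock);  // held only for this step
    if (_pos >= blobs.size()) return false;
    *out = blobs[_pos++];
    return true;
  }
};

int main() {
  RelaxedIterator it;
  int blob;
  while (it.next(&blob)) {
    // The lock is NOT held here: the element must stay valid on its own,
    // which is why only flush-safe walks may use the relaxed variant.
    std::printf("blob %d\n", blob);
  }
  return 0;
}
```

That trade-off is visible in make_marked_nmethods_deoptimized above: it switches to the relaxed iterator precisely because the walk can block without stalling every other code cache user.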
-static int avgTemp = 0; -static int maxTemp = 0; -static int minTemp = 0; - static unsigned int latest_compilation_id = 0; static volatile bool initialization_complete = false; @@ -319,8 +310,6 @@ void CodeHeapState::get_HeapStatGlobals(outputStream* out, const char* heapName) nBlocks_t1 = CodeHeapStatArray[ix].nBlocks_t1; nBlocks_t2 = CodeHeapStatArray[ix].nBlocks_t2; nBlocks_alive = CodeHeapStatArray[ix].nBlocks_alive; - nBlocks_dead = CodeHeapStatArray[ix].nBlocks_dead; - nBlocks_unloaded = CodeHeapStatArray[ix].nBlocks_unloaded; nBlocks_stub = CodeHeapStatArray[ix].nBlocks_stub; FreeArray = CodeHeapStatArray[ix].FreeArray; alloc_freeBlocks = CodeHeapStatArray[ix].alloc_freeBlocks; @@ -328,9 +317,6 @@ void CodeHeapState::get_HeapStatGlobals(outputStream* out, const char* heapName) alloc_topSizeBlocks = CodeHeapStatArray[ix].alloc_topSizeBlocks; used_topSizeBlocks = CodeHeapStatArray[ix].used_topSizeBlocks; SizeDistributionArray = CodeHeapStatArray[ix].SizeDistributionArray; - avgTemp = CodeHeapStatArray[ix].avgTemp; - maxTemp = CodeHeapStatArray[ix].maxTemp; - minTemp = CodeHeapStatArray[ix].minTemp; } else { StatArray = NULL; seg_size = 0; @@ -341,8 +327,6 @@ void CodeHeapState::get_HeapStatGlobals(outputStream* out, const char* heapName) nBlocks_t1 = 0; nBlocks_t2 = 0; nBlocks_alive = 0; - nBlocks_dead = 0; - nBlocks_unloaded = 0; nBlocks_stub = 0; FreeArray = NULL; alloc_freeBlocks = 0; @@ -350,9 +334,6 @@ void CodeHeapState::get_HeapStatGlobals(outputStream* out, const char* heapName) alloc_topSizeBlocks = 0; used_topSizeBlocks = 0; SizeDistributionArray = NULL; - avgTemp = 0; - maxTemp = 0; - minTemp = 0; } } @@ -367,8 +348,6 @@ void CodeHeapState::set_HeapStatGlobals(outputStream* out, const char* heapName) CodeHeapStatArray[ix].nBlocks_t1 = nBlocks_t1; CodeHeapStatArray[ix].nBlocks_t2 = nBlocks_t2; CodeHeapStatArray[ix].nBlocks_alive = nBlocks_alive; - CodeHeapStatArray[ix].nBlocks_dead = nBlocks_dead; - CodeHeapStatArray[ix].nBlocks_unloaded = nBlocks_unloaded; CodeHeapStatArray[ix].nBlocks_stub = nBlocks_stub; CodeHeapStatArray[ix].FreeArray = FreeArray; CodeHeapStatArray[ix].alloc_freeBlocks = alloc_freeBlocks; @@ -376,9 +355,6 @@ void CodeHeapState::set_HeapStatGlobals(outputStream* out, const char* heapName) CodeHeapStatArray[ix].alloc_topSizeBlocks = alloc_topSizeBlocks; CodeHeapStatArray[ix].used_topSizeBlocks = used_topSizeBlocks; CodeHeapStatArray[ix].SizeDistributionArray = SizeDistributionArray; - CodeHeapStatArray[ix].avgTemp = avgTemp; - CodeHeapStatArray[ix].maxTemp = maxTemp; - CodeHeapStatArray[ix].minTemp = minTemp; } } @@ -659,8 +635,6 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular nBlocks_t1 = 0; nBlocks_t2 = 0; nBlocks_alive = 0; - nBlocks_dead = 0; - nBlocks_unloaded = 0; nBlocks_stub = 0; nBlocks_free = 0; @@ -692,19 +666,13 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular size_t aliveSpace = 0; size_t disconnSpace = 0; size_t notentrSpace = 0; - size_t deadSpace = 0; - size_t unloadedSpace = 0; size_t stubSpace = 0; size_t freeSpace = 0; size_t maxFreeSize = 0; HeapBlock* maxFreeBlock = NULL; bool insane = false; - int64_t hotnessAccumulator = 0; unsigned int n_methods = 0; - avgTemp = 0; - minTemp = (int)(res_size > M ? (res_size/M)*2 : 1); - maxTemp = -minTemp; for (HeapBlock *h = heap->first_block(); h != NULL && !insane; h = heap->next_block(h)) { unsigned int hb_len = (unsigned int)h->length(); // despite being size_t, length can never overflow an unsigned int. 
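The hunks above retire the sweeper's temperature bookkeeping (avgTemp, minTemp, maxTemp and the hotnessAccumulator) from the CodeHeap statistics. The replacement liveness signal shows up in the nmethod.cpp hunks later in this patch: each nmethod is stamped with the global GC marking-cycle epoch when it may be on a stack. The following is a minimal sketch of that idea, not the patch's code: the struct is a stand-in, and the epoch values are passed in explicitly where the real code reads CodeCache::gc_epoch() and CodeCache::previous_completed_gc_marking_cycle().

#include <atomic>
#include <cstdint>

// Stand-in for the real nmethod; only the epoch field matters here.
struct NMethodSketch {
  std::atomic<uint64_t> _gc_epoch{0};

  // A GC (or make_not_entrant, when entry barriers are unsupported) stamps
  // the nmethod with the current marking-cycle epoch.
  void mark_as_maybe_on_stack(uint64_t current_gc_epoch) {
    _gc_epoch.store(current_gc_epoch, std::memory_order_relaxed);
  }

  // The nmethod may still have activations if it was stamped during or
  // after the previous completed marking cycle.
  bool is_maybe_on_stack(uint64_t previous_completed_cycle) const {
    return _gc_epoch.load(std::memory_order_relaxed) >= previous_completed_cycle;
  }
};

The design point is that one per-nmethod timestamp plus two global counters replaces a per-nmethod hotness counter that a dedicated sweeper thread had to decay and rescan.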
@@ -758,7 +726,6 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular if (cbType != noType) { const char* blob_name = nullptr; unsigned int nm_size = 0; - int temperature = 0; nmethod* nm = cb->as_nmethod_or_null(); if (nm != NULL) { // no is_readable check required, nm = (nmethod*)cb. ResourceMark rm; @@ -784,11 +751,7 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular switch (cbType) { case nMethod_inuse: { // only for executable methods!!! // space for these cbs is accounted for later. - temperature = nm->hotness_counter(); - hotnessAccumulator += temperature; n_methods++; - maxTemp = (temperature > maxTemp) ? temperature : maxTemp; - minTemp = (temperature < minTemp) ? temperature : minTemp; break; } case nMethod_notused: @@ -803,14 +766,6 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular aliveSpace += hb_bytelen; notentrSpace += hb_bytelen; break; - case nMethod_unloaded: - nBlocks_unloaded++; - unloadedSpace += hb_bytelen; - break; - case nMethod_dead: - nBlocks_dead++; - deadSpace += hb_bytelen; - break; default: break; } @@ -828,7 +783,6 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular TopSizeArray[0].len = hb_len; TopSizeArray[0].index = tsbStopper; TopSizeArray[0].nm_size = nm_size; - TopSizeArray[0].temperature = temperature; TopSizeArray[0].compiler = cType; TopSizeArray[0].level = comp_lvl; TopSizeArray[0].type = cbType; @@ -846,7 +800,6 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular TopSizeArray[used_topSizeBlocks].len = hb_len; TopSizeArray[used_topSizeBlocks].index = tsbStopper; TopSizeArray[used_topSizeBlocks].nm_size = nm_size; - TopSizeArray[used_topSizeBlocks].temperature = temperature; TopSizeArray[used_topSizeBlocks].compiler = cType; TopSizeArray[used_topSizeBlocks].level = comp_lvl; TopSizeArray[used_topSizeBlocks].type = cbType; @@ -889,7 +842,6 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular TopSizeArray[i].len = hb_len; TopSizeArray[i].index = used_topSizeBlocks; TopSizeArray[i].nm_size = nm_size; - TopSizeArray[i].temperature = temperature; TopSizeArray[i].compiler = cType; TopSizeArray[i].level = comp_lvl; TopSizeArray[i].type = cbType; @@ -931,7 +883,6 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular TopSizeArray[j].len = hb_len; TopSizeArray[j].index = tsbStopper; // already set!! TopSizeArray[i].nm_size = nm_size; - TopSizeArray[i].temperature = temperature; TopSizeArray[j].compiler = cType; TopSizeArray[j].level = comp_lvl; TopSizeArray[j].type = cbType; @@ -947,7 +898,6 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular TopSizeArray[i].len = hb_len; TopSizeArray[i].index = j; TopSizeArray[i].nm_size = nm_size; - TopSizeArray[i].temperature = temperature; TopSizeArray[i].compiler = cType; TopSizeArray[i].level = comp_lvl; TopSizeArray[i].type = cbType; @@ -999,20 +949,7 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular StatArray[ix_beg].level = comp_lvl; StatArray[ix_beg].compiler = cType; break; - case nMethod_alive: - StatArray[ix_beg].tx_count++; - StatArray[ix_beg].tx_space += (unsigned short)hb_len; - StatArray[ix_beg].tx_age = StatArray[ix_beg].tx_age < compile_id ? 
compile_id : StatArray[ix_beg].tx_age; - StatArray[ix_beg].level = comp_lvl; - StatArray[ix_beg].compiler = cType; - break; - case nMethod_dead: - case nMethod_unloaded: - StatArray[ix_beg].dead_count++; - StatArray[ix_beg].dead_space += (unsigned short)hb_len; - break; default: - // must be a stub, if it's not a dead or alive nMethod nBlocks_stub++; stubSpace += hb_bytelen; StatArray[ix_beg].stub_count++; @@ -1055,29 +992,7 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular StatArray[ix_end].level = comp_lvl; StatArray[ix_end].compiler = cType; break; - case nMethod_alive: - StatArray[ix_beg].tx_count++; - StatArray[ix_beg].tx_space += (unsigned short)beg_space; - StatArray[ix_beg].tx_age = StatArray[ix_beg].tx_age < compile_id ? compile_id : StatArray[ix_beg].tx_age; - - StatArray[ix_end].tx_count++; - StatArray[ix_end].tx_space += (unsigned short)end_space; - StatArray[ix_end].tx_age = StatArray[ix_end].tx_age < compile_id ? compile_id : StatArray[ix_end].tx_age; - - StatArray[ix_beg].level = comp_lvl; - StatArray[ix_beg].compiler = cType; - StatArray[ix_end].level = comp_lvl; - StatArray[ix_end].compiler = cType; - break; - case nMethod_dead: - case nMethod_unloaded: - StatArray[ix_beg].dead_count++; - StatArray[ix_beg].dead_space += (unsigned short)beg_space; - StatArray[ix_end].dead_count++; - StatArray[ix_end].dead_space += (unsigned short)end_space; - break; default: - // must be a stub, if it's not a dead or alive nMethod nBlocks_stub++; stubSpace += hb_bytelen; StatArray[ix_beg].stub_count++; @@ -1102,20 +1017,7 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular StatArray[ix].level = comp_lvl; StatArray[ix].compiler = cType; break; - case nMethod_alive: - StatArray[ix].tx_count++; - StatArray[ix].tx_space += (unsigned short)(granule_size>>log2_seg_size); - StatArray[ix].tx_age = StatArray[ix].tx_age < compile_id ? 
compile_id : StatArray[ix].tx_age; - StatArray[ix].level = comp_lvl; - StatArray[ix].compiler = cType; - break; - case nMethod_dead: - case nMethod_unloaded: - StatArray[ix].dead_count++; - StatArray[ix].dead_space += (unsigned short)(granule_size>>log2_seg_size); - break; default: - // must be a stub, if it's not a dead or alive nMethod StatArray[ix].stub_count++; StatArray[ix].stub_space += (unsigned short)(granule_size>>log2_seg_size); break; @@ -1138,8 +1040,6 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular ast->print_cr(" Alive Space = " SIZE_FORMAT_W(8) "k, nBlocks_alive = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", aliveSpace/(size_t)K, nBlocks_alive, (100.0*aliveSpace)/size, (100.0*aliveSpace)/res_size); ast->print_cr(" disconnected = " SIZE_FORMAT_W(8) "k, nBlocks_disconn = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", disconnSpace/(size_t)K, nBlocks_disconn, (100.0*disconnSpace)/size, (100.0*disconnSpace)/res_size); ast->print_cr(" not entrant = " SIZE_FORMAT_W(8) "k, nBlocks_notentr = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", notentrSpace/(size_t)K, nBlocks_notentr, (100.0*notentrSpace)/size, (100.0*notentrSpace)/res_size); - ast->print_cr(" unloadedSpace = " SIZE_FORMAT_W(8) "k, nBlocks_unloaded = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", unloadedSpace/(size_t)K, nBlocks_unloaded, (100.0*unloadedSpace)/size, (100.0*unloadedSpace)/res_size); - ast->print_cr(" deadSpace = " SIZE_FORMAT_W(8) "k, nBlocks_dead = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", deadSpace/(size_t)K, nBlocks_dead, (100.0*deadSpace)/size, (100.0*deadSpace)/res_size); ast->print_cr(" stubSpace = " SIZE_FORMAT_W(8) "k, nBlocks_stub = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", stubSpace/(size_t)K, nBlocks_stub, (100.0*stubSpace)/size, (100.0*stubSpace)/res_size); ast->print_cr("ZombieBlocks = %8d. These are HeapBlocks which could not be identified as CodeBlobs.", nBlocks_zomb); ast->cr(); @@ -1150,22 +1050,6 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular ast->print_cr("latest allocated compilation id = %d", latest_compilation_id); ast->print_cr("highest observed compilation id = %d", highest_compilation_id); ast->print_cr("Building TopSizeList iterations = %ld", total_iterations); - ast->cr(); - - int reset_val = NMethodSweeper::hotness_counter_reset_val(); - double reverse_free_ratio = (res_size > size) ? (double)res_size/(double)(res_size-size) : (double)res_size; - printBox(ast, '-', "Method hotness information at time of this analysis", NULL); - ast->print_cr("Highest possible method temperature: %12d", reset_val); - ast->print_cr("Threshold for method to be considered 'cold': %12.3f", -reset_val + reverse_free_ratio * NmethodSweepActivity); - if (n_methods > 0) { - avgTemp = hotnessAccumulator/n_methods; - ast->print_cr("min. hotness = %6d", minTemp); - ast->print_cr("avg. hotness = %6d", avgTemp); - ast->print_cr("max. hotness = %6d", maxTemp); - } else { - avgTemp = 0; - ast->print_cr("No hotness data available"); - } BUFFEREDSTREAM_FLUSH("\n") // This loop is intentionally printing directly to "out". 
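For reference, the report deleted just above derived its "cold" threshold from code cache occupancy: reverse_free_ratio = res_size / (res_size - size) while reserved exceeds committed, and the threshold was -reset_val + reverse_free_ratio * NmethodSweepActivity. A small self-contained sketch of that arithmetic follows; the wrapper functions and the example numbers are illustrative, not from the patch.

#include <cstdio>

// reverse_free_ratio grows as the free part of the code cache shrinks,
// which made the sweeper more aggressive under cache pressure.
static double reverse_free_ratio(double reserved, double committed) {
  return (reserved > committed) ? reserved / (reserved - committed) : reserved;
}

// An nmethod whose hotness counter sank below this value was a sweep candidate.
static double cold_threshold(int reset_val, double ratio, int sweep_activity) {
  return -reset_val + ratio * sweep_activity;
}

int main() {
  double ratio = reverse_free_ratio(240.0, 180.0);              // 240/(240-180) = 4.0
  printf("cold below %.1f\n", cold_threshold(512, ratio, 10));  // prints -472.0
  return 0;
}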
@@ -1185,9 +1069,6 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular if (StatArray[ix].stub_count > granule_segs) { out->print_cr("stub_count[%d] = %d", ix, StatArray[ix].stub_count); } - if (StatArray[ix].dead_count > granule_segs) { - out->print_cr("dead_count[%d] = %d", ix, StatArray[ix].dead_count); - } if (StatArray[ix].t1_space > granule_segs) { out->print_cr("t1_space[%d] = %d", ix, StatArray[ix].t1_space); } @@ -1200,14 +1081,11 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular if (StatArray[ix].stub_space > granule_segs) { out->print_cr("stub_space[%d] = %d", ix, StatArray[ix].stub_space); } - if (StatArray[ix].dead_space > granule_segs) { - out->print_cr("dead_space[%d] = %d", ix, StatArray[ix].dead_space); - } // this cast is awful! I need it because NT/Intel reports a signed/unsigned mismatch. - if ((size_t)(StatArray[ix].t1_count+StatArray[ix].t2_count+StatArray[ix].tx_count+StatArray[ix].stub_count+StatArray[ix].dead_count) > granule_segs) { + if ((size_t)(StatArray[ix].t1_count+StatArray[ix].t2_count+StatArray[ix].tx_count+StatArray[ix].stub_count) > granule_segs) { out->print_cr("t1_count[%d] = %d, t2_count[%d] = %d, tx_count[%d] = %d, stub_count[%d] = %d", ix, StatArray[ix].t1_count, ix, StatArray[ix].t2_count, ix, StatArray[ix].tx_count, ix, StatArray[ix].stub_count); } - if ((size_t)(StatArray[ix].t1_space+StatArray[ix].t2_space+StatArray[ix].tx_space+StatArray[ix].stub_space+StatArray[ix].dead_space) > granule_segs) { + if ((size_t)(StatArray[ix].t1_space+StatArray[ix].t2_space+StatArray[ix].tx_space+StatArray[ix].stub_space) > granule_segs) { out->print_cr("t1_space[%d] = %d, t2_space[%d] = %d, tx_space[%d] = %d, stub_space[%d] = %d", ix, StatArray[ix].t1_space, ix, StatArray[ix].t2_space, ix, StatArray[ix].tx_space, ix, StatArray[ix].stub_space); } } @@ -1377,7 +1255,7 @@ void CodeHeapState::print_usedSpace(outputStream* out, CodeHeap* heap) { ast->print("%9s", "compiler"); ast->fill_to(66); ast->print_cr("%6s", "method"); - ast->print_cr("%18s %13s %17s %4s %9s %5s %s", "Addr(module) ", "offset", "size", "type", " type lvl", " temp", "Name"); + ast->print_cr("%18s %13s %17s %9s %5s %s", "Addr(module) ", "offset", "size", "type", " type lvl", "Name"); BUFFEREDSTREAM_FLUSH_LOCKED("") //---< print Top Ten Used Blocks >--- @@ -1420,14 +1298,8 @@ void CodeHeapState::print_usedSpace(outputStream* out, CodeHeap* heap) { //---< compiler information >--- ast->fill_to(56); ast->print("%5s %3d", compTypeName[TopSizeArray[i].compiler], TopSizeArray[i].level); - //---< method temperature >--- - ast->fill_to(67); - ast->print("%5d", TopSizeArray[i].temperature); //---< name and signature >--- ast->fill_to(67+6); - if (TopSizeArray[i].type == nMethod_dead) { - ast->print(" zombie method "); - } ast->print("%s", TopSizeArray[i].blob_name); } else { //---< block size in hex >--- @@ -1772,7 +1644,7 @@ void CodeHeapState::print_count(outputStream* out, CodeHeap* heap) { for (unsigned int ix = 0; ix < alloc_granules; ix++) { print_line_delim(out, ast, low_bound, ix, granules_per_line); unsigned int count = StatArray[ix].t1_count + StatArray[ix].t2_count + StatArray[ix].tx_count - + StatArray[ix].stub_count + StatArray[ix].dead_count; + + StatArray[ix].stub_count; print_count_single(ast, count); } } @@ -1859,29 +1731,9 @@ void CodeHeapState::print_count(outputStream* out, CodeHeap* heap) { BUFFEREDSTREAM_FLUSH_LOCKED("\n\n\n") } - { - if (nBlocks_dead > 0) { - printBox(ast, '-', "Dead nMethod count only, 0x1..0xf. 
'*' indicates >= 16 blocks, ' ' indicates empty", NULL);
-
-      granules_per_line = 128;
-      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
-        print_line_delim(out, ast, low_bound, ix, granules_per_line);
-        if (segment_granules && StatArray[ix].dead_count > 0) {
-          print_blobType_single(ast, StatArray[ix].type);
-        } else {
-          print_count_single(ast, StatArray[ix].dead_count);
-        }
-      }
-      ast->print("|");
-    } else {
-      ast->print("No dead nMethods found in CodeHeap.");
-    }
-    BUFFEREDSTREAM_FLUSH_LOCKED("\n\n\n")
-  }
-
   {
     if (!segment_granules) { // Prevent totally redundant printouts
-      printBox(ast, '-', "Count by tier (combined, no dead blocks): <#t1>:<#t2>:<#s>, 0x0..0xf. '*' indicates >= 16 blocks", NULL);
+      printBox(ast, '-', "Count by tier (combined): <#t1>:<#t2>:<#s>, 0x0..0xf. '*' indicates >= 16 blocks", NULL);

       granules_per_line = 24;
       for (unsigned int ix = 0; ix < alloc_granules; ix++) {
@@ -1953,7 +1805,7 @@ void CodeHeapState::print_space(outputStream* out, CodeHeap* heap) {
       for (unsigned int ix = 0; ix < alloc_granules; ix++) {
         print_line_delim(out, ast, low_bound, ix, granules_per_line);
         unsigned int space  = StatArray[ix].t1_space   + StatArray[ix].t2_space  + StatArray[ix].tx_space
-                            + StatArray[ix].stub_space + StatArray[ix].dead_space;
+                            + StatArray[ix].stub_space;
         print_space_single(ast, space);
       }
     }
@@ -2040,22 +1892,6 @@ void CodeHeapState::print_space(outputStream* out, CodeHeap* heap) {
     BUFFEREDSTREAM_FLUSH_LOCKED("\n\n\n")
   }

-  {
-    if (nBlocks_dead > 0) {
-      printBox(ast, '-', "Dead space consumption. ' ' indicates empty, '*' indicates full", NULL);
-
-      granules_per_line = 128;
-      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
-        print_line_delim(out, ast, low_bound, ix, granules_per_line);
-        print_space_single(ast, StatArray[ix].dead_space);
-      }
-      ast->print("|");
-    } else {
-      ast->print("No dead nMethods found in CodeHeap.");
-    }
-    BUFFEREDSTREAM_FLUSH_LOCKED("\n\n\n")
-  }
-
   {
     if (!segment_granules) { // Prevent totally redundant printouts
       printBox(ast, '-', "Space consumption by tier (combined): <t1%>:<t2%>:<s%>. ' ' indicates empty, '*' indicates full", NULL);
@@ -2250,7 +2086,7 @@ void CodeHeapState::print_names(outputStream* out, CodeHeap* heap) {
     }
     // Only check granule if it contains at least one blob.
     unsigned int nBlobs  = StatArray[ix].t1_count   + StatArray[ix].t2_count + StatArray[ix].tx_count +
-                           StatArray[ix].stub_count + StatArray[ix].dead_count;
+                           StatArray[ix].stub_count;
     if (nBlobs > 0 ) {
       for (unsigned int is = 0; is < granule_size; is+=(unsigned int)seg_size) {
         // heap->find_start() is safe. Only works on _segmap.
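The print_names and get_cbType hunks that follow shrink CodeHeapState's nmethod classification to the three remaining live states. A compact model of the new decision logic is sketched below; the struct and its two flags are hypothetical stand-ins for the real predicates is_in_use() and is_not_entrant().

enum BlobState { nMethod_inuse, nMethod_notused, nMethod_notentrant };

// Hypothetical stand-in for the two nmethod predicates the real code queries.
struct NMethodFlags {
  bool in_use;       // is_in_use()
  bool not_entrant;  // is_not_entrant()
};

static BlobState classify(const NMethodFlags& nm) {
  if (nm.in_use)       return nMethod_inuse;      // normal executable state
  if (!nm.not_entrant) return nMethod_notused;    // parked, could be revived
  return nMethod_notentrant;                      // marked for deoptimization
}

With zombie and unloaded gone, an nmethod that stops being usable is unlinked and flushed rather than lingering in the heap, so the printer no longer needs dead states.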
@@ -2293,7 +2129,7 @@ void CodeHeapState::print_names(outputStream* out, CodeHeap* heap) {
       ast->print("%9s", "compiler");
       ast->fill_to(61);
       ast->print_cr("%6s", "method");
-      ast->print_cr("%18s %13s %17s %9s %5s %18s %s", "Addr(module) ", "offset", "size", " type lvl", " temp", "blobType ", "Name");
+      ast->print_cr("%18s %13s %17s %9s %18s %s", "Addr(module) ", "offset", "size", " type lvl", "blobType ", "Name");
       BUFFEREDSTREAM_FLUSH_AUTO("")
     }

@@ -2310,7 +2146,6 @@ void CodeHeapState::print_names(outputStream* out, CodeHeap* heap) {
           ResourceMark rm;
           //---< collect all data to locals as quickly as possible >---
           unsigned int total_size = nm->total_size();
-          int          hotness    = nm->hotness_counter();
           bool         get_name   = (cbType == nMethod_inuse) || (cbType == nMethod_notused);
           //---< nMethod size in hex >---
           ast->print(PTR32_FORMAT, total_size);
@@ -2318,16 +2153,10 @@
           //---< compiler information >---
           ast->fill_to(51);
           ast->print("%5s %3d", compTypeName[StatArray[ix].compiler], StatArray[ix].level);
-          //---< method temperature >---
-          ast->fill_to(62);
-          ast->print("%5d", hotness);
           //---< name and signature >---
-          ast->fill_to(62+6);
+          ast->fill_to(62);
           ast->print("%s", blobTypeName[cbType]);
-          ast->fill_to(82+6);
-          if (cbType == nMethod_dead) {
-            ast->print("%14s", " zombie method");
-          }
+          ast->fill_to(82);

           if (get_name) {
             Symbol* methName = method->name();
@@ -2347,12 +2176,12 @@
             ast->print("%s", blob_name);
           }
         } else if (blob_is_safe) {
-          ast->fill_to(62+6);
+          ast->fill_to(62);
           ast->print("%s", blobTypeName[cbType]);
-          ast->fill_to(82+6);
+          ast->fill_to(82);
           ast->print("%s", blob_name);
         } else {
-          ast->fill_to(62+6);
+          ast->fill_to(62);
           ast->print("<stale blob>");
         }
         ast->cr();
@@ -2534,12 +2363,9 @@ CodeHeapState::blobType CodeHeapState::get_cbType(CodeBlob* cb) {
     if (holding_required_locks()) {
       nmethod*  nm = cb->as_nmethod_or_null();
       if (nm != NULL) { // no is_readable check required, nm = (nmethod*)cb.
-        if (nm->is_zombie())        return nMethod_dead;
-        if (nm->is_unloaded())      return nMethod_unloaded;
         if (nm->is_in_use())        return nMethod_inuse;
-        if (nm->is_alive() && !(nm->is_not_entrant())) return nMethod_notused;
-        if (nm->is_alive())         return nMethod_alive;
-        return nMethod_dead;
+        if (!nm->is_not_entrant()) return nMethod_notused;
+        return nMethod_notentrant;
       }
     }
   }
@@ -2558,7 +2384,7 @@ bool CodeHeapState::blob_access_is_safe(CodeBlob* this_blob) {
 // make sure the nmethod at hand (and the linked method) is not garbage.
 bool CodeHeapState::nmethod_access_is_safe(nmethod* nm) {
   Method* method = (nm == NULL) ? NULL : nm->method(); // nm->method() was found to be uninitialized, i.e. != NULL, but invalid.
-  return (nm != NULL) && (method != NULL) && nm->is_alive() && (method->signature() != NULL);
+  return (nm != NULL) && (method != NULL) && (method->signature() != NULL);
 }

 bool CodeHeapState::holding_required_locks() {
diff --git a/src/hotspot/share/code/codeHeapState.hpp b/src/hotspot/share/code/codeHeapState.hpp
index 7ce219e2d83d7..1bd41fdda7254 100644
--- a/src/hotspot/share/code/codeHeapState.hpp
+++ b/src/hotspot/share/code/codeHeapState.hpp
@@ -52,12 +52,7 @@ class CodeHeapState : public CHeapObj<mtCode> {
     nMethod_inuse,       // executable. This is the "normal" state for a nmethod.
     nMethod_notused,     // assumed inactive, marked not entrant. Could be revived if necessary.
     nMethod_notentrant,  // no new activations allowed, marked for deoptimization. Old activations may still exist.
- // Will transition to "zombie" after all activations are gone. - nMethod_zombie, // No more activations exist, ready for purge (remove from code cache). - nMethod_unloaded, // No activations exist, should not be called. Transient state on the way to "zombie". - nMethod_alive = nMethod_notentrant, // Combined state: nmethod may have activations, thus can't be purged. - nMethod_dead = nMethod_zombie, // Combined state: nmethod does not have any activations. - runtimeStub = nMethod_unloaded + 1, + runtimeStub, ricochetStub, deoptimizationStub, uncommonTrapStub, diff --git a/src/hotspot/share/code/compiledIC.cpp b/src/hotspot/share/code/compiledIC.cpp index f0329ba214249..715a79ab11a0d 100644 --- a/src/hotspot/share/code/compiledIC.cpp +++ b/src/hotspot/share/code/compiledIC.cpp @@ -68,7 +68,7 @@ bool CompiledICLocker::is_safe(CompiledMethod* method) { } bool CompiledICLocker::is_safe(address code) { - CodeBlob* cb = CodeCache::find_blob_unsafe(code); + CodeBlob* cb = CodeCache::find_blob(code); assert(cb != NULL && cb->is_compiled(), "must be compiled"); CompiledMethod* cm = cb->as_compiled_method(); return CompiledICProtectionBehaviour::current()->is_safe(cm); @@ -128,7 +128,7 @@ void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub } { - CodeBlob* cb = CodeCache::find_blob_unsafe(_call->instruction_address()); + CodeBlob* cb = CodeCache::find_blob(_call->instruction_address()); assert(cb != NULL && cb->is_compiled(), "must be compiled"); _call->set_destination_mt_safe(entry_point); } @@ -317,10 +317,7 @@ bool CompiledIC::is_megamorphic() const { bool CompiledIC::is_call_to_compiled() const { assert(CompiledICLocker::is_safe(_method), "mt unsafe call"); - // Use unsafe, since an inline cache might point to a zombie method. However, the zombie - // method is guaranteed to still exist, since we only remove methods after all inline caches - // has been cleaned up - CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination()); + CodeBlob* cb = CodeCache::find_blob(ic_destination()); bool is_monomorphic = (cb != NULL && cb->is_compiled()); // Check that the cached_value is a klass for non-optimized monomorphic calls // This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used @@ -328,12 +325,11 @@ bool CompiledIC::is_call_to_compiled() const { // For JVMCI this occurs because CHA is only used to improve inlining so call sites which could be optimized // virtuals because there are no currently loaded subclasses of a type are left as virtual call sites. #ifdef ASSERT - CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address()); + CodeBlob* caller = CodeCache::find_blob(instruction_address()); bool is_c1_or_jvmci_method = caller->is_compiled_by_c1() || caller->is_compiled_by_jvmci(); assert( is_c1_or_jvmci_method || !is_monomorphic || is_optimized() || - !caller->is_alive() || (cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check"); #endif // ASSERT return is_monomorphic; @@ -346,10 +342,7 @@ bool CompiledIC::is_call_to_interpreted() const { // is optimized), or calling to an I2C blob bool is_call_to_interpreted = false; if (!is_optimized()) { - // must use unsafe because the destination can be a zombie (and we're cleaning) - // and the print_compiled_ic code wants to know if site (in the non-zombie) - // is to the interpreter. 
-    CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
+    CodeBlob* cb = CodeCache::find_blob(ic_destination());
     is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
     assert(!is_call_to_interpreted || (is_icholder_call() && cached_icholder() != NULL), "sanity check");
   } else {
@@ -374,8 +367,6 @@ bool CompiledIC::set_to_clean(bool in_use) {

   address entry = _call->get_resolve_call_stub(is_optimized());

-  // A zombie transition will always be safe, since the metadata has already been set to NULL, so
-  // we only need to patch the destination
   bool safe_transition = _call->is_safe_for_patching() || !in_use || is_optimized() || SafepointSynchronize::is_at_safepoint();

   if (safe_transition) {
@@ -460,7 +451,7 @@ bool CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
     // Call to compiled code
     bool static_bound = info.is_optimized() || (info.cached_metadata() == NULL);
 #ifdef ASSERT
-    CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
+    CodeBlob* cb = CodeCache::find_blob(info.entry());
     assert (cb != NULL && cb->is_compiled(), "must be compiled!");
 #endif /* ASSERT */

@@ -560,7 +551,7 @@ void CompiledIC::compute_monomorphic_entry(const methodHandle& method,

 bool CompiledIC::is_icholder_entry(address entry) {
-  CodeBlob* cb = CodeCache::find_blob_unsafe(entry);
+  CodeBlob* cb = CodeCache::find_blob(entry);
   if (cb != NULL && cb->is_adapter_blob()) {
     return true;
   }
diff --git a/src/hotspot/share/code/compiledMethod.cpp b/src/hotspot/share/code/compiledMethod.cpp
index d052ee8722053..6e83e17b48211 100644
--- a/src/hotspot/share/code/compiledMethod.cpp
+++ b/src/hotspot/share/code/compiledMethod.cpp
@@ -106,10 +106,6 @@ const char* CompiledMethod::state() const {
     return "not_used";
   case not_entrant:
     return "not_entrant";
-  case zombie:
-    return "zombie";
-  case unloaded:
-    return "unloaded";
   default:
     fatal("unexpected method state: %d", state);
     return NULL;
@@ -310,7 +306,7 @@ ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
 }

 address CompiledMethod::oops_reloc_begin() const {
-  // If the method is not entrant or zombie then a JMP is plastered over the
+  // If the method is not entrant then a JMP is plastered over the
   // first few bytes.  If an oop in the old code was there, that oop
   // should not get GC'd.  Skip the first few bytes of oops on
   // not-entrant methods.
@@ -428,11 +424,7 @@ Method* CompiledMethod::attached_method_before_pc(address pc) {
 }

 void CompiledMethod::clear_inline_caches() {
-  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
-  if (is_zombie()) {
-    return;
-  }
-
+  assert(SafepointSynchronize::is_at_safepoint(), "clearing of IC's only allowed at safepoint");
   RelocIterator iter(this);
   while (iter.next()) {
     iter.reloc()->clear_inline_cache();
@@ -516,47 +508,11 @@ bool CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {

 template <class CompiledICorStaticCall>
 static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
                                          bool clean_all) {
-  // Ok, to lookup references to zombies here
-  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
+  CodeBlob *cb = CodeCache::find_blob(addr);
   CompiledMethod* nm = (cb != NULL) ?
cb->as_compiled_method_or_null() : NULL; if (nm != NULL) { - // Clean inline caches pointing to both zombie and not_entrant methods + // Clean inline caches pointing to bad nmethods if (clean_all || !nm->is_in_use() || nm->is_unloading() || (nm->method()->code() != nm)) { - // Inline cache cleaning should only be initiated on CompiledMethods that have been - // observed to be is_alive(). However, with concurrent code cache unloading, it is - // possible that by now, the state has become !is_alive. This can happen in two ways: - // 1) It can be racingly flipped to unloaded if the nmethod // being cleaned (from the - // sweeper) is_unloading(). This is fine, because if that happens, then the inline - // caches have already been cleaned under the same CompiledICLocker that we now hold during - // inline cache cleaning, and we will simply walk the inline caches again, and likely not - // find much of interest to clean. However, this race prevents us from asserting that the - // nmethod is_alive(). The is_unloading() function is completely monotonic; once set due - // to an oop dying, it remains set forever until freed. Because of that, all unloaded - // nmethods are is_unloading(), but notably, an unloaded nmethod may also subsequently - // become zombie (when the sweeper converts it to zombie). - // 2) It can be racingly flipped to zombie if the nmethod being cleaned (by the concurrent - // GC) cleans a zombie nmethod that is concurrently made zombie by the sweeper. In this - // scenario, the sweeper will first transition the nmethod to zombie, and then when - // unregistering from the GC, it will wait until the GC is done. The GC will then clean - // the inline caches *with IC stubs*, even though no IC stubs are needed. This is fine, - // as long as the IC stubs are guaranteed to be released until the next safepoint, where - // IC finalization requires live IC stubs to not be associated with zombie nmethods. - // This is guaranteed, because the sweeper does not have a single safepoint check until - // after it completes the whole transition function; it will wake up after the GC is - // done with concurrent code cache cleaning (which blocks out safepoints using the - // suspendible threads set), and then call clear_ic_callsites, which will release the - // associated IC stubs, before a subsequent safepoint poll can be reached. This - // guarantees that the spuriously created IC stubs are released appropriately before - // IC finalization in a safepoint gets to run. Therefore, this race is fine. This is also - // valid in a scenario where an inline cache of a zombie nmethod gets a spurious IC stub, - // and then when cleaning another inline cache, fails to request an IC stub because we - // exhausted the IC stub buffer. In this scenario, the GC will request a safepoint after - // yielding the suspendible therad set, effectively unblocking safepoints. Before such - // a safepoint can be reached, the sweeper similarly has to wake up, clear the IC stubs, - // and reach the next safepoint poll, after the whole transition function has completed. - // Due to the various races that can cause an nmethod to first be is_alive() and then - // racingly become !is_alive(), it is unfortunately not possible to assert the nmethod - // is_alive(), !is_unloaded() or !is_zombie() here. 
if (!ic->set_to_clean(!from->is_unloading())) { return false; } @@ -618,40 +574,24 @@ void CompiledMethod::run_nmethod_entry_barrier() { } } -void CompiledMethod::cleanup_inline_caches(bool clean_all) { - for (;;) { - ICRefillVerifier ic_refill_verifier; - { CompiledICLocker ic_locker(this); - if (cleanup_inline_caches_impl(false, clean_all)) { - return; - } - } - // Call this nmethod entry barrier from the sweeper. - run_nmethod_entry_barrier(); - if (!clean_all) { - MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag); - CodeCache::Sweep::end(); - } - InlineCacheBuffer::refill_ic_stubs(); - if (!clean_all) { - MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag); - CodeCache::Sweep::begin(); - } - } +// Only called by whitebox test +void CompiledMethod::cleanup_inline_caches_whitebox() { + assert_locked_or_safepoint(CodeCache_lock); + CompiledICLocker ic_locker(this); + guarantee(cleanup_inline_caches_impl(false /* unloading_occurred */, true /* clean_all */), + "Inline cache cleaning in a safepoint can't fail"); } address* CompiledMethod::orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + orig_pc_offset()); } -// Called to clean up after class unloading for live nmethods and from the sweeper -// for all methods. +// Called to clean up after class unloading for live nmethods bool CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) { assert(CompiledICLocker::is_safe(this), "mt unsafe call"); ResourceMark rm; - // Find all calls in an nmethod and clear the ones that point to non-entrant, - // zombie and unloaded nmethods. + // Find all calls in an nmethod and clear the ones that point to bad nmethods. RelocIterator iter(this, oops_reloc_begin()); bool is_in_static_stub = false; while(iter.next()) { diff --git a/src/hotspot/share/code/compiledMethod.hpp b/src/hotspot/share/code/compiledMethod.hpp index f4f40c5a27e1d..00e62ed96950b 100644 --- a/src/hotspot/share/code/compiledMethod.hpp +++ b/src/hotspot/share/code/compiledMethod.hpp @@ -140,7 +140,6 @@ class PcDescContainer { class CompiledMethod : public CodeBlob { friend class VMStructs; - friend class NMethodSweeper; void init_defaults(); protected: @@ -204,11 +203,7 @@ class CompiledMethod : public CodeBlob { // allowed to advance state in_use = 0, // executable nmethod not_used = 1, // not entrant, but revivable - not_entrant = 2, // marked for deoptimization but activations may still exist, - // will be transformed to zombie when all activations are gone - unloaded = 3, // there should be no activations, should not be called, will be - // transformed to zombie by the sweeper, when not "locked in vm". 
- zombie = 4 // no activations exist, nmethod is ready for purge + not_entrant = 2, // marked for deoptimization but activations may still exist }; virtual bool is_in_use() const = 0; @@ -222,7 +217,6 @@ class CompiledMethod : public CodeBlob { virtual bool make_not_entrant() = 0; virtual bool make_entrant() = 0; virtual address entry_point() const = 0; - virtual bool make_zombie() = 0; virtual bool is_osr_method() const = 0; virtual int osr_entry_bci() const = 0; Method* method() const { return _method; } @@ -344,7 +338,6 @@ class CompiledMethod : public CodeBlob { address* orig_pc_addr(const frame* fr); public: - virtual bool can_convert_to_zombie() = 0; virtual const char* compile_kind() const = 0; virtual int get_state() const = 0; @@ -369,8 +362,8 @@ class CompiledMethod : public CodeBlob { address continuation_for_implicit_exception(address pc, bool for_div0_check); public: - // Serial version used by sweeper and whitebox test - void cleanup_inline_caches(bool clean_all); + // Serial version used by whitebox test + void cleanup_inline_caches_whitebox(); virtual void clear_inline_caches(); void clear_ic_callsites(); diff --git a/src/hotspot/share/code/compressedStream.cpp b/src/hotspot/share/code/compressedStream.cpp index 5a21bc234e62c..47eb2969c40e5 100644 --- a/src/hotspot/share/code/compressedStream.cpp +++ b/src/hotspot/share/code/compressedStream.cpp @@ -25,16 +25,12 @@ #include "precompiled.hpp" #include "code/compressedStream.hpp" #include "utilities/ostream.hpp" +#include "utilities/moveBits.hpp" // 32-bit self-inverse encoding of float bits // converts trailing zeroes (common in floats) to leading zeroes inline juint CompressedStream::reverse_int(juint i) { - // Hacker's Delight, Figure 7-1 - i = (i & 0x55555555) << 1 | ((i >> 1) & 0x55555555); - i = (i & 0x33333333) << 2 | ((i >> 2) & 0x33333333); - i = (i & 0x0f0f0f0f) << 4 | ((i >> 4) & 0x0f0f0f0f); - i = (i << 24) | ((i & 0xff00) << 8) | ((i >> 8) & 0xff00) | (i >> 24); - return i; + return reverse_bits(i); } jint CompressedReadStream::read_signed_int() { diff --git a/src/hotspot/share/code/dependencyContext.cpp b/src/hotspot/share/code/dependencyContext.cpp index eb577f6699d0d..1b3f3cf789f96 100644 --- a/src/hotspot/share/code/dependencyContext.cpp +++ b/src/hotspot/share/code/dependencyContext.cpp @@ -68,9 +68,7 @@ int DependencyContext::mark_dependent_nmethods(DepChange& changes) { int found = 0; for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) { nmethod* nm = b->get_nmethod(); - // since dependencies aren't removed until an nmethod becomes a zombie, - // the dependency list may contain nmethods which aren't alive. - if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) { + if (b->count() > 0 && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) { if (TraceDependencies) { ResourceMark rm; tty->print_cr("Marked for deoptimization"); @@ -137,40 +135,6 @@ void DependencyContext::release(nmethodBucket* b) { } } -// -// Remove an nmethod dependency from the context. -// Decrement count of the nmethod in the dependency list and, optionally, remove -// the bucket completely when the count goes to 0. This method must find -// a corresponding bucket otherwise there's a bug in the recording of dependencies. -// Can be called concurrently by parallel GC threads. 
-// -void DependencyContext::remove_dependent_nmethod(nmethod* nm) { - assert_locked_or_safepoint(CodeCache_lock); - nmethodBucket* first = dependencies_not_unloading(); - nmethodBucket* last = NULL; - for (nmethodBucket* b = first; b != NULL; b = b->next_not_unloading()) { - if (nm == b->get_nmethod()) { - int val = b->decrement(); - guarantee(val >= 0, "Underflow: %d", val); - if (val == 0) { - if (last == NULL) { - // If there was not a head that was not unloading, we can set a new - // head without a CAS, because we know there is no contending cleanup. - set_dependencies(b->next_not_unloading()); - } else { - // Only supports a single inserting thread (protected by CodeCache_lock) - // for now. Therefore, the next pointer only competes with another cleanup - // operation. That interaction does not need a CAS. - last->set_next(b->next_not_unloading()); - } - release(b); - } - return; - } - last = b; - } -} - // // Reclaim all unused buckets. // @@ -225,7 +189,7 @@ int DependencyContext::remove_and_mark_for_deoptimization_all_dependents() { int marked = 0; while (b != NULL) { nmethod* nm = b->get_nmethod(); - if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization()) { + if (b->count() > 0 && !nm->is_marked_for_deoptimization()) { nm->mark_for_deoptimization(); marked++; } diff --git a/src/hotspot/share/code/dependencyContext.hpp b/src/hotspot/share/code/dependencyContext.hpp index 68611c8168060..9dcf28d796a2d 100644 --- a/src/hotspot/share/code/dependencyContext.hpp +++ b/src/hotspot/share/code/dependencyContext.hpp @@ -119,7 +119,6 @@ class DependencyContext : public StackObj { int mark_dependent_nmethods(DepChange& changes); void add_dependent_nmethod(nmethod* nm); - void remove_dependent_nmethod(nmethod* nm); void remove_all_dependents(); int remove_and_mark_for_deoptimization_all_dependents(); void clean_unloading_dependents(); diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp index 2e9945ed141a0..bcc30e6e63666 100644 --- a/src/hotspot/share/code/nmethod.cpp +++ b/src/hotspot/share/code/nmethod.cpp @@ -33,8 +33,10 @@ #include "code/nmethod.hpp" #include "code/scopeDesc.hpp" #include "compiler/abstractCompiler.hpp" +#include "compiler/compilationLog.hpp" #include "compiler/compileBroker.hpp" #include "compiler/compileLog.hpp" +#include "compiler/compileTask.hpp" #include "compiler/compilerDirectives.hpp" #include "compiler/directivesParser.hpp" #include "compiler/disassembler.hpp" @@ -70,7 +72,6 @@ #include "runtime/serviceThread.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/signature.hpp" -#include "runtime/sweeper.hpp" #include "runtime/threadWXSetters.inline.hpp" #include "runtime/vmThread.hpp" #include "utilities/align.hpp" @@ -441,14 +442,7 @@ const char* nmethod::compile_kind() const { void nmethod::init_defaults() { _state = not_installed; _has_flushed_dependencies = 0; - _lock_count = 0; - _stack_traversal_mark = 0; _load_reported = false; // jvmti state - _unload_reported = false; - -#ifdef ASSERT - _oops_are_stale = false; -#endif _oops_do_mark_link = NULL; _osr_link = NULL; @@ -611,6 +605,7 @@ nmethod::nmethod( ByteSize basic_lock_sp_offset, OopMapSet* oop_maps ) : CompiledMethod(method, "native nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true), + _unlinked_next(NULL), _native_receiver_sp_offset(basic_lock_owner_sp_offset), _native_basic_lock_sp_offset(basic_lock_sp_offset), _is_unloading_state(0) @@ -630,7 +625,7 @@ 
nmethod::nmethod( // values something that will never match a pc like the nmethod vtable entry _exception_offset = 0; _orig_pc_offset = 0; - _gc_epoch = Continuations::gc_epoch(); + _gc_epoch = CodeCache::gc_epoch(); _consts_offset = data_offset(); _stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs()); @@ -654,7 +649,6 @@ nmethod::nmethod( _osr_entry_point = NULL; _exception_cache = NULL; _pc_desc_container.reset_to(NULL); - _hotness_counter = NMethodSweeper::hotness_counter_reset_val(); _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions); @@ -746,6 +740,7 @@ nmethod::nmethod( #endif ) : CompiledMethod(method, "nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true), + _unlinked_next(NULL), _native_receiver_sp_offset(in_ByteSize(-1)), _native_basic_lock_sp_offset(in_ByteSize(-1)), _is_unloading_state(0) @@ -763,8 +758,7 @@ nmethod::nmethod( _compile_id = compile_id; _comp_level = comp_level; _orig_pc_offset = orig_pc_offset; - _hotness_counter = NMethodSweeper::hotness_counter_reset_val(); - _gc_epoch = Continuations::gc_epoch(); + _gc_epoch = CodeCache::gc_epoch(); // Section offsets _consts_offset = content_offset() + code_buffer->total_offset_of(code_buffer->consts()); @@ -937,7 +931,7 @@ void nmethod::print_on(outputStream* st, const char* msg) const { } } -void nmethod::maybe_print_nmethod(DirectiveSet* directive) { +void nmethod::maybe_print_nmethod(const DirectiveSet* directive) { bool printnmethods = directive->PrintAssemblyOption || directive->PrintNMethodsOption; if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) { print_nmethod(printnmethods); @@ -945,8 +939,6 @@ void nmethod::maybe_print_nmethod(DirectiveSet* directive) { } void nmethod::print_nmethod(bool printmethod) { - run_nmethod_entry_barrier(); // ensure all embedded OOPs are valid before printing - ttyLocker ttyl; // keep the following output all in one block if (xtty != NULL) { xtty->begin_head("print_nmethod"); @@ -1120,7 +1112,6 @@ void nmethod::make_deoptimized() { } assert(method() == NULL || can_be_deoptimized(), ""); - assert(!is_zombie(), ""); CompiledICLocker ml(this); assert(CompiledICLocker::is_safe(this), "mt unsafe call"); @@ -1172,12 +1163,11 @@ void nmethod::verify_clean_inline_caches() { case relocInfo::virtual_call_type: case relocInfo::opt_virtual_call_type: { CompiledIC *ic = CompiledIC_at(&iter); - // Ok, to lookup references to zombies here - CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination()); + CodeBlob *cb = CodeCache::find_blob(ic->ic_destination()); assert(cb != NULL, "destination not in CodeBlob?"); nmethod* nm = cb->as_nmethod_or_null(); if( nm != NULL ) { - // Verify that inline caches pointing to both zombie and not_entrant methods are clean + // Verify that inline caches pointing to bad nmethods are clean if (!nm->is_in_use() || (nm->method()->code() != nm)) { assert(ic->is_clean(), "IC should be clean"); } @@ -1186,11 +1176,11 @@ void nmethod::verify_clean_inline_caches() { } case relocInfo::static_call_type: { CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc()); - CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination()); + CodeBlob *cb = CodeCache::find_blob(csc->destination()); assert(cb != NULL, "destination not in CodeBlob?"); nmethod* nm = cb->as_nmethod_or_null(); if( nm != NULL ) { - // Verify that inline caches pointing to both zombie and not_entrant methods are 
clean + // Verify that inline caches pointing to bad nmethods are clean if (!nm->is_in_use() || (nm->method()->code() != nm)) { assert(csc->is_clean(), "IC should be clean"); } @@ -1203,49 +1193,14 @@ void nmethod::verify_clean_inline_caches() { } } -// This is a private interface with the sweeper. -void nmethod::mark_as_seen_on_stack() { - assert(is_alive(), "Must be an alive method"); - // Set the traversal mark to ensure that the sweeper does 2 - // cleaning passes before moving to zombie. - set_stack_traversal_mark(NMethodSweeper::traversal_count()); -} - -void nmethod::mark_as_maybe_on_continuation() { - assert(is_alive(), "Must be an alive method"); - _gc_epoch = Continuations::gc_epoch(); +void nmethod::mark_as_maybe_on_stack() { + Atomic::store(&_gc_epoch, CodeCache::gc_epoch()); } -bool nmethod::is_maybe_on_continuation_stack() { - if (!Continuations::enabled()) { - return false; - } - +bool nmethod::is_maybe_on_stack() { // If the condition below is true, it means that the nmethod was found to // be alive the previous completed marking cycle. - return _gc_epoch >= Continuations::previous_completed_gc_marking_cycle(); -} - -// Tell if a non-entrant method can be converted to a zombie (i.e., -// there are no activations on the stack, not in use by the VM, -// and not in use by the ServiceThread) -bool nmethod::can_convert_to_zombie() { - // Note that this is called when the sweeper has observed the nmethod to be - // not_entrant. However, with concurrent code cache unloading, the state - // might have moved on to unloaded if it is_unloading(), due to racing - // concurrent GC threads. - assert(is_not_entrant() || is_unloading() || - !Thread::current()->is_Code_cache_sweeper_thread(), - "must be a non-entrant method if called from sweeper"); - - // Since the nmethod sweeper only does partial sweep the sweeper's traversal - // count can be greater than the stack traversal count before it hits the - // nmethod for the second time. - // If an is_unloading() nmethod is still not_entrant, then it is not safe to - // convert it to zombie due to GC unloading interactions. However, if it - // has become unloaded, then it is okay to convert such nmethods to zombie. - return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() && !is_maybe_on_continuation_stack() && - !is_locked_by_vm() && (!is_unloading() || is_unloaded()); + return Atomic::load(&_gc_epoch) >= CodeCache::previous_completed_gc_marking_cycle(); } void nmethod::inc_decompile_count() { @@ -1261,118 +1216,14 @@ void nmethod::inc_decompile_count() { bool nmethod::try_transition(int new_state_int) { signed char new_state = new_state_int; -#ifdef ASSERT - if (new_state != unloaded) { - assert_lock_strong(CompiledMethod_lock); - } -#endif - for (;;) { - signed char old_state = Atomic::load(&_state); - if (old_state >= new_state) { - // Ensure monotonicity of transitions. - return false; - } - if (Atomic::cmpxchg(&_state, old_state, new_state) == old_state) { - return true; - } - } -} - -void nmethod::make_unloaded() { - post_compiled_method_unload(); - - // This nmethod is being unloaded, make sure that dependencies - // recorded in instanceKlasses get flushed. - // Since this work is being done during a GC, defer deleting dependencies from the - // InstanceKlass. 
- assert(Universe::heap()->is_gc_active() || - Thread::current()->is_ConcurrentGC_thread() || - Thread::current()->is_Worker_thread(), - "should only be called during gc"); - flush_dependencies(/*delete_immediately*/false); - - // Break cycle between nmethod & method - LogTarget(Trace, class, unload, nmethod) lt; - if (lt.is_enabled()) { - LogStream ls(lt); - ls.print("making nmethod " INTPTR_FORMAT - " unloadable, Method*(" INTPTR_FORMAT - ") ", - p2i(this), p2i(_method)); - ls.cr(); - } - // Unlink the osr method, so we do not look this up again - if (is_osr_method()) { - // Invalidate the osr nmethod only once. Note that with concurrent - // code cache unloading, OSR nmethods are invalidated before they - // are made unloaded. Therefore, this becomes a no-op then. - if (is_in_use()) { - invalidate_osr_method(); - } -#ifdef ASSERT - if (method() != NULL) { - // Make sure osr nmethod is invalidated, i.e. not on the list - bool found = method()->method_holder()->remove_osr_nmethod(this); - assert(!found, "osr nmethod should have been invalidated"); - } -#endif - } - - // If _method is already NULL the Method* is about to be unloaded, - // so we don't have to break the cycle. Note that it is possible to - // have the Method* live here, in case we unload the nmethod because - // it is pointing to some oop (other than the Method*) being unloaded. - if (_method != NULL) { - _method->unlink_code(this); - } - - // Make the class unloaded - i.e., change state and notify sweeper - assert(SafepointSynchronize::is_at_safepoint() || - Thread::current()->is_ConcurrentGC_thread() || - Thread::current()->is_Worker_thread(), - "must be at safepoint"); - - { - // Clear ICStubs and release any CompiledICHolders. - CompiledICLocker ml(this); - clear_ic_callsites(); - } - - // Unregister must be done before the state change - { - MutexLocker ml(SafepointSynchronize::is_at_safepoint() ? NULL : CodeCache_lock, - Mutex::_no_safepoint_check_flag); - Universe::heap()->unregister_nmethod(this); - } - - // Clear the method of this dead nmethod - set_method(NULL); - - // Log the unloading. - log_state_change(); - - // The Method* is gone at this point - assert(_method == NULL, "Tautology"); - - set_osr_link(NULL); - NMethodSweeper::report_state_change(this); - - bool transition_success = try_transition(unloaded); - - // It is an important invariant that there exists no race between - // the sweeper and GC thread competing for making the same nmethod - // zombie and unloaded respectively. This is ensured by - // can_convert_to_zombie() returning false for any is_unloading() - // nmethod, informing the sweeper not to step on any GC toes. - assert(transition_success, "Invalid nmethod transition to unloaded"); - -#if INCLUDE_JVMCI - // Clear the link between this nmethod and a HotSpotNmethod mirror - JVMCINMethodData* nmethod_data = jvmci_nmethod_data(); - if (nmethod_data != NULL) { - nmethod_data->invalidate_nmethod_mirror(this); + assert_lock_strong(CompiledMethod_lock); + signed char old_state = _state; + if (old_state >= new_state) { + // Ensure monotonicity of transitions. 
+ return false; } -#endif + Atomic::store(&_state, new_state); + return true; } void nmethod::invalidate_osr_method() { @@ -1387,24 +1238,17 @@ void nmethod::log_state_change() const { if (LogCompilation) { if (xtty != NULL) { ttyLocker ttyl; // keep the following output all in one block - if (_state == unloaded) { - xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'", - os::current_thread_id()); - } else { - xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s", - os::current_thread_id(), - (_state == zombie ? " zombie='1'" : "")); - } + xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'", + os::current_thread_id()); log_identity(xtty); xtty->stamp(); xtty->end_elem(); } } - const char *state_msg = _state == zombie ? "made zombie" : "made not entrant"; - CompileTask::print_ul(this, state_msg); - if (PrintCompilation && _state != unloaded) { - print_on(tty, state_msg); + CompileTask::print_ul(this, "made not entrant"); + if (PrintCompilation) { + print_on(tty, "made not entrant"); } } @@ -1414,13 +1258,18 @@ void nmethod::unlink_from_method() { } } -/** - * Common functionality for both make_not_entrant and make_zombie - */ -bool nmethod::make_not_entrant_or_zombie(int state) { - assert(state == zombie || state == not_entrant, "must be zombie or not_entrant"); +// Invalidate code +bool nmethod::make_not_entrant() { + // This can be called while the system is already at a safepoint which is ok + NoSafepointVerifier nsv; - if (Atomic::load(&_state) >= state) { + if (is_unloading()) { + // If the nmethod is unloading, then it is already not entrant through + // the nmethod entry barriers. No need to do anything; GC will unload it. + return false; + } + + if (Atomic::load(&_state) == not_entrant) { // Avoid taking the lock if already in required state. // This is safe from races because the state is an end-state, // which the nmethod cannot back out of once entered. @@ -1428,78 +1277,44 @@ bool nmethod::make_not_entrant_or_zombie(int state) { return false; } - // Make sure the nmethod is not flushed. - nmethodLocker nml(this); - // This can be called while the system is already at a safepoint which is ok - NoSafepointVerifier nsv; - - // during patching, depending on the nmethod state we must notify the GC that - // code has been unloaded, unregistering it. We cannot do this right while - // holding the CompiledMethod_lock because we need to use the CodeCache_lock. This - // would be prone to deadlocks. - // This flag is used to remember whether we need to later lock and unregister. - bool nmethod_needs_unregister = false; - { // Enter critical section. Does not block for safepoint. MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock, Mutex::_no_safepoint_check_flag); - // This logic is equivalent to the logic below for patching the - // verified entry point of regular methods. We check that the - // nmethod is in use to ensure that it is invalidated only once. - if (is_osr_method() && is_in_use()) { - // this effectively makes the osr nmethod not entrant - invalidate_osr_method(); - } - - if (Atomic::load(&_state) >= state) { + if (Atomic::load(&_state) == not_entrant) { // another thread already performed this transition so nothing // to do, but return false to indicate this. return false; } - // The caller can be calling the method statically or through an inline - // cache call. 
- if (!is_osr_method() && !is_not_entrant()) { + if (is_osr_method()) { + // This logic is equivalent to the logic below for patching the + // verified entry point of regular methods. + // this effectively makes the osr nmethod not entrant + invalidate_osr_method(); + } else { + // The caller can be calling the method statically or through an inline + // cache call. NativeJump::patch_verified_entry(entry_point(), verified_entry_point(), - SharedRuntime::get_handle_wrong_method_stub()); + SharedRuntime::get_handle_wrong_method_stub()); } - if (is_in_use() && update_recompile_counts()) { - // It's a true state change, so mark the method as decompiled. - // Do it only for transition from alive. + if (update_recompile_counts()) { + // Mark the method as decompiled. inc_decompile_count(); } - // If the state is becoming a zombie, signal to unregister the nmethod with - // the heap. - // This nmethod may have already been unloaded during a full GC. - if ((state == zombie) && !is_unloaded()) { - nmethod_needs_unregister = true; - } - - // Must happen before state change. Otherwise we have a race condition in - // nmethod::can_convert_to_zombie(). I.e., a method can immediately - // transition its state from 'not_entrant' to 'zombie' without having to wait - // for stack scanning. - if (state == not_entrant) { - mark_as_seen_on_stack(); - OrderAccess::storestore(); // _stack_traversal_mark and _state + BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); + if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) { + // If nmethod entry barriers are not supported, we won't mark + // nmethods as on-stack when they become on-stack. So we + // degrade to a less accurate flushing strategy, for now. + mark_as_maybe_on_stack(); } // Change state - if (!try_transition(state)) { - // If the transition fails, it is due to another thread making the nmethod more - // dead. In particular, one thread might be making the nmethod unloaded concurrently. - // If so, having patched in the jump in the verified entry unnecessarily is fine. - // The nmethod is no longer possible to call by Java threads. - // Incrementing the decompile count is also fine as the caller of make_not_entrant() - // had a valid reason to deoptimize the nmethod. - // Marking the nmethod as seen on stack also has no effect, as the nmethod is now - // !is_alive(), and the seen on stack value is only used to convert not_entrant - // nmethods to zombie in can_convert_to_zombie(). - return false; - } + bool success = try_transition(not_entrant); + assert(success, "Transition can't fail"); // Log the transition once log_state_change(); @@ -1525,96 +1340,69 @@ bool nmethod::make_not_entrant_or_zombie(int state) { } #endif - // When the nmethod becomes zombie it is no longer alive so the - // dependencies must be flushed. nmethods in the not_entrant - // state will be flushed later when the transition to zombie - // happens or they get unloaded. - if (state == zombie) { - { - // Flushing dependencies must be done before any possible - // safepoint can sneak in, otherwise the oops used by the - // dependency logic could have become stale. - MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - if (nmethod_needs_unregister) { - Universe::heap()->unregister_nmethod(this); - } - flush_dependencies(/*delete_immediately*/true); - } + return true; +} -#if INCLUDE_JVMCI - // Now that the nmethod has been unregistered, it's - // safe to clear the HotSpotNmethod mirror oop. 
- if (nmethod_data != NULL) { - nmethod_data->clear_nmethod_mirror(this); - } -#endif +// For concurrent GCs, there must be a handshake between unlink and flush +void nmethod::unlink() { + if (_unlinked_next != NULL) { + // Already unlinked. It can be invoked twice because concurrent code cache + // unloading might need to restart when inline cache cleaning fails due to + // running out of ICStubs, which can only be refilled at safepoints + return; + } - // Clear ICStubs to prevent back patching stubs of zombie or flushed - // nmethods during the next safepoint (see ICStub::finalize), as well - // as to free up CompiledICHolder resources. - { - CompiledICLocker ml(this); - clear_ic_callsites(); - } + flush_dependencies(); - // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload - // event and it hasn't already been reported for this nmethod then - // report it now. The event may have been reported earlier if the GC - // marked it for unloading). JvmtiDeferredEventQueue support means - // we no longer go to a safepoint here. - post_compiled_method_unload(); + // unlink_from_method will take the CompiledMethod_lock. + // In this case we don't strictly need it when unlinking nmethods from + // the Method, because it is only concurrently unlinked by + // the entry barrier, which acquires the per nmethod lock. + unlink_from_method(); + clear_ic_callsites(); -#ifdef ASSERT - // It's no longer safe to access the oops section since zombie - // nmethods aren't scanned for GC. - _oops_are_stale = true; -#endif - // the Method may be reclaimed by class unloading now that the - // nmethod is in zombie state - set_method(NULL); - } else { - assert(state == not_entrant, "other cases may need to be handled differently"); + if (is_osr_method()) { + invalidate_osr_method(); } - if (TraceCreateZombies && state == zombie) { - ResourceMark m; - tty->print_cr("nmethod <" INTPTR_FORMAT "> %s code made %s", p2i(this), this->method() ? this->method()->name_and_sig_as_C_string() : "null", (state == not_entrant) ? "not entrant" : "zombie"); +#if INCLUDE_JVMCI + // Clear the link between this nmethod and a HotSpotNmethod mirror + JVMCINMethodData* nmethod_data = jvmci_nmethod_data(); + if (nmethod_data != NULL) { + nmethod_data->invalidate_nmethod_mirror(this); } +#endif - NMethodSweeper::report_state_change(this); - return true; + // Post before flushing as jmethodID is being used + post_compiled_method_unload(); + + // Register for flushing when it is safe. For concurrent class unloading, + // that would be after the unloading handshake, and for STW class unloading + // that would be when getting back to the VM thread. + CodeCache::register_unlinked(this); } void nmethod::flush() { - MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - // Note that there are no valid oops in the nmethod anymore. - assert(!is_osr_method() || is_unloaded() || is_zombie(), - "osr nmethod must be unloaded or zombie before flushing"); - assert(is_zombie() || is_osr_method(), "must be a zombie method"); - assert (!is_locked_by_vm(), "locked methods shouldn't be flushed"); - assert_locked_or_safepoint(CodeCache_lock); + MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag); // completely deallocate this method - Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, p2i(this)); - if (PrintMethodFlushing) { - tty->print_cr("*flushing %s nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT - "/Free CodeCache:" SIZE_FORMAT "Kb", - is_osr_method() ? 
"osr" : "",_compile_id, p2i(this), CodeCache::blob_count(), - CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024); - } + Events::log(Thread::current(), "flushing nmethod " INTPTR_FORMAT, p2i(this)); + log_debug(codecache)("*flushing %s nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT + "/Free CodeCache:" SIZE_FORMAT "Kb", + is_osr_method() ? "osr" : "",_compile_id, p2i(this), CodeCache::blob_count(), + CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024); // We need to deallocate any ExceptionCache data. // Note that we do not need to grab the nmethod lock for this, it // better be thread safe if we're disposing of it! ExceptionCache* ec = exception_cache(); - set_exception_cache(NULL); while(ec != NULL) { ExceptionCache* next = ec->next(); delete ec; ec = next; } - Universe::heap()->flush_nmethod(this); + Universe::heap()->unregister_nmethod(this); CodeCache::unregister_old_nmethod(this); CodeBlob::flush(); @@ -1637,79 +1425,51 @@ oop nmethod::oop_at_phantom(int index) const { // // Notify all classes this nmethod is dependent on that it is no -// longer dependent. This should only be called in two situations. -// First, when a nmethod transitions to a zombie all dependents need -// to be clear. Since zombification happens at a safepoint there's no -// synchronization issues. The second place is a little more tricky. -// During phase 1 of mark sweep class unloading may happen and as a -// result some nmethods may get unloaded. In this case the flushing -// of dependencies must happen during phase 1 since after GC any -// dependencies in the unloaded nmethod won't be updated, so -// traversing the dependency information in unsafe. In that case this -// function is called with a boolean argument and this function only -// notifies instanceKlasses that are reachable - -void nmethod::flush_dependencies(bool delete_immediately) { - DEBUG_ONLY(bool called_by_gc = Universe::heap()->is_gc_active() || - Thread::current()->is_ConcurrentGC_thread() || - Thread::current()->is_Worker_thread();) - assert(called_by_gc != delete_immediately, - "delete_immediately is false if and only if we are called during GC"); +// longer dependent. + +void nmethod::flush_dependencies() { if (!has_flushed_dependencies()) { set_has_flushed_dependencies(); for (Dependencies::DepStream deps(this); deps.next(); ) { if (deps.type() == Dependencies::call_site_target_value) { // CallSite dependencies are managed on per-CallSite instance basis. oop call_site = deps.argument_oop(0); - if (delete_immediately) { - assert_locked_or_safepoint(CodeCache_lock); - MethodHandles::remove_dependent_nmethod(call_site, this); - } else { - MethodHandles::clean_dependency_context(call_site); - } + MethodHandles::clean_dependency_context(call_site); } else { Klass* klass = deps.context_type(); if (klass == NULL) { continue; // ignore things like evol_method } - // During GC delete_immediately is false, and liveness - // of dependee determines class that needs to be updated. - if (delete_immediately) { - assert_locked_or_safepoint(CodeCache_lock); - InstanceKlass::cast(klass)->remove_dependent_nmethod(this); - } else if (klass->is_loader_alive()) { - // The GC may clean dependency contexts concurrently and in parallel. - InstanceKlass::cast(klass)->clean_dependency_context(); - } + // During GC liveness of dependee determines class that needs to be updated. + // The GC may clean dependency contexts concurrently and in parallel. 
+ InstanceKlass::cast(klass)->clean_dependency_context(); } } } } +void nmethod::post_compiled_method(CompileTask* task) { + task->mark_success(); + task->set_nm_content_size(content_size()); + task->set_nm_insts_size(insts_size()); + task->set_nm_total_size(total_size()); + + // JVMTI -- compiled method notification (must be done outside lock) + post_compiled_method_load_event(); + + if (CompilationLog::log() != NULL) { + CompilationLog::log()->log_nmethod(JavaThread::current(), this); + } + + const DirectiveSet* directive = task->directive(); + maybe_print_nmethod(directive); +} + // ------------------------------------------------------------------ // post_compiled_method_load_event // new method for install_code() path // Transfer information from compilation to jvmti void nmethod::post_compiled_method_load_event(JvmtiThreadState* state) { - - // Don't post this nmethod load event if it is already dying - // because the sweeper might already be deleting this nmethod. - { - MutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag); - // When the nmethod is acquired from the CodeCache iterator, it can racingly become zombie - // before this code is called. Filter them out here under the CompiledMethod_lock. - if (!is_alive()) { - return; - } - // As for is_alive() nmethods, we also don't want them to racingly become zombie once we - // release this lock, so we check that this is not going to be the case. - if (is_not_entrant() && can_convert_to_zombie()) { - return; - } - // Ensure the sweeper can't collect this nmethod until it become "active" with JvmtiThreadState::nmethods_do. - mark_as_seen_on_stack(); - } - // This is a bad time for a safepoint. We don't want // this nmethod to get unloaded while we're queueing the event. NoSafepointVerifier nsv; @@ -1744,37 +1504,19 @@ void nmethod::post_compiled_method_load_event(JvmtiThreadState* state) { } void nmethod::post_compiled_method_unload() { - if (unload_reported()) { - // During unloading we transition to unloaded and then to zombie - // and the unloading is reported during the first transition. - return; - } - - assert(_method != NULL && !is_unloaded(), "just checking"); + assert(_method != NULL, "just checking"); DTRACE_METHOD_UNLOAD_PROBE(method()); // If a JVMTI agent has enabled the CompiledMethodUnload event then - // post the event. Sometime later this nmethod will be made a zombie - // by the sweeper but the Method* will not be valid at that point. - // The jmethodID is a weak reference to the Method* so if - // it's being unloaded there's no way to look it up since the weak - // ref will have been cleared. + // post the event. The Method* will not be valid when this is freed. // Don't bother posting the unload if the load event wasn't posted. if (load_reported() && JvmtiExport::should_post_compiled_method_unload()) { - assert(!unload_reported(), "already unloaded"); JvmtiDeferredEvent event = JvmtiDeferredEvent::compiled_method_unload_event( method()->jmethod_id(), insts_begin()); ServiceThread::enqueue_deferred_event(&event); } - - // The JVMTI CompiledMethodUnload event can be enabled or disabled at - // any time. As the nmethod is being unloaded now we mark it has - // having the unload event reported - this will ensure that we don't - // attempt to report the event in the unlikely scenario where the - // event is enabled at the time the nmethod is made a zombie. - set_unload_reported(); } // Iterate over metadata calling this function. 
Used by RedefineClasses @@ -1824,8 +1566,40 @@ void nmethod::metadata_do(MetadataClosure* f) { if (_method != NULL) f->do_metadata(_method); } +// Heuristic for nuking nmethods even though their oops are live. +// Main purpose is to reduce code cache pressure and get rid of +// nmethods that don't seem to be all that relevant any longer. +bool nmethod::is_cold() { + if (!MethodFlushing || is_native_method() || is_not_installed()) { + // No heuristic unloading at all + return false; + } + + if (!is_maybe_on_stack() && is_not_entrant()) { + // Not entrant nmethods that are not on any stack can just + // be removed + return true; + } + + BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); + if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) { + // On platforms that don't support nmethod entry barriers, we can't + // trust the temporal aspect of the gc epochs. So we can't detect + // cold nmethods on such platforms. + return false; + } + + if (!UseCodeCacheFlushing) { + // Bail out if we don't heuristically remove nmethods + return false; + } + + // Other code can be phased out more gradually after N GCs + return CodeCache::previous_completed_gc_marking_cycle() > _gc_epoch + 2 * CodeCache::cold_gc_count(); +} + // The _is_unloading_state encodes a tuple comprising the unloading cycle -// and the result of IsUnloadingBehaviour::is_unloading() fpr that cycle. +// and the result of IsUnloadingBehaviour::is_unloading() for that cycle. // This is the bit layout of the _is_unloading_state byte: 00000CCU // CC refers to the cycle, which has 2 bits, and U refers to the result of // IsUnloadingBehaviour::is_unloading() for that unloading cycle. @@ -1876,40 +1650,11 @@ bool nmethod::is_unloading() { return false; } - // The IsUnloadingBehaviour is responsible for checking if there are any dead - // oops in the CompiledMethod, by calling oops_do on it. + // The IsUnloadingBehaviour is responsible for calculating if the nmethod + // should be unloaded. This can be either because there is a dead oop, + // or because is_cold() heuristically determines it is time to unload. state_unloading_cycle = current_cycle; - - if (is_zombie()) { - // Zombies without calculated unloading epoch are never unloading due to GC. - - // There are no races where a previously observed is_unloading() nmethod - // suddenly becomes not is_unloading() due to here being observed as zombie. - - // With STW unloading, all is_alive() && is_unloading() nmethods are unlinked - // and unloaded in the safepoint. That makes races where an nmethod is first - // observed as is_alive() && is_unloading() and subsequently observed as - // is_zombie() impossible. - - // With concurrent unloading, all references to is_unloading() nmethods are - // first unlinked (e.g. IC caches and dependency contexts). Then a global - // handshake operation is performed with all JavaThreads before finally - // unloading the nmethods. The sweeper never converts is_alive() && is_unloading() - // nmethods to zombies; it waits for them to become is_unloaded(). So before - // the global handshake, it is impossible for is_unloading() nmethods to - // racingly become is_zombie(). And is_unloading() is calculated for all is_alive() - // nmethods before taking that global handshake, meaning that it will never - // be recalculated after the handshake. 
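// A short standalone sketch (not HotSpot code) of the 00000CCU encoding
// documented above: bit 0 caches the is_unloading() result (U) and bits 1-2
// hold the two-bit unloading cycle (CC) the result was computed for. The
// names here are made up for illustration.

#include <cassert>
#include <cstdint>

struct IsUnloadingStateSketch {
  static uint8_t create(bool is_unloading, uint8_t cycle) {
    assert(cycle <= 3 && "CC is only two bits wide");
    return static_cast<uint8_t>((cycle << 1) | (is_unloading ? 1u : 0u));
  }
  static bool is_unloading(uint8_t state) { return (state & 1u) != 0; }
  static uint8_t cycle(uint8_t state) { return static_cast<uint8_t>((state >> 1) & 3); }
};

// A cached result is trusted only while the stored cycle still matches the
// current global unloading cycle; otherwise it is recomputed and re-stored,
// which is what the surrounding nmethod::is_unloading() does.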
- - // After that global handshake, is_unloading() nmethods are only observable - // to the iterators, and they will never trigger recomputation of the cached - // is_unloading_state, and hence may not suffer from such races. - - state_is_unloading = false; - } else { - state_is_unloading = IsUnloadingBehaviour::current()->is_unloading(this); - } - + state_is_unloading = IsUnloadingBehaviour::is_unloading(this); state = IsUnloadingState::create(state_is_unloading, state_unloading_cycle); RawAccess::store(&_is_unloading_state, state); @@ -1925,15 +1670,11 @@ void nmethod::clear_unloading_state() { // This is called at the end of the strong tracing/marking phase of a // GC to unload an nmethod if it contains otherwise unreachable -// oops. - +// oops or is heuristically found to be not important. void nmethod::do_unloading(bool unloading_occurred) { // Make sure the oop's ready to receive visitors - assert(!is_zombie() && !is_unloaded(), - "should not call follow on zombie or unloaded nmethod"); - if (is_unloading()) { - make_unloaded(); + unlink(); } else { guarantee(unload_nmethod_caches(unloading_occurred), "Should not need transition stubs"); @@ -1945,9 +1686,6 @@ void nmethod::do_unloading(bool unloading_occurred) { } void nmethod::oops_do(OopClosure* f, bool allow_dead) { - // make sure the oops ready to receive visitors - assert(allow_dead || is_alive(), "should not call follow on dead nmethod: %d", _state); - // Prevent extra code cache walk for platforms that don't have immediate oops. if (relocInfo::mustIterateImmediateOopsInCode()) { RelocIterator iter(this, oops_reloc_begin()); @@ -1979,8 +1717,8 @@ void nmethod::follow_nmethod(OopIterateClosure* cl) { // Process oops in the nmethod oops_do(cl); - // CodeCache sweeper support - mark_as_maybe_on_continuation(); + // CodeCache unloading support + mark_as_maybe_on_stack(); BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); bs_nm->disarm(this); @@ -2352,7 +2090,7 @@ void nmethod::check_all_dependencies(DepChange& changes) { // Iterate over live nmethods and check dependencies of all nmethods that are not // marked for deoptimization. A particular dependency is only checked once. - NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading); + NMethodIterator iter(NMethodIterator::only_not_unloading); while(iter.next()) { nmethod* nm = iter.method(); // Only notify for live nmethods @@ -2406,51 +2144,11 @@ bool nmethod::is_dependent_on_method(Method* dependee) { return false; } - -bool nmethod::is_patchable_at(address instr_addr) { - assert(insts_contains(instr_addr), "wrong nmethod used"); - if (is_zombie()) { - // a zombie may never be patched - return false; - } - return true; -} - - void nmethod_init() { // make sure you didn't forget to adjust the filler fields assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word"); } - -//------------------------------------------------------------------------------------------- - - -// QQQ might we make this work from a frame?? -nmethodLocker::nmethodLocker(address pc) { - CodeBlob* cb = CodeCache::find_blob(pc); - guarantee(cb != NULL && cb->is_compiled(), "bad pc for a nmethod found"); - _nm = cb->as_compiled_method(); - lock_nmethod(_nm); -} - -// Only JvmtiDeferredEvent::compiled_method_unload_event() -// should pass zombie_ok == true. 
-void nmethodLocker::lock_nmethod(CompiledMethod* cm, bool zombie_ok) { - if (cm == NULL) return; - nmethod* nm = cm->as_nmethod(); - Atomic::inc(&nm->_lock_count); - assert(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method: %p", nm); -} - -void nmethodLocker::unlock_nmethod(CompiledMethod* cm) { - if (cm == NULL) return; - nmethod* nm = cm->as_nmethod(); - Atomic::dec(&nm->_lock_count); - assert(nm->_lock_count >= 0, "unmatched nmethod lock/unlock"); -} - - // ----------------------------------------------------------------------------- // Verification @@ -2486,11 +2184,7 @@ class VerifyMetadataClosure: public MetadataClosure { void nmethod::verify() { - - // Hmm. OSR methods can be deopted but not marked as zombie or not_entrant - // seems odd. - - if (is_zombie() || is_not_entrant() || is_unloaded()) + if (is_not_entrant()) return; // Make sure all the entry points are correctly aligned for patching. @@ -3551,7 +3245,7 @@ class DirectNativeCallWrapper: public NativeCallWrapper { } virtual void verify_resolve_call(address dest) const { - CodeBlob* db = CodeCache::find_blob_unsafe(dest); + CodeBlob* db = CodeCache::find_blob(dest); assert(db != NULL && !db->is_adapter_blob(), "must use stub!"); } diff --git a/src/hotspot/share/code/nmethod.hpp b/src/hotspot/share/code/nmethod.hpp index 8ed4c5ea2a3d8..bd2af1053a005 100644 --- a/src/hotspot/share/code/nmethod.hpp +++ b/src/hotspot/share/code/nmethod.hpp @@ -27,6 +27,7 @@ #include "code/compiledMethod.hpp" +class CompileTask; class DepChange; class DirectiveSet; class DebugInformationRecorder; @@ -66,7 +67,6 @@ class JVMCINMethodData; class nmethod : public CompiledMethod { friend class VMStructs; friend class JVMCIVMStructs; - friend class NMethodSweeper; friend class CodeCache; // scavengable oops friend class JVMCINMethodData; @@ -74,13 +74,6 @@ class nmethod : public CompiledMethod { uint64_t _gc_epoch; - // not_entrant method removal. Each mark_sweep pass will update - // this mark to current sweep invocation count if it is seen on the - // stack. An not_entrant method can be removed when there are no - // more activations, i.e., when the _stack_traversal_mark is less than - // current sweep traversal index. - volatile int64_t _stack_traversal_mark; - // To support simple linked-list chaining of nmethods: nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head @@ -203,6 +196,8 @@ class nmethod : public CompiledMethod { address _verified_entry_point; // entry point without class check address _osr_entry_point; // entry point for on stack replacement + nmethod* _unlinked_next; + // Shared fields for all nmethod's int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method @@ -240,19 +235,6 @@ class nmethod : public CompiledMethod { RTMState _rtm_state; #endif - // Nmethod Flushing lock. If non-zero, then the nmethod is not removed - // and is not made into a zombie. However, once the nmethod is made into - // a zombie, it will be locked one final time if CompiledMethodUnload - // event processing needs to be done. - volatile jint _lock_count; - - // The _hotness_counter indicates the hotness of a method. The higher - // the value the hotter the method. The hotness counter of a nmethod is - // set to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method - // is active while stack scanning (do_stack_scanning()). The hotness - // counter is decreased (by 1) while sweeping. 
- int _hotness_counter; - // These are used for compiled synchronized native methods to // locate the owner and stack slot for the BasicLock. They are // needed because there is no debug information for compiled native @@ -273,17 +255,10 @@ class nmethod : public CompiledMethod { bool _has_flushed_dependencies; // Used for maintenance of dependencies (CodeCache_lock) // used by jvmti to track if an event has been posted for this nmethod. - bool _unload_reported; bool _load_reported; // Protected by CompiledMethod_lock - volatile signed char _state; // {not_installed, in_use, not_entrant, zombie, unloaded} - -#ifdef ASSERT - bool _oops_are_stale; // indicates that it's no longer safe to access oops section -#endif - - friend class nmethodLocker; + volatile signed char _state; // {not_installed, in_use, not_used, not_entrant} // For native wrappers nmethod(Method* method, @@ -330,7 +305,6 @@ class nmethod : public CompiledMethod { // Returns true if this thread changed the state of the nmethod or // false if another thread performed the transition. - bool make_not_entrant_or_zombie(int state); bool make_entrant() { Unimplemented(); return false; } void inc_decompile_count(); @@ -439,10 +413,6 @@ class nmethod : public CompiledMethod { int total_size () const; - void dec_hotness_counter() { _hotness_counter--; } - void set_hotness_counter(int val) { _hotness_counter = val; } - int hotness_counter() const { return _hotness_counter; } - // Containment bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); } bool metadata_contains (Metadata** addr) const { return metadata_begin () <= addr && addr < metadata_end (); } @@ -456,15 +426,17 @@ class nmethod : public CompiledMethod { // flag accessing and manipulation bool is_not_installed() const { return _state == not_installed; } bool is_in_use() const { return _state <= in_use; } - bool is_alive() const { return _state < unloaded; } bool is_not_entrant() const { return _state == not_entrant; } - bool is_zombie() const { return _state == zombie; } - bool is_unloaded() const { return _state == unloaded; } void clear_unloading_state(); + // Heuristically deduce an nmethod isn't worth keeping around + bool is_cold(); virtual bool is_unloading(); virtual void do_unloading(bool unloading_occurred); + nmethod* unlinked_next() const { return _unlinked_next; } + void set_unlinked_next(nmethod* next) { _unlinked_next = next; } + #if INCLUDE_RTM_OPT // rtm state accessing and manipulating RTMState rtm_state() const { return _rtm_state; } @@ -478,22 +450,16 @@ class nmethod : public CompiledMethod { // alive. It is used when an uncommon trap happens. Returns true // if this thread changed the state of the nmethod or false if // another thread performed the transition. 
- bool make_not_entrant() { - assert(!method()->is_method_handle_intrinsic(), "Cannot make MH intrinsic not entrant"); - return make_not_entrant_or_zombie(not_entrant); - } + bool make_not_entrant(); bool make_not_used() { return make_not_entrant(); } - bool make_zombie() { return make_not_entrant_or_zombie(zombie); } int get_state() const { return _state; } - void make_unloaded(); - bool has_dependencies() { return dependencies_size() != 0; } void print_dependencies() PRODUCT_RETURN; - void flush_dependencies(bool delete_immediately); + void flush_dependencies(); bool has_flushed_dependencies() { return _has_flushed_dependencies; } void set_has_flushed_dependencies() { assert(!has_flushed_dependencies(), "should only happen once"); @@ -511,7 +477,6 @@ class nmethod : public CompiledMethod { oop* oop_addr_at(int index) const { // for GC // relocation indexes are biased by 1 (because 0 is reserved) assert(index > 0 && index <= oops_count(), "must be a valid non-zero index"); - assert(!_oops_are_stale, "oops are stale"); return &oops_begin()[index - 1]; } @@ -536,10 +501,6 @@ class nmethod : public CompiledMethod { void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); } void fix_oop_relocations() { fix_oop_relocations(NULL, NULL, false); } - // Sweeper support - int64_t stack_traversal_mark() { return _stack_traversal_mark; } - void set_stack_traversal_mark(int64_t l) { _stack_traversal_mark = l; } - // On-stack replacement support int osr_entry_bci() const { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; } address osr_entry() const { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; } @@ -550,24 +511,15 @@ class nmethod : public CompiledMethod { // Verify calls to dead methods have been cleaned. void verify_clean_inline_caches(); - // unlink and deallocate this nmethod - // Only NMethodSweeper class is expected to use this. NMethodSweeper is not - // expected to use any other private methods/data in this class. + // Unlink this nmethod from the system + void unlink(); - protected: + // Deallocate this nmethod - called by the GC void flush(); - public: - // When true is returned, it is unsafe to remove this nmethod even if - // it is a zombie, since the VM or the ServiceThread might still be - // using it. - bool is_locked_by_vm() const { return _lock_count >0; } - // See comment at definition of _last_seen_on_stack - void mark_as_seen_on_stack(); - void mark_as_maybe_on_continuation(); - bool is_maybe_on_continuation_stack(); - bool can_convert_to_zombie(); + void mark_as_maybe_on_stack(); + bool is_maybe_on_stack(); // Evolution support. We make old (discarded) compiled methods point to new Method*s. 
void set_method(Method* method) { _method = method; }
@@ -625,9 +577,7 @@ class nmethod : public CompiledMethod {

   address* orig_pc_addr(const frame* fr);

-  // used by jvmti to track if the load and unload events has been reported
-  bool unload_reported() const { return _unload_reported; }
-  void set_unload_reported() { _unload_reported = true; }
+  // used by jvmti to track if the load event has been reported
   bool load_reported() const { return _load_reported; }
   void set_load_reported() { _load_reported = true; }

@@ -638,6 +588,9 @@ class nmethod : public CompiledMethod {

   int orig_pc_offset() { return _orig_pc_offset; }

+  // Post successful compilation
+  void post_compiled_method(CompileTask* task);
+
   // jvmti support:
   void post_compiled_method_load_event(JvmtiThreadState* state = NULL);

@@ -682,7 +635,7 @@ class nmethod : public CompiledMethod {
   void print_calls(outputStream* st) PRODUCT_RETURN;
   static void print_statistics() PRODUCT_RETURN;

-  void maybe_print_nmethod(DirectiveSet* directive);
+  void maybe_print_nmethod(const DirectiveSet* directive);
   void print_nmethod(bool print_code);

   // need to re-define this from CodeBlob else the overload hides it
@@ -730,9 +683,6 @@ class nmethod : public CompiledMethod {
   // corresponds to the given method as well.
   virtual bool is_dependent_on_method(Method* dependee);

-  // is it ok to patch at address?
-  bool is_patchable_at(address instr_address);
-
   // JVMTI's GetLocalInstance() support
   ByteSize native_receiver_sp_offset() {
     return _native_receiver_sp_offset;
   }
@@ -760,50 +710,4 @@ class nmethod : public CompiledMethod {
   void finalize_relocations();
 };

-// Locks an nmethod so its code will not get removed and it will not
-// be made into a zombie, even if it is a not_entrant method. After the
-// nmethod becomes a zombie, if CompiledMethodUnload event processing
-// needs to be done, then lock_nmethod() is used directly to keep the
-// generated code from being reused too early.
-class nmethodLocker : public StackObj {
-  CompiledMethod* _nm;
-
- public:
-
-  // note: nm can be NULL
-  // Only JvmtiDeferredEvent::compiled_method_unload_event()
-  // should pass zombie_ok == true.
-  static void lock_nmethod(CompiledMethod* nm, bool zombie_ok = false);
-  static void unlock_nmethod(CompiledMethod* nm); // (ditto)
-
-  nmethodLocker(address pc); // derive nm from pc
-  nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); }
-  nmethodLocker(CompiledMethod *nm) {
-    _nm = nm;
-    lock(_nm);
-  }
-
-  static void lock(CompiledMethod* method, bool zombie_ok = false) {
-    if (method == NULL) return;
-    lock_nmethod(method, zombie_ok);
-  }
-
-  static void unlock(CompiledMethod* method) {
-    if (method == NULL) return;
-    unlock_nmethod(method);
-  }
-
-  nmethodLocker() { _nm = NULL; }
-  ~nmethodLocker() {
-    unlock(_nm);
-  }
-
-  CompiledMethod* code() { return _nm; }
-  void set_code(CompiledMethod* new_nm, bool zombie_ok = false) {
-    unlock(_nm); // note: This works even if _nm==new_nm.
-    _nm = new_nm;
-    lock(_nm, zombie_ok);
-  }
-};
-
 #endif // SHARE_CODE_NMETHOD_HPP
diff --git a/src/hotspot/share/compiler/compilationLog.cpp b/src/hotspot/share/compiler/compilationLog.cpp
new file mode 100644
index 0000000000000..a0916fb3df94e
--- /dev/null
+++ b/src/hotspot/share/compiler/compilationLog.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "code/nmethod.hpp" +#include "compiler/compilationLog.hpp" +#include "compiler/compileTask.hpp" +#include "logging/log.hpp" +#include "memory/resourceArea.hpp" +#include "runtime/thread.hpp" +#include "utilities/ostream.hpp" + +CompilationLog* CompilationLog::_log; + +CompilationLog::CompilationLog() : StringEventLog("Compilation events", "jit") { +} + +void CompilationLog::log_compile(JavaThread* thread, CompileTask* task) { + StringLogMessage lm; + stringStream sstr(lm.buffer(), lm.size()); + // msg.time_stamp().update_to(tty->time_stamp().ticks()); + task->print(&sstr, NULL, true, false); + log(thread, "%s", (const char*)lm); +} + +void CompilationLog::log_nmethod(JavaThread* thread, nmethod* nm) { + log(thread, "nmethod %d%s " INTPTR_FORMAT " code [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", + nm->compile_id(), nm->is_osr_method() ? "%" : "", + p2i(nm), p2i(nm->code_begin()), p2i(nm->code_end())); +} + +void CompilationLog::log_failure(JavaThread* thread, CompileTask* task, const char* reason, const char* retry_message) { + StringLogMessage lm; + lm.print("%4d COMPILE SKIPPED: %s", task->compile_id(), reason); + if (retry_message != NULL) { + lm.append(" (%s)", retry_message); + } + lm.print("\n"); + log(thread, "%s", (const char*)lm); +} + +void CompilationLog::log_metaspace_failure(const char* reason) { + // Note: This method can be called from non-Java/compiler threads to + // log the global metaspace failure that might affect profiling. + ResourceMark rm; + StringLogMessage lm; + lm.print("%4d COMPILE PROFILING SKIPPED: %s", -1, reason); + lm.print("\n"); + log(Thread::current(), "%s", (const char*)lm); +} + +void CompilationLog::init() { + _log = new CompilationLog(); +} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/amd64/ProcAMD64ThreadContext.java b/src/hotspot/share/compiler/compilationLog.hpp similarity index 56% rename from src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/amd64/ProcAMD64ThreadContext.java rename to src/hotspot/share/compiler/compilationLog.hpp index 65d82749df65b..664e104d4a27c 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/amd64/ProcAMD64ThreadContext.java +++ b/src/hotspot/share/compiler/compilationLog.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -22,25 +22,31 @@ * */ -package sun.jvm.hotspot.debugger.proc.amd64; +#ifndef SHARE_COMPILER_COMPILATIONLOG_HPP +#define SHARE_COMPILER_COMPILATIONLOG_HPP -import sun.jvm.hotspot.debugger.*; -import sun.jvm.hotspot.debugger.amd64.*; -import sun.jvm.hotspot.debugger.proc.*; +#include "utilities/events.hpp" -public class ProcAMD64ThreadContext extends AMD64ThreadContext { - private ProcDebugger debugger; +class CompileTask; +class JavaThread; +class nmethod; - public ProcAMD64ThreadContext(ProcDebugger debugger) { - super(); - this.debugger = debugger; - } +class CompilationLog : public StringEventLog { +private: + static CompilationLog* _log; - public void setRegisterAsAddress(int index, Address value) { - setRegister(index, debugger.getAddressValue(value)); - } + CompilationLog(); - public Address getRegisterAsAddress(int index) { - return debugger.newAddress(getRegister(index)); - } -} +public: + + void log_compile(JavaThread* thread, CompileTask* task); + void log_nmethod(JavaThread* thread, nmethod* nm); + void log_failure(JavaThread* thread, CompileTask* task, const char* reason, const char* retry_message); + void log_metaspace_failure(const char* reason); + + static void init(); + static CompilationLog* log() { return _log; } + using StringEventLog::log; +}; + +#endif // SHARE_COMPILER_COMPILATIONLOG_HPP diff --git a/src/hotspot/share/compiler/compileBroker.cpp b/src/hotspot/share/compiler/compileBroker.cpp index 6e34d551eaadc..6229e7a750c7e 100644 --- a/src/hotspot/share/compiler/compileBroker.cpp +++ b/src/hotspot/share/compiler/compileBroker.cpp @@ -31,6 +31,7 @@ #include "code/codeCache.hpp" #include "code/codeHeapState.hpp" #include "code/dependencyContext.hpp" +#include "compiler/compilationLog.hpp" #include "compiler/compilationPolicy.hpp" #include "compiler/compileBroker.hpp" #include "compiler/compileLog.hpp" @@ -63,7 +64,6 @@ #include "runtime/perfData.hpp" #include "runtime/safepointVerifiers.hpp" #include "runtime/sharedRuntime.hpp" -#include "runtime/sweeper.hpp" #include "runtime/threads.hpp" #include "runtime/threadSMR.hpp" #include "runtime/timerTrace.hpp" @@ -194,53 +194,9 @@ CompilerStatistics CompileBroker::_stats_per_level[CompLevel_full_optimization]; CompileQueue* CompileBroker::_c2_compile_queue = NULL; CompileQueue* CompileBroker::_c1_compile_queue = NULL; - - -class CompilationLog : public StringEventLog { - public: - CompilationLog() : StringEventLog("Compilation events", "jit") { - } - - void log_compile(JavaThread* thread, CompileTask* task) { - StringLogMessage lm; - stringStream sstr(lm.buffer(), lm.size()); - // msg.time_stamp().update_to(tty->time_stamp().ticks()); - task->print(&sstr, NULL, true, false); - log(thread, "%s", (const char*)lm); - } - - void log_nmethod(JavaThread* thread, nmethod* nm) { - log(thread, "nmethod %d%s " INTPTR_FORMAT " code [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", - nm->compile_id(), nm->is_osr_method() ? 
"%" : "", - p2i(nm), p2i(nm->code_begin()), p2i(nm->code_end())); - } - - void log_failure(JavaThread* thread, CompileTask* task, const char* reason, const char* retry_message) { - StringLogMessage lm; - lm.print("%4d COMPILE SKIPPED: %s", task->compile_id(), reason); - if (retry_message != NULL) { - lm.append(" (%s)", retry_message); - } - lm.print("\n"); - log(thread, "%s", (const char*)lm); - } - - void log_metaspace_failure(const char* reason) { - // Note: This method can be called from non-Java/compiler threads to - // log the global metaspace failure that might affect profiling. - ResourceMark rm; - StringLogMessage lm; - lm.print("%4d COMPILE PROFILING SKIPPED: %s", -1, reason); - lm.print("\n"); - log(Thread::current(), "%s", (const char*)lm); - } -}; - -static CompilationLog* _compilation_log = NULL; - bool compileBroker_init() { if (LogEvents) { - _compilation_log = new CompilationLog(); + CompilationLog::init(); } // init directives stack, adding default directive @@ -269,7 +225,6 @@ CompileTaskWrapper::~CompileTaskWrapper() { CompileLog* log = thread->log(); if (log != NULL && !task->is_unloaded()) task->log_task_done(log); thread->set_task(NULL); - task->set_code_handle(NULL); thread->set_env(NULL); if (task->is_blocking()) { bool free_task = false; @@ -452,10 +407,7 @@ CompileTask* CompileQueue::get(CompilerThread* thread) { // If there are no compilation tasks and we can compile new jobs // (i.e., there is enough free space in the code cache) there is - // no need to invoke the sweeper. As a result, the hotness of methods - // remains unchanged. This behavior is desired, since we want to keep - // the stable state, i.e., we do not want to evict methods from the - // code cache if it is unnecessary. + // no need to invoke the GC. // We need a timed wait here, since compiler threads can exit if compilation // is disabled forever. We use 5 seconds wait time; the exiting of compiler threads // is not critical and we do not want idle compiler threads to wake up too often. @@ -699,8 +651,8 @@ void CompileBroker::compilation_init_phase1(JavaThread* THREAD) { } #endif // INCLUDE_JVMCI - // Start the compiler thread(s) and the sweeper thread - init_compiler_sweeper_threads(); + // Start the compiler thread(s) + init_compiler_threads(); // totalTime performance counter is always created as it is required // by the implementation of java.lang.management.CompilationMXBean. { @@ -828,7 +780,7 @@ class DeoptimizeObjectsALotThread : public JavaThread { }; // Entry for DeoptimizeObjectsALotThread. 
The threads are started in -// CompileBroker::init_compiler_sweeper_threads() iff DeoptimizeObjectsALot is enabled +// CompileBroker::init_compiler_threads() iff DeoptimizeObjectsALot is enabled void DeoptimizeObjectsALotThread::deopt_objs_alot_thread_entry(JavaThread* thread, TRAPS) { DeoptimizeObjectsALotThread* dt = ((DeoptimizeObjectsALotThread*) thread); bool enter_single_loop; @@ -891,9 +843,6 @@ JavaThread* CompileBroker::make_thread(ThreadType type, jobject thread_handle, C new_thread = new CompilerThread(queue, counters); } break; - case sweeper_t: - new_thread = new CodeCacheSweeperThread(); - break; #if defined(ASSERT) && COMPILER2_OR_JVMCI case deoptimizer_t: new_thread = new DeoptimizeObjectsALotThread(); @@ -957,10 +906,7 @@ JavaThread* CompileBroker::make_thread(ThreadType type, jobject thread_handle, C } -void CompileBroker::init_compiler_sweeper_threads() { - NMethodSweeper::set_sweep_threshold_bytes(static_cast(SweeperThreshold * ReservedCodeCacheSize / 100.0)); - log_info(codecache, sweep)("Sweeper threshold: " SIZE_FORMAT " bytes", NMethodSweeper::sweep_threshold_bytes()); - +void CompileBroker::init_compiler_threads() { // Ensure any exceptions lead to vm_exit_during_initialization. EXCEPTION_MARK; #if !defined(ZERO) @@ -1032,13 +978,6 @@ void CompileBroker::init_compiler_sweeper_threads() { PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes, _c1_count + _c2_count, CHECK); } - if (MethodFlushing) { - // Initialize the sweeper thread - Handle thread_oop = create_thread_oop("Sweeper thread", CHECK); - jobject thread_handle = JNIHandles::make_local(THREAD, thread_oop()); - make_thread(sweeper_t, thread_handle, NULL, NULL, THREAD); - } - #if defined(ASSERT) && COMPILER2_OR_JVMCI if (DeoptimizeObjectsALot) { // Initialize and start the object deoptimizer threads @@ -1756,7 +1695,6 @@ void CompileBroker::wait_for_completion(CompileTask* task) { // It is harmless to check this status without the lock, because // completion is a stable property (until the task object is recycled). assert(task->is_complete(), "Compilation should have completed"); - assert(task->code_handle() == NULL, "must be reset"); // By convention, the waiter is responsible for recycling a // blocking CompileTask. Since there is only one waiter ever @@ -1970,8 +1908,6 @@ void CompileBroker::compiler_thread_loop() { // CompileTaskWrapper also keeps the Method* from being deallocated if redefinition // occurs after fetching the compile task off the queue. 
CompileTaskWrapper ctw(task); - nmethodLocker result_handle; // (handle for the nmethod produced by this task) - task->set_code_handle(&result_handle); methodHandle method(thread, task->method()); // Never compile a method if breakpoints are present in it @@ -2046,8 +1982,8 @@ void CompileBroker::init_compiler_thread_log() { void CompileBroker::log_metaspace_failure() { const char* message = "some methods may not be compiled because metaspace " "is out of memory"; - if (_compilation_log != NULL) { - _compilation_log->log_metaspace_failure(message); + if (CompilationLog::log() != NULL) { + CompilationLog::log()->log_metaspace_failure(message); } if (PrintCompilation) { tty->print_cr("COMPILE PROFILING SKIPPED: %s", message); @@ -2123,26 +2059,16 @@ static void codecache_print(outputStream* out, bool detailed) { } } -void CompileBroker::post_compile(CompilerThread* thread, CompileTask* task, bool success, ciEnv* ci_env, - int compilable, const char* failure_reason) { - if (success) { - task->mark_success(); - if (ci_env != NULL) { - task->set_num_inlined_bytecodes(ci_env->num_inlined_bytecodes()); - } - if (_compilation_log != NULL) { - nmethod* code = task->code(); - if (code != NULL) { - _compilation_log->log_nmethod(thread, code); - } - } - } else if (AbortVMOnCompilationFailure) { - if (compilable == ciEnv::MethodCompilable_not_at_tier) { - fatal("Not compilable at tier %d: %s", task->comp_level(), failure_reason); - } - if (compilable == ciEnv::MethodCompilable_never) { - fatal("Never compilable: %s", failure_reason); - } +void CompileBroker::handle_compile_error(CompilerThread* thread, CompileTask* task, ciEnv* ci_env, + int compilable, const char* failure_reason) { + if (!AbortVMOnCompilationFailure) { + return; + } + if (compilable == ciEnv::MethodCompilable_not_at_tier) { + fatal("Not compilable at tier %d: %s", task->comp_level(), failure_reason); + } + if (compilable == ciEnv::MethodCompilable_never) { + fatal("Never compilable: %s", failure_reason); } } @@ -2155,7 +2081,7 @@ static void post_compilation_event(EventCompilation& event, CompileTask* task) { task->comp_level(), task->is_success(), task->osr_bci() != CompileBroker::standard_entry_bci, - (task->code() == NULL) ? 0 : task->code()->total_size(), + task->nm_total_size(), task->num_inlined_bytecodes()); } @@ -2179,8 +2105,8 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) { CompilerThread* thread = CompilerThread::current(); ResourceMark rm(thread); - if (LogEvents) { - _compilation_log->log_compile(thread, task); + if (CompilationLog::log() != NULL) { + CompilationLog::log()->log_compile(thread, task); } // Common flags. @@ -2203,6 +2129,7 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) { // Look up matching directives directive = DirectivesStack::getMatchingDirective(method, comp); + task->set_directive(directive); // Update compile information when using perfdata. 
if (UsePerfData) { @@ -2245,21 +2172,26 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) { compilable = ciEnv::MethodCompilable_never; } else { JVMCIEnv env(thread, &compile_state, __FILE__, __LINE__); - methodHandle method(thread, target_handle); - runtime = env.runtime(); - runtime->compile_method(&env, jvmci, method, osr_bci); - failure_reason = compile_state.failure_reason(); - failure_reason_on_C_heap = compile_state.failure_reason_on_C_heap(); - if (!compile_state.retryable()) { - retry_message = "not retryable"; - compilable = ciEnv::MethodCompilable_not_at_tier; - } - if (task->code() == NULL) { - assert(failure_reason != NULL, "must specify failure_reason"); + if (failure_reason == nullptr) { + methodHandle method(thread, target_handle); + runtime = env.runtime(); + runtime->compile_method(&env, jvmci, method, osr_bci); + + failure_reason = compile_state.failure_reason(); + failure_reason_on_C_heap = compile_state.failure_reason_on_C_heap(); + if (!compile_state.retryable()) { + retry_message = "not retryable"; + compilable = ciEnv::MethodCompilable_not_at_tier; + } + if (!task->is_success()) { + assert(failure_reason != NULL, "must specify failure_reason"); + } } } - post_compile(thread, task, task->code() != NULL, NULL, compilable, failure_reason); + if (!task->is_success()) { + handle_compile_error(thread, task, NULL, compilable, failure_reason); + } if (event.should_commit()) { post_compilation_event(event, task); } @@ -2320,7 +2252,9 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) { } } - if (!ci_env.failing() && task->code() == NULL) { + DirectivesStack::release(directive); + + if (!ci_env.failing() && !task->is_success()) { //assert(false, "compiler should always document failure"); // The compiler elected, without comment, not to register a result. // Do not attempt further compilations of this method. @@ -2336,7 +2270,9 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) { ci_env.report_failure(failure_reason); } - post_compile(thread, task, !ci_env.failing(), &ci_env, compilable, failure_reason); + if (ci_env.failing()) { + handle_compile_error(thread, task, &ci_env, compilable, failure_reason); + } if (event.should_commit()) { post_compilation_event(event, task); } @@ -2344,8 +2280,8 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) { if (failure_reason != NULL) { task->set_failure_reason(failure_reason, failure_reason_on_C_heap); - if (_compilation_log != NULL) { - _compilation_log->log_failure(thread, task, failure_reason, retry_message); + if (CompilationLog::log() != NULL) { + CompilationLog::log()->log_failure(thread, task, failure_reason, retry_message); } if (PrintCompilation) { FormatBufferResource msg = retry_message != NULL ? @@ -2361,18 +2297,12 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) { collect_statistics(thread, time, task); - nmethod* nm = task->code(); - if (nm != NULL) { - nm->maybe_print_nmethod(directive); - } - DirectivesStack::release(directive); - if (PrintCompilation && PrintCompilation2) { tty->print("%7d ", (int) tty->time_stamp().milliseconds()); // print timestamp tty->print("%4d ", compile_id); // print compilation number tty->print("%s ", (is_osr ? 
"%" : " ")); - if (task->code() != NULL) { - tty->print("size: %d(%d) ", task->code()->total_size(), task->code()->insts_size()); + if (task->is_success()) { + tty->print("size: %d(%d) ", task->nm_total_size(), task->nm_insts_size()); } tty->print_cr("time: %d inlined: %d bytes", (int)time.milliseconds(), task->num_inlined_bytecodes()); } @@ -2445,7 +2375,7 @@ void CompileBroker::handle_full_code_cache(CodeBlobType code_blob_type) { if (UseCodeCacheFlushing) { // Since code cache is full, immediately stop new compiles if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) { - NMethodSweeper::log_sweep("disable_compiler"); + log_info(codecache)("Code cache is full - disabling compilation"); } } else { disable_compilation_forever(); @@ -2512,10 +2442,8 @@ void CompileBroker::collect_statistics(CompilerThread* thread, elapsedTimer time uint compile_id = task->compile_id(); bool is_osr = (task->osr_bci() != standard_entry_bci); const int comp_level = task->comp_level(); - nmethod* code = task->code(); CompilerCounters* counters = thread->counters(); - assert(code == NULL || code->is_locked_by_vm(), "will survive the MutexLocker"); MutexLocker locker(CompileStatistics_lock); // _perf variables are production performance counters which are @@ -2534,7 +2462,7 @@ void CompileBroker::collect_statistics(CompilerThread* thread, elapsedTimer time _perf_total_bailout_count->inc(); } _t_bailedout_compilation.add(time); - } else if (code == NULL) { + } else if (!task->is_success()) { if (UsePerfData) { _perf_last_invalidated_method->set_value(counters->current_method()); _perf_last_invalidated_type->set_value(counters->compile_type()); @@ -2568,8 +2496,8 @@ void CompileBroker::collect_statistics(CompilerThread* thread, elapsedTimer time } else { stats->_standard.update(time, bytes_compiled); } - stats->_nmethods_size += code->total_size(); - stats->_nmethods_code_size += code->insts_size(); + stats->_nmethods_size += task->nm_total_size(); + stats->_nmethods_code_size += task->nm_insts_size(); } else { assert(false, "CompilerStatistics object does not exist for compilation level %d", comp_level); } @@ -2583,8 +2511,8 @@ void CompileBroker::collect_statistics(CompilerThread* thread, elapsedTimer time } else { stats->_standard.update(time, bytes_compiled); } - stats->_nmethods_size += code->total_size(); - stats->_nmethods_code_size += code->insts_size(); + stats->_nmethods_size += task->nm_total_size(); + stats->_nmethods_code_size += task->nm_insts_size(); } else { // if (!comp) assert(false, "Compiler object must exist"); } @@ -2613,13 +2541,13 @@ void CompileBroker::collect_statistics(CompilerThread* thread, elapsedTimer time } // Collect counts of successful compilations - _sum_nmethod_size += code->total_size(); - _sum_nmethod_code_size += code->insts_size(); + _sum_nmethod_size += task->nm_total_size(); + _sum_nmethod_code_size += task->nm_insts_size(); _total_compile_count++; if (UsePerfData) { - _perf_sum_nmethod_size->inc( code->total_size()); - _perf_sum_nmethod_code_size->inc(code->insts_size()); + _perf_sum_nmethod_size->inc( task->nm_total_size()); + _perf_sum_nmethod_code_size->inc(task->nm_insts_size()); _perf_total_compile_count->inc(); } @@ -2777,14 +2705,6 @@ void CompileBroker::print_info(outputStream *out) { out->print_cr(" Committed size : " SIZE_FORMAT_W(7) " KB", CodeCache::capacity() / K); out->print_cr(" Unallocated capacity : " SIZE_FORMAT_W(7) " KB", CodeCache::unallocated_capacity() / K); out->cr(); - - out->cr(); - out->print_cr("CodeCache cleaning 
overview"); - out->print_cr("--------------------------------------------------------"); - out->cr(); - NMethodSweeper::print(out); - out->print_cr("--------------------------------------------------------"); - out->cr(); } // Note: tty_lock must not be held upon entry to this function. diff --git a/src/hotspot/share/compiler/compileBroker.hpp b/src/hotspot/share/compiler/compileBroker.hpp index 0dd5c90edd1e6..3243cf35e695a 100644 --- a/src/hotspot/share/compiler/compileBroker.hpp +++ b/src/hotspot/share/compiler/compileBroker.hpp @@ -38,7 +38,6 @@ #endif class nmethod; -class nmethodLocker; // CompilerCounters // @@ -230,13 +229,12 @@ class CompileBroker: AllStatic { enum ThreadType { compiler_t, - sweeper_t, deoptimizer_t }; static Handle create_thread_oop(const char* name, TRAPS); static JavaThread* make_thread(ThreadType type, jobject thread_oop, CompileQueue* queue, AbstractCompiler* comp, JavaThread* THREAD); - static void init_compiler_sweeper_threads(); + static void init_compiler_threads(); static void possibly_add_compiler_threads(JavaThread* THREAD); static bool compilation_is_prohibited(const methodHandle& method, int osr_bci, int comp_level, bool excluded); @@ -255,8 +253,8 @@ class CompileBroker: AllStatic { #endif static void invoke_compiler_on_method(CompileTask* task); - static void post_compile(CompilerThread* thread, CompileTask* task, bool success, ciEnv* ci_env, - int compilable, const char* failure_reason); + static void handle_compile_error(CompilerThread* thread, CompileTask* task, ciEnv* ci_env, + int compilable, const char* failure_reason); static void update_compile_perf_data(CompilerThread *thread, const methodHandle& method, bool is_osr); static void collect_statistics(CompilerThread* thread, elapsedTimer time, CompileTask* task); diff --git a/src/hotspot/share/compiler/compileTask.cpp b/src/hotspot/share/compiler/compileTask.cpp index 4c8933252fdef..b946afa0091f9 100644 --- a/src/hotspot/share/compiler/compileTask.cpp +++ b/src/hotspot/share/compiler/compileTask.cpp @@ -65,7 +65,6 @@ CompileTask* CompileTask::allocate() { void CompileTask::free(CompileTask* task) { MutexLocker locker(CompileTaskAlloc_lock); if (!task->is_free()) { - task->set_code(NULL); assert(!task->lock()->is_locked(), "Should not be locked when freed"); if ((task->_method_holder != NULL && JNIHandles::is_weak_global_handle(task->_method_holder)) || (task->_hot_method_holder != NULL && JNIHandles::is_weak_global_handle(task->_hot_method_holder))) { @@ -110,7 +109,6 @@ void CompileTask::initialize(int compile_id, _is_complete = false; _is_success = false; - _code_handle = NULL; _hot_method = NULL; _hot_method_holder = NULL; @@ -118,6 +116,10 @@ void CompileTask::initialize(int compile_id, _time_queued = os::elapsed_counter(); _time_started = 0; _compile_reason = compile_reason; + _nm_content_size = 0; + _directive = NULL; + _nm_insts_size = 0; + _nm_total_size = 0; _failure_reason = NULL; _failure_reason_on_C_heap = false; @@ -161,25 +163,6 @@ CompileTask* CompileTask::select_for_compilation() { return this; } -// ------------------------------------------------------------------ -// CompileTask::code/set_code -// -nmethod* CompileTask::code() const { - if (_code_handle == NULL) return NULL; - CodeBlob *blob = _code_handle->code(); - if (blob != NULL) { - return blob->as_nmethod(); - } - return NULL; -} - -void CompileTask::set_code(nmethod* nm) { - if (_code_handle == NULL && nm == NULL) return; - guarantee(_code_handle != NULL, ""); - _code_handle->set_code(nm); - if (nm == NULL) 
_code_handle = NULL; // drop the handle also -} - void CompileTask::mark_on_stack() { if (is_unloaded()) { return; @@ -257,9 +240,6 @@ void CompileTask::print_impl(outputStream* st, Method* method, int compile_id, i } st->print("%4d ", compile_id); // print compilation number - // For unloaded methods the transition to zombie occurs after the - // method is cleared so it's impossible to report accurate - // information for that case. bool is_synchronized = false; bool has_exception_handler = false; bool is_native = false; @@ -399,9 +379,8 @@ void CompileTask::log_task_done(CompileLog* log) { } // - nmethod* nm = code(); log->begin_elem("task_done success='%d' nmsize='%d' count='%d'", - _is_success, nm == NULL ? 0 : nm->content_size(), + _is_success, _nm_content_size, method->invocation_count()); int bec = method->backedge_count(); if (bec != 0) log->print(" backedge_count='%d'", bec); diff --git a/src/hotspot/share/compiler/compileTask.hpp b/src/hotspot/share/compiler/compileTask.hpp index 23facc90cc5d8..8dedc202e7ff7 100644 --- a/src/hotspot/share/compiler/compileTask.hpp +++ b/src/hotspot/share/compiler/compileTask.hpp @@ -31,6 +31,8 @@ #include "memory/allocation.hpp" #include "utilities/xmlstream.hpp" +class DirectiveSet; + JVMCI_ONLY(class JVMCICompileState;) // CompileTask @@ -72,35 +74,38 @@ class CompileTask : public CHeapObj { } private: - static CompileTask* _task_free_list; - Monitor* _lock; - uint _compile_id; - Method* _method; - jobject _method_holder; - int _osr_bci; - bool _is_complete; - bool _is_success; - bool _is_blocking; + static CompileTask* _task_free_list; + Monitor* _lock; + uint _compile_id; + Method* _method; + jobject _method_holder; + int _osr_bci; + bool _is_complete; + bool _is_success; + bool _is_blocking; + CodeSection::csize_t _nm_content_size; + CodeSection::csize_t _nm_total_size; + CodeSection::csize_t _nm_insts_size; + const DirectiveSet* _directive; #if INCLUDE_JVMCI - bool _has_waiter; + bool _has_waiter; // Compilation state for a blocking JVMCI compilation - JVMCICompileState* _blocking_jvmci_compile_state; + JVMCICompileState* _blocking_jvmci_compile_state; #endif - int _comp_level; - int _num_inlined_bytecodes; - nmethodLocker* _code_handle; // holder of eventual result - CompileTask* _next, *_prev; - bool _is_free; + int _comp_level; + int _num_inlined_bytecodes; + CompileTask* _next, *_prev; + bool _is_free; // Fields used for logging why the compilation was initiated: - jlong _time_queued; // time when task was enqueued - jlong _time_started; // time when compilation started - Method* _hot_method; // which method actually triggered this task - jobject _hot_method_holder; - int _hot_count; // information about its invocation counter - CompileReason _compile_reason; // more info about the task - const char* _failure_reason; + jlong _time_queued; // time when task was enqueued + jlong _time_started; // time when compilation started + Method* _hot_method; // which method actually triggered this task + jobject _hot_method_holder; + int _hot_count; // information about its invocation counter + CompileReason _compile_reason; // more info about the task + const char* _failure_reason; // Specifies if _failure_reason is on the C heap. 
- bool _failure_reason_on_C_heap; + bool _failure_reason_on_C_heap; public: CompileTask() : _failure_reason(NULL), _failure_reason_on_C_heap(false) { @@ -122,6 +127,14 @@ class CompileTask : public CHeapObj { bool is_complete() const { return _is_complete; } bool is_blocking() const { return _is_blocking; } bool is_success() const { return _is_success; } + void set_directive(const DirectiveSet* directive) { _directive = directive; } + const DirectiveSet* directive() const { return _directive; } + CodeSection::csize_t nm_content_size() { return _nm_content_size; } + void set_nm_content_size(CodeSection::csize_t size) { _nm_content_size = size; } + CodeSection::csize_t nm_insts_size() { return _nm_insts_size; } + void set_nm_insts_size(CodeSection::csize_t size) { _nm_insts_size = size; } + CodeSection::csize_t nm_total_size() { return _nm_total_size; } + void set_nm_total_size(CodeSection::csize_t size) { _nm_total_size = size; } bool can_become_stale() const { switch (_compile_reason) { case Reason_BackedgeCount: @@ -153,11 +166,6 @@ class CompileTask : public CHeapObj { } #endif - nmethodLocker* code_handle() const { return _code_handle; } - void set_code_handle(nmethodLocker* l) { _code_handle = l; } - nmethod* code() const; // _code_handle->code() - void set_code(nmethod* nm); // _code_handle->set_code(nm) - Monitor* lock() const { return _lock; } void mark_complete() { _is_complete = true; }
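The three nm_*_size fields above replace the nmethodLocker-based CompileTask::code() accessor: the sizes are captured once, so later consumers such as log_task_done() never have to touch the nmethod again. A minimal sketch of how the broker might populate them after a successful compile (hypothetical helper, not part of this patch; the task setters are from the hunk above and the size queries exist on nmethod/CodeBlob):

// Hypothetical helper: record the result sizes on the task right after
// the nmethod is created, instead of keeping a handle to the nmethod.
static void record_result_sizes(CompileTask* task, nmethod* nm) {
  task->set_nm_content_size(nm->content_size()); // later read by log_task_done()
  task->set_nm_insts_size(nm->insts_size());
  task->set_nm_total_size(nm->total_size());
}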
diff --git a/src/hotspot/share/compiler/compilerDefinitions.cpp b/src/hotspot/share/compiler/compilerDefinitions.cpp index f7a2c4f6ef62b..0183690680952 100644 --- a/src/hotspot/share/compiler/compilerDefinitions.cpp +++ b/src/hotspot/share/compiler/compilerDefinitions.cpp @@ -590,19 +590,6 @@ void CompilerConfig::ergo_initialize() { set_jvmci_specific_flags(); #endif - if (FLAG_IS_DEFAULT(SweeperThreshold)) { - if (Continuations::enabled()) { - // When continuations are enabled, the sweeper needs to trigger GC to - // be able to sweep nmethods. Therefore, it's in general a good idea - // to be significantly less aggressive with sweeping, in order not to - // trigger excessive GC work. - FLAG_SET_ERGO(SweeperThreshold, SweeperThreshold * 10.0); - } else if ((SweeperThreshold * ReservedCodeCacheSize / 100) > (1.2 * M)) { - // Cap default SweeperThreshold value to an equivalent of 1.2 Mb - FLAG_SET_ERGO(SweeperThreshold, (1.2 * M * 100) / ReservedCodeCacheSize); - } - } - if (UseOnStackReplacement && !UseLoopCounter) { warning("On-stack-replacement requires loop counters; enabling loop counters"); FLAG_SET_DEFAULT(UseLoopCounter, true);
diff --git a/src/hotspot/share/compiler/compilerThread.cpp b/src/hotspot/share/compiler/compilerThread.cpp index fd1ce21a9f812..57a9e07daa281 100644 --- a/src/hotspot/share/compiler/compilerThread.cpp +++ b/src/hotspot/share/compiler/compilerThread.cpp @@ -27,7 +27,6 @@ #include "compiler/compileTask.hpp" #include "compiler/compilerThread.hpp" #include "runtime/javaThread.inline.hpp" -#include "runtime/sweeper.hpp" // Create a CompilerThread CompilerThread::CompilerThread(CompileQueue* queue, @@ -62,34 +61,3 @@ void CompilerThread::thread_entry(JavaThread* thread, TRAPS) { bool CompilerThread::can_call_java() const { return _compiler != NULL && _compiler->is_jvmci(); } - -// Create sweeper thread -CodeCacheSweeperThread::CodeCacheSweeperThread() -: JavaThread(&CodeCacheSweeperThread::thread_entry) { - _scanned_compiled_method = NULL; -} - -void CodeCacheSweeperThread::thread_entry(JavaThread* thread, TRAPS) { - NMethodSweeper::sweeper_loop(); -} - -void CodeCacheSweeperThread::oops_do_no_frames(OopClosure* f, CodeBlobClosure* cf) { - JavaThread::oops_do_no_frames(f, cf); - if (_scanned_compiled_method != NULL && cf != NULL) { - // Safepoints can occur when the sweeper is scanning an nmethod so - // process it here to make sure it isn't unloaded in the middle of - // a scan. - cf->do_code_blob(_scanned_compiled_method); - } -} - -void CodeCacheSweeperThread::nmethods_do(CodeBlobClosure* cf) { - JavaThread::nmethods_do(cf); - if (_scanned_compiled_method != NULL && cf != NULL) { - // Safepoints can occur when the sweeper is scanning an nmethod so - // process it here to make sure it isn't unloaded in the middle of - // a scan. - cf->do_code_blob(_scanned_compiled_method); - } -} -
diff --git a/src/hotspot/share/compiler/compilerThread.hpp b/src/hotspot/share/compiler/compilerThread.hpp index 1c1fcd438b1a9..ab8ccc5d02329 100644 --- a/src/hotspot/share/compiler/compilerThread.hpp +++ b/src/hotspot/share/compiler/compilerThread.hpp @@ -116,29 +116,4 @@ class CompilerThread : public JavaThread { static void thread_entry(JavaThread* thread, TRAPS); }; -// Dedicated thread to sweep the code cache -class CodeCacheSweeperThread : public JavaThread { - CompiledMethod* _scanned_compiled_method; // nmethod being scanned by the sweeper - - static void thread_entry(JavaThread* thread, TRAPS); - - public: - CodeCacheSweeperThread(); - // Track the nmethod currently being scanned by the sweeper - void set_scanned_compiled_method(CompiledMethod* cm) { - assert(_scanned_compiled_method == NULL || cm == NULL, "should reset to NULL before writing a new value"); - _scanned_compiled_method = cm; - } - - // Hide sweeper thread from external view.
- bool is_hidden_from_external_view() const { return true; } - - bool is_Code_cache_sweeper_thread() const { return true; } - - // Prevent GC from unloading _scanned_compiled_method - void oops_do_no_frames(OopClosure* f, CodeBlobClosure* cf); - void nmethods_do(CodeBlobClosure* cf); -}; - - #endif // SHARE_COMPILER_COMPILERTHREAD_HPP diff --git a/src/hotspot/share/compiler/oopMap.inline.hpp b/src/hotspot/share/compiler/oopMap.inline.hpp index d9cd5ae1ee8ee..11978def80e3b 100644 --- a/src/hotspot/share/compiler/oopMap.inline.hpp +++ b/src/hotspot/share/compiler/oopMap.inline.hpp @@ -31,6 +31,9 @@ #include "runtime/frame.inline.hpp" #include "runtime/globals.hpp" #include "utilities/ostream.hpp" +#if INCLUDE_JVMCI +#include "jvmci/jvmci_globals.hpp" +#endif inline const ImmutableOopMap* ImmutableOopMapSet::find_map_at_slot(int slot, int pc_offset) const { assert(slot >= 0 && slot < _count, "bounds count: %d slot: %d", _count, slot); diff --git a/src/hotspot/share/gc/epsilon/epsilonHeap.hpp b/src/hotspot/share/gc/epsilon/epsilonHeap.hpp index a3f60d2fb0969..2cd02994b140c 100644 --- a/src/hotspot/share/gc/epsilon/epsilonHeap.hpp +++ b/src/hotspot/share/gc/epsilon/epsilonHeap.hpp @@ -124,7 +124,6 @@ class EpsilonHeap : public CollectedHeap { // No nmethod handling virtual void register_nmethod(nmethod* nm) {} virtual void unregister_nmethod(nmethod* nm) {} - virtual void flush_nmethod(nmethod* nm) {} virtual void verify_nmethod(nmethod* nm) {} // No heap verification diff --git a/src/hotspot/share/gc/g1/g1CodeBlobClosure.cpp b/src/hotspot/share/gc/g1/g1CodeBlobClosure.cpp index 97d6b1bd4c6b8..cb0f4e8172911 100644 --- a/src/hotspot/share/gc/g1/g1CodeBlobClosure.cpp +++ b/src/hotspot/share/gc/g1/g1CodeBlobClosure.cpp @@ -81,8 +81,8 @@ void G1CodeBlobClosure::do_evacuation_and_fixup(nmethod* nm) { nm->oops_do(&_oc); if (_strong) { - // CodeCache sweeper support - nm->mark_as_maybe_on_continuation(); + // CodeCache unloading support + nm->mark_as_maybe_on_stack(); BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); if (bs_nm != NULL) { @@ -97,8 +97,8 @@ void G1CodeBlobClosure::do_marking(nmethod* nm) { // Mark through oops in the nmethod nm->oops_do(&_marking_oc); - // CodeCache sweeper support - nm->mark_as_maybe_on_continuation(); + // CodeCache unloading support + nm->mark_as_maybe_on_stack(); BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); if (bs_nm != NULL) { diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp index 41c71ff352891..270d619022c2a 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp @@ -1880,6 +1880,7 @@ bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { case GCCause::_g1_humongous_allocation: return true; case GCCause::_g1_periodic_collection: return G1PeriodicGCInvokesConcurrent; case GCCause::_wb_breakpoint: return true; + case GCCause::_codecache_GC_aggressive: return true; case GCCause::_codecache_GC_threshold: return true; default: return is_user_requested_concurrent_full_gc(cause); } @@ -3427,14 +3428,14 @@ void G1CollectedHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, boo } void G1CollectedHeap::start_codecache_marking_cycle_if_inactive() { - if (!Continuations::is_gc_marking_cycle_active()) { + if (!CodeCache::is_gc_marking_cycle_active()) { // This is the normal case when we do not call collect when a // concurrent mark is ongoing. We then start a new code marking // cycle. 
If, on the other hand, a concurrent mark is ongoing, we // will be conservative and use the last code marking cycle. Code // caches marked between the two concurrent marks will live a bit // longer than needed. - Continuations::on_gc_marking_cycle_start(); - Continuations::arm_all_nmethods(); + CodeCache::on_gc_marking_cycle_start(); + CodeCache::arm_all_nmethods(); } } diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp index 7ca576ab6eeae..fa3cdd2abd913 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp @@ -1246,9 +1246,6 @@ class G1CollectedHeap : public CollectedHeap { // Unregister the given nmethod from the G1 heap. void unregister_nmethod(nmethod* nm) override; - // No nmethod flushing needed. - void flush_nmethod(nmethod* nm) override {} - // No nmethod verification implemented. void verify_nmethod(nmethod* nm) override {} diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp index 2e2145eab967c..c3d60de6b70fc 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp @@ -67,7 +67,6 @@ #include "oops/access.inline.hpp" #include "oops/oop.inline.hpp" #include "runtime/atomic.hpp" -#include "runtime/continuation.hpp" #include "runtime/globals_extension.hpp" #include "runtime/handles.inline.hpp" #include "runtime/java.hpp" @@ -1319,8 +1318,8 @@ void G1ConcurrentMark::remark() { report_object_count(mark_finished); } - Continuations::on_gc_marking_cycle_finish(); - Continuations::arm_all_nmethods(); + CodeCache::on_gc_marking_cycle_finish(); + CodeCache::arm_all_nmethods(); // Statistics double now = os::elapsedTime(); diff --git a/src/hotspot/share/gc/g1/g1FullCollector.cpp b/src/hotspot/share/gc/g1/g1FullCollector.cpp index dadfa9a507987..3287a79169621 100644 --- a/src/hotspot/share/gc/g1/g1FullCollector.cpp +++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp @@ -44,7 +44,6 @@ #include "gc/shared/weakProcessor.inline.hpp" #include "gc/shared/workerPolicy.hpp" #include "logging/log.hpp" -#include "runtime/continuation.hpp" #include "runtime/handles.inline.hpp" #include "utilities/debug.hpp" @@ -210,8 +209,8 @@ void G1FullCollector::collect() { phase4_do_compaction(); - Continuations::on_gc_marking_cycle_finish(); - Continuations::arm_all_nmethods(); + CodeCache::on_gc_marking_cycle_finish(); + CodeCache::arm_all_nmethods(); } void G1FullCollector::complete_collection() { diff --git a/src/hotspot/share/gc/g1/g1MMUTracker.cpp b/src/hotspot/share/gc/g1/g1MMUTracker.cpp index c899779d92ec2..27436149353e0 100644 --- a/src/hotspot/share/gc/g1/g1MMUTracker.cpp +++ b/src/hotspot/share/gc/g1/g1MMUTracker.cpp @@ -111,46 +111,61 @@ void G1MMUTracker::add_pause(double start, double end) { } } +// current_timestamp +// GC events / pause_time +// / | \ \ | / / +// -------------[----]-[---]--[--]---[---]------|[--]-----> Time +// | | | +// | | | +// |<- limit | | +// | |<- balance_timestamp | +// | ^ | +// | | +// |<-------- _time_slice ------>| +// +// The MMU constraint requires that we can spend up to `max_gc_time()` on GC +// pauses inside a window of `_time_slice` length. Therefore, we have a GC +// budget of `max_gc_time() - pause_time`, which is to be accounted for by past +// GC events. 
+// +// Focusing on GC events that are inside [limit, current_timestamp], we iterate +// over them from the newest to the oldest (right-to-left in the diagram) and +// try to locate the timestamp annotated with ^, so that the accumulated GC +// time inside [balance_timestamp, current_timestamp] is equal to the budget. +// Next, return `balance_timestamp - limit`. +// +// When there are not enough GC events, i.e. we have a budget surplus, a new GC +// pause can start right away, so return 0. double G1MMUTracker::when_sec(double current_timestamp, double pause_time) { + assert(pause_time > 0.0, "precondition"); + // If the pause is over the maximum, just assume that it's the maximum. - double adjusted_pause_time = - (pause_time > max_gc_time()) ? max_gc_time() : pause_time; + pause_time = MIN2(pause_time, max_gc_time()); - // Earliest end time of a hypothetical pause starting now, taking pause_time. - double earliest_end_time = current_timestamp + adjusted_pause_time; - double gc_time_in_recent_time_slice = calculate_gc_time(earliest_end_time) + adjusted_pause_time; + double gc_budget = max_gc_time() - pause_time; - // How much gc time is needed to pass within the MMU window to fit the given pause into the MMU. - double gc_time_to_pass = gc_time_in_recent_time_slice - max_gc_time(); + double limit = current_timestamp + pause_time - _time_slice; + // Iterate from newest to oldest. + for (int i = 0; i < _no_entries; ++i) { + int index = trim_index(_head_index + i); + G1MMUTrackerElem *elem = &_array[index]; + // Outside the window. + if (elem->end_time() <= limit) { + break; + } - // If that time to pass is zero or negative we could start the pause immediately. - if (is_double_leq_0(gc_time_to_pass)) { - return 0.0; - } + double duration = (elem->end_time() - MAX2(elem->start_time(), limit)); + // This duration would exceed (strictly greater than) the budget. + if (duration > gc_budget) { + // This timestamp captures the instant the budget is balanced (or used up). + double balance_timestamp = elem->end_time() - gc_budget; + assert(balance_timestamp >= limit, "inv"); + return balance_timestamp - limit; + } - // Trivially, if the pause is of maximum pause time, the required delay is what the MMU dictates by - // the time slice and maximum gc pause, counted from the end of the last pause. - if (adjusted_pause_time == max_gc_time()) { - G1MMUTrackerElem *elem = &_array[_head_index]; - return (elem->end_time() + (_time_slice - max_gc_time())) - current_timestamp; + gc_budget -= duration; } - // Now go through the recent pause time events, - double limit = earliest_end_time - _time_slice; - int index = _tail_index; - while ( 1 ) { - G1MMUTrackerElem *elem = &_array[index]; - if (elem->end_time() > limit) { - if (elem->start_time() > limit) { - gc_time_to_pass -= elem->duration(); - } else { - gc_time_to_pass -= elem->end_time() - limit; - } - if (is_double_leq_0(gc_time_to_pass)) { - return elem->end_time() + (_time_slice + gc_time_to_pass) - earliest_end_time; - } - } - index = trim_index(index+1); - guarantee(index != trim_index(_head_index + 1), "should not go past head"); - } + // Not enough GC time was spent inside the window; we have a budget surplus. + return 0; }
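To make the arithmetic concrete, here is a standalone sketch of the same budget walk over a plain vector (hypothetical types, not the G1MMUTrackerElem queue used above): with a 20 ms time slice, 10 ms of allowed GC time per slice, one recent 8 ms pause ending at t = 1.000 s, and a 5 ms pause requested at t = 1.001 s, it reports a 9 ms wait, after which the window ending at the new pause's end holds exactly 5 ms of the old pause plus the 5 ms new one.

#include <algorithm>
#include <cstdio>
#include <vector>

struct Pause { double start, end; }; // seconds, stored newest first

// Mirrors the rewritten G1MMUTracker::when_sec() on plain data.
double when_sec(const std::vector<Pause>& newest_first,
                double current, double pause_time,
                double time_slice, double max_gc_time) {
  pause_time = std::min(pause_time, max_gc_time);
  double budget = max_gc_time - pause_time;
  double limit  = current + pause_time - time_slice;
  for (const Pause& p : newest_first) {
    if (p.end <= limit) break;                 // pause lies outside the window
    double duration = p.end - std::max(p.start, limit);
    if (duration > budget) {                   // budget is used up within p
      return (p.end - budget) - limit;         // balance_timestamp - limit
    }
    budget -= duration;
  }
  return 0.0;                                  // budget surplus: start now
}

int main() {
  std::vector<Pause> pauses = {{0.992, 1.000}};
  printf("wait %.3f s\n", when_sec(pauses, 1.001, 0.005, 0.020, 0.010)); // 0.009
}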
diff --git a/src/hotspot/share/gc/g1/g1MMUTracker.hpp b/src/hotspot/share/gc/g1/g1MMUTracker.hpp index 2f42e2d4cb160..b737ea5a5da85 100644 --- a/src/hotspot/share/gc/g1/g1MMUTracker.hpp +++ b/src/hotspot/share/gc/g1/g1MMUTracker.hpp @@ -103,7 +103,6 @@ class G1MMUTracker: public CHeapObj { // Returns the amount of time spent in gc pauses in the time slice before the // given timestamp. double calculate_gc_time(double current_timestamp); - public: G1MMUTracker(double time_slice, double max_gc_time);
diff --git a/src/hotspot/share/gc/g1/g1Policy.cpp b/src/hotspot/share/gc/g1/g1Policy.cpp index 78e8e9aa31916..b6b4c03465099 100644 --- a/src/hotspot/share/gc/g1/g1Policy.cpp +++ b/src/hotspot/share/gc/g1/g1Policy.cpp @@ -1190,6 +1190,7 @@ void G1Policy::decide_on_concurrent_start_pause() { log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)"); } else if (_g1h->is_user_requested_concurrent_full_gc(cause) || (cause == GCCause::_codecache_GC_threshold) || + (cause == GCCause::_codecache_GC_aggressive) || (cause == GCCause::_wb_breakpoint)) { // Initiate a concurrent start. A concurrent start must be a young only // GC, so the collector state must be updated to reflect this.
diff --git a/src/hotspot/share/gc/g1/g1RemSet.cpp b/src/hotspot/share/gc/g1/g1RemSet.cpp index 8008f51067c6c..15bd1c087ef3d 100644 --- a/src/hotspot/share/gc/g1/g1RemSet.cpp +++ b/src/hotspot/share/gc/g1/g1RemSet.cpp @@ -1322,8 +1322,7 @@ class G1MergeHeapRootsTask : public WorkerTask { virtual bool do_heap_region(HeapRegion* r) { G1CollectedHeap* g1h = G1CollectedHeap::heap(); - if (!r->is_starts_humongous() || - !g1h->region_attr(r->hrm_index()).is_humongous() || + if (!g1h->region_attr(r->hrm_index()).is_humongous() || r->rem_set()->is_empty()) { return false; }
diff --git a/src/hotspot/share/gc/g1/heapRegion.cpp b/src/hotspot/share/gc/g1/heapRegion.cpp index 83051590edaa8..fbb3663969e4c 100644 --- a/src/hotspot/share/gc/g1/heapRegion.cpp +++ b/src/hotspot/share/gc/g1/heapRegion.cpp @@ -363,22 +363,16 @@ class VerifyCodeRootCodeBlobClosure: public CodeBlobClosure { nmethod* nm = (cb == NULL) ? NULL : cb->as_compiled_method()->as_nmethod_or_null(); if (nm != NULL) { // Verify that the nmethod is live - if (!nm->is_alive()) { - log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has dead nmethod " PTR_FORMAT " in its code roots", + VerifyCodeRootOopClosure oop_cl(_hr); + nm->oops_do(&oop_cl); + if (!oop_cl.has_oops_in_region()) { + log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has nmethod " PTR_FORMAT " in its code roots with no pointers into region", + p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm)); + _failures = true; + } else if (oop_cl.failures()) { + log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has other failures for nmethod " PTR_FORMAT, p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm)); _failures = true; - } else { - VerifyCodeRootOopClosure oop_cl(_hr); - nm->oops_do(&oop_cl); - if (!oop_cl.has_oops_in_region()) { - log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has nmethod " PTR_FORMAT " in its code roots with no pointers into region", - p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm)); - _failures = true; - } else if (oop_cl.failures()) { - log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has other failures for nmethod " PTR_FORMAT, - p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm)); - _failures = true; - } } } }
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp index 2f701b2ed084a..71630b8564d3f 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp @@ -842,10 +842,6 @@ void ParallelScavengeHeap::verify_nmethod(nmethod* nm) { ScavengableNMethods::verify_nmethod(nm); } -void ParallelScavengeHeap::flush_nmethod(nmethod* nm) { - // nothing particular -} - void ParallelScavengeHeap::prune_scavengable_nmethods() { ScavengableNMethods::prune_nmethods(); }
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp index b7edae86a0955..ffde30fbb5a73 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp @@ -173,7 +173,6 @@ class ParallelScavengeHeap : public CollectedHeap { virtual void register_nmethod(nmethod* nm); virtual void unregister_nmethod(nmethod* nm); virtual void verify_nmethod(nmethod* nm); - virtual void flush_nmethod(nmethod* nm); void prune_scavengable_nmethods();
diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.cpp b/src/hotspot/share/gc/parallel/psParallelCompact.cpp index 00ae30a9dca9c..25bc767e5728a 100644 --- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp +++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp @@ -75,7 +75,6 @@ #include "oops/objArrayKlass.inline.hpp" #include "oops/oop.inline.hpp" #include "runtime/atomic.hpp" -#include "runtime/continuation.hpp" #include "runtime/handles.inline.hpp" #include "runtime/java.hpp" #include "runtime/safepoint.hpp" @@ -962,8 +961,8 @@ void PSParallelCompact::pre_compact() // Increment the invocation count heap->increment_total_collections(true); - Continuations::on_gc_marking_cycle_start(); - Continuations::arm_all_nmethods(); + CodeCache::on_gc_marking_cycle_start(); + CodeCache::arm_all_nmethods(); // We need to track unique mark sweep invocations as well.
_total_invocations++; @@ -995,8 +994,8 @@ void PSParallelCompact::post_compact() GCTraceTime(Info, gc, phases) tm("Post Compact", &_gc_timer); ParCompactionManager::remove_all_shadow_regions(); - Continuations::on_gc_marking_cycle_finish(); - Continuations::arm_all_nmethods(); + CodeCache::on_gc_marking_cycle_finish(); + CodeCache::arm_all_nmethods(); for (unsigned int id = old_space_id; id < last_space_id; ++id) { // Clear the marking bitmap, summary data and split info. diff --git a/src/hotspot/share/gc/shared/barrierSet.cpp b/src/hotspot/share/gc/shared/barrierSet.cpp index bfd62381d0ac5..3981c745f963d 100644 --- a/src/hotspot/share/gc/shared/barrierSet.cpp +++ b/src/hotspot/share/gc/shared/barrierSet.cpp @@ -55,12 +55,10 @@ static BarrierSetNMethod* select_barrier_set_nmethod(BarrierSetNMethod* barrier_ if (barrier_set_nmethod != NULL) { // The GC needs nmethod entry barriers to do concurrent GC return barrier_set_nmethod; - } else if (Continuations::enabled()) { - // The GC needs nmethod entry barriers to deal with continuations - return new BarrierSetNMethod(); } else { - // The GC does not need nmethod entry barriers - return NULL; + // The GC needs nmethod entry barriers to deal with continuations + // and code cache unloading + return NOT_ARM32(new BarrierSetNMethod()) ARM32_ONLY(nullptr); } } @@ -77,8 +75,8 @@ BarrierSet::BarrierSet(BarrierSetAssembler* barrier_set_assembler, } void BarrierSet::on_thread_attach(Thread* thread) { - if (Continuations::enabled()) { - BarrierSetNMethod* bs_nm = barrier_set_nmethod(); + BarrierSetNMethod* bs_nm = barrier_set_nmethod(); + if (bs_nm != nullptr) { thread->set_nmethod_disarm_value(bs_nm->disarmed_value()); } } diff --git a/src/hotspot/share/gc/shared/barrierSetNMethod.cpp b/src/hotspot/share/gc/shared/barrierSetNMethod.cpp index cbac258abdb60..ca72427ef61d0 100644 --- a/src/hotspot/share/gc/shared/barrierSetNMethod.cpp +++ b/src/hotspot/share/gc/shared/barrierSetNMethod.cpp @@ -85,8 +85,8 @@ bool BarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) { OopKeepAliveClosure cl; nm->oops_do(&cl); - // CodeCache sweeper support - nm->mark_as_maybe_on_continuation(); + // CodeCache unloading support + nm->mark_as_maybe_on_stack(); disarm(nm); @@ -130,9 +130,11 @@ void BarrierSetNMethod::arm_all_nmethods() { BarrierSetNMethodArmClosure cl(_current_phase); Threads::threads_do(&cl); +#if (defined(AARCH64) || defined(RISCV64)) && !defined(ZERO) // We clear the patching epoch when disarming nmethods, so that // the counter won't overflow. 
- AARCH64_PORT_ONLY(BarrierSetAssembler::clear_patching_epoch()); + BarrierSetAssembler::clear_patching_epoch(); +#endif } int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) { diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp index 61d8411a5ea6b..8eb481df2ff63 100644 --- a/src/hotspot/share/gc/shared/collectedHeap.cpp +++ b/src/hotspot/share/gc/shared/collectedHeap.cpp @@ -290,9 +290,10 @@ void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { GCCauseSetter gcs(this, cause); switch (cause) { case GCCause::_codecache_GC_threshold: + case GCCause::_codecache_GC_aggressive: case GCCause::_heap_inspection: case GCCause::_heap_dump: - case GCCause::_metadata_GC_threshold : { + case GCCause::_metadata_GC_threshold: { HandleMark hm(thread); do_full_collection(false); // don't clear all soft refs break; diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp index a0644f9423929..e952ba897ca51 100644 --- a/src/hotspot/share/gc/shared/collectedHeap.hpp +++ b/src/hotspot/share/gc/shared/collectedHeap.hpp @@ -479,8 +479,6 @@ class CollectedHeap : public CHeapObj { // Registering and unregistering an nmethod (compiled code) with the heap. virtual void register_nmethod(nmethod* nm) = 0; virtual void unregister_nmethod(nmethod* nm) = 0; - // Callback for when nmethod is about to be deleted. - virtual void flush_nmethod(nmethod* nm) = 0; virtual void verify_nmethod(nmethod* nm) = 0; void trace_heap_before_gc(const GCTracer* gc_tracer); diff --git a/src/hotspot/share/gc/shared/gcBehaviours.cpp b/src/hotspot/share/gc/shared/gcBehaviours.cpp index 2f496cc85aaf5..d0a4eb79a4029 100644 --- a/src/hotspot/share/gc/shared/gcBehaviours.cpp +++ b/src/hotspot/share/gc/shared/gcBehaviours.cpp @@ -29,6 +29,10 @@ IsUnloadingBehaviour* IsUnloadingBehaviour::_current = NULL; +bool IsUnloadingBehaviour::is_unloading(CompiledMethod* cm) { + return _current->has_dead_oop(cm) || cm->as_nmethod()->is_cold(); +} + class IsCompiledMethodUnloadingOopClosure: public OopClosure { BoolObjectClosure *_cl; bool _is_unloading; @@ -61,7 +65,7 @@ class IsCompiledMethodUnloadingOopClosure: public OopClosure { } }; -bool ClosureIsUnloadingBehaviour::is_unloading(CompiledMethod* cm) const { +bool ClosureIsUnloadingBehaviour::has_dead_oop(CompiledMethod* cm) const { if (cm->is_nmethod()) { IsCompiledMethodUnloadingOopClosure cl(_cl); static_cast(cm)->oops_do(&cl, true /* allow_dead */); diff --git a/src/hotspot/share/gc/shared/gcBehaviours.hpp b/src/hotspot/share/gc/shared/gcBehaviours.hpp index e765faf825dcb..6265123f0f61c 100644 --- a/src/hotspot/share/gc/shared/gcBehaviours.hpp +++ b/src/hotspot/share/gc/shared/gcBehaviours.hpp @@ -34,7 +34,8 @@ class IsUnloadingBehaviour { static IsUnloadingBehaviour* _current; public: - virtual bool is_unloading(CompiledMethod* cm) const = 0; + static bool is_unloading(CompiledMethod* cm); + virtual bool has_dead_oop(CompiledMethod* cm) const = 0; static IsUnloadingBehaviour* current() { return _current; } static void set_current(IsUnloadingBehaviour* current) { _current = current; } }; @@ -47,7 +48,7 @@ class ClosureIsUnloadingBehaviour: public IsUnloadingBehaviour { : _cl(is_alive) { } - virtual bool is_unloading(CompiledMethod* cm) const; + virtual bool has_dead_oop(CompiledMethod* cm) const; }; #endif // SHARE_GC_SHARED_GCBEHAVIOURS_HPP diff --git a/src/hotspot/share/gc/shared/gcCause.cpp b/src/hotspot/share/gc/shared/gcCause.cpp index 
3f01ee3593937..37cd32b0ff2ba 100644 --- a/src/hotspot/share/gc/shared/gcCause.cpp +++ b/src/hotspot/share/gc/shared/gcCause.cpp @@ -75,6 +75,9 @@ const char* GCCause::to_string(GCCause::Cause cause) { case _codecache_GC_threshold: return "CodeCache GC Threshold"; + case _codecache_GC_aggressive: + return "CodeCache GC Aggressive"; + case _metadata_GC_threshold: return "Metadata GC Threshold"; diff --git a/src/hotspot/share/gc/shared/gcCause.hpp b/src/hotspot/share/gc/shared/gcCause.hpp index c9a2287ddd530..1def51523d6f9 100644 --- a/src/hotspot/share/gc/shared/gcCause.hpp +++ b/src/hotspot/share/gc/shared/gcCause.hpp @@ -64,6 +64,7 @@ class GCCause : public AllStatic { /* implementation specific */ _codecache_GC_threshold, + _codecache_GC_aggressive, _metadata_GC_threshold, _metadata_GC_clear_soft_refs, diff --git a/src/hotspot/share/gc/shared/genCollectedHeap.cpp b/src/hotspot/share/gc/shared/genCollectedHeap.cpp index 5558f1a4a32cb..7ec0156735d65 100644 --- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp +++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp @@ -64,7 +64,6 @@ #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/oop.inline.hpp" -#include "runtime/continuation.hpp" #include "runtime/handles.hpp" #include "runtime/handles.inline.hpp" #include "runtime/java.hpp" @@ -608,8 +607,8 @@ void GenCollectedHeap::do_collection(bool full, increment_total_full_collections(); } - Continuations::on_gc_marking_cycle_start(); - Continuations::arm_all_nmethods(); + CodeCache::on_gc_marking_cycle_start(); + CodeCache::arm_all_nmethods(); collect_generation(_old_gen, full, @@ -618,8 +617,8 @@ void GenCollectedHeap::do_collection(bool full, run_verification && VerifyGCLevel <= 1, do_clear_all_soft_refs); - Continuations::on_gc_marking_cycle_finish(); - Continuations::arm_all_nmethods(); + CodeCache::on_gc_marking_cycle_finish(); + CodeCache::arm_all_nmethods(); // Adjust generation sizes. _old_gen->compute_new_size(); @@ -662,10 +661,6 @@ void GenCollectedHeap::verify_nmethod(nmethod* nm) { ScavengableNMethods::verify_nmethod(nm); } -void GenCollectedHeap::flush_nmethod(nmethod* nm) { - // Do nothing. -} - void GenCollectedHeap::prune_scavengable_nmethods() { ScavengableNMethods::prune_nmethods(); } diff --git a/src/hotspot/share/gc/shared/genCollectedHeap.hpp b/src/hotspot/share/gc/shared/genCollectedHeap.hpp index 0d29cb3285c8a..3b91d087b2ccb 100644 --- a/src/hotspot/share/gc/shared/genCollectedHeap.hpp +++ b/src/hotspot/share/gc/shared/genCollectedHeap.hpp @@ -211,7 +211,6 @@ class GenCollectedHeap : public CollectedHeap { virtual void register_nmethod(nmethod* nm); virtual void unregister_nmethod(nmethod* nm); virtual void verify_nmethod(nmethod* nm); - virtual void flush_nmethod(nmethod* nm); void prune_scavengable_nmethods(); @@ -385,15 +384,6 @@ class GenCollectedHeap : public CollectedHeap { _incremental_collection_failed = false; } - // Promotion of obj into gen failed. Try to promote obj to higher - // gens in ascending order; return the new location of obj if successful. - // Otherwise, try expand-and-allocate for obj in both the young and old - // generation; return the new location of obj if successful. Otherwise, return NULL. - oop handle_failed_promotion(Generation* old_gen, - oop obj, - size_t obj_size); - - private: // Return true if an allocation should be attempted in the older generation // if it fails in the younger generation. Return false, otherwise. 
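For context on the gcBehaviours.hpp rework above: a collector now implements only the dead-oop test, while the shared static entry point adds the is_cold() heuristic that replaces sweeper-driven flushing. A minimal sketch under that interface (hypothetical collector class, not from this patch):

// Hypothetical GC-specific behaviour: it only answers whether the compiled
// method still references dead oops; coldness is folded in by shared code.
class MyGCIsUnloadingBehaviour : public IsUnloadingBehaviour {
public:
  virtual bool has_dead_oop(CompiledMethod* cm) const {
    // A real collector would run its liveness closure over cm's oops here.
    return false;
  }
};

// Callers go through the static entry point, which ORs in the cold test:
//   if (IsUnloadingBehaviour::is_unloading(cm)) { /* unlink the nmethod */ }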
diff --git a/src/hotspot/share/gc/shared/generation.hpp b/src/hotspot/share/gc/shared/generation.hpp index 68885e556d091..93a7a6cc50c9b 100644 --- a/src/hotspot/share/gc/shared/generation.hpp +++ b/src/hotspot/share/gc/shared/generation.hpp @@ -249,12 +249,6 @@ class Generation: public CHeapObj { // avoid repeating the virtual call to retrieve it. virtual oop promote(oop obj, size_t obj_size); - // Informs the current generation that all oop_since_save_marks_iterates - // performed by "thread_num" in the current collection, if any, have been - // completed; any supporting data structures can be reset. Default is to - // do nothing. - virtual void par_oop_since_save_marks_iterate_done(int thread_num) {} - // Returns "true" iff collect() should subsequently be called on this // this generation. See comment below. // This is a generic implementation which can be overridden. diff --git a/src/hotspot/share/gc/shared/parallelCleaning.cpp b/src/hotspot/share/gc/shared/parallelCleaning.cpp index fc84269fb2489..a3630557328dc 100644 --- a/src/hotspot/share/gc/shared/parallelCleaning.cpp +++ b/src/hotspot/share/gc/shared/parallelCleaning.cpp @@ -38,7 +38,7 @@ CodeCacheUnloadingTask::CodeCacheUnloadingTask(uint num_workers, bool unloading_ _first_nmethod(NULL), _claimed_nmethod(NULL) { // Get first alive nmethod - CompiledMethodIterator iter(CompiledMethodIterator::only_alive); + CompiledMethodIterator iter(CompiledMethodIterator::all_blobs); if(iter.next()) { _first_nmethod = iter.method(); } @@ -52,13 +52,13 @@ CodeCacheUnloadingTask::~CodeCacheUnloadingTask() { void CodeCacheUnloadingTask::claim_nmethods(CompiledMethod** claimed_nmethods, int *num_claimed_nmethods) { CompiledMethod* first; - CompiledMethodIterator last(CompiledMethodIterator::only_alive); + CompiledMethodIterator last(CompiledMethodIterator::all_blobs); do { *num_claimed_nmethods = 0; first = _claimed_nmethod; - last = CompiledMethodIterator(CompiledMethodIterator::only_alive, first); + last = CompiledMethodIterator(CompiledMethodIterator::all_blobs, first); if (first != NULL) { diff --git a/src/hotspot/share/gc/shared/scavengableNMethods.cpp b/src/hotspot/share/gc/shared/scavengableNMethods.cpp index a462d6a331c8b..24da9eda14f18 100644 --- a/src/hotspot/share/gc/shared/scavengableNMethods.cpp +++ b/src/hotspot/share/gc/shared/scavengableNMethods.cpp @@ -149,8 +149,6 @@ void ScavengableNMethods::nmethods_do_and_prune(CodeBlobToOopClosure* cl) { nmethod* prev = NULL; nmethod* cur = _head; while (cur != NULL) { - assert(cur->is_alive(), "Must be"); - ScavengableNMethodsData data = gc_data(cur); debug_only(data.clear_marked()); assert(data.on_list(), "else shouldn't be on this list"); @@ -215,7 +213,7 @@ void ScavengableNMethods::unlist_nmethod(nmethod* nm, nmethod* prev) { #ifndef PRODUCT // Temporarily mark nmethods that are claimed to be on the scavenge list. void ScavengableNMethods::mark_on_list_nmethods() { - NMethodIterator iter(NMethodIterator::only_alive); + NMethodIterator iter(NMethodIterator::all_blobs); while(iter.next()) { nmethod* nm = iter.method(); ScavengableNMethodsData data = gc_data(nm); @@ -228,7 +226,7 @@ void ScavengableNMethods::mark_on_list_nmethods() { // If the closure is given, run it on the unlisted nmethods. // Also make sure that the effects of mark_on_list_nmethods is gone. 
void ScavengableNMethods::verify_unlisted_nmethods(CodeBlobClosure* cl) { - NMethodIterator iter(NMethodIterator::only_alive); + NMethodIterator iter(NMethodIterator::all_blobs); while(iter.next()) { nmethod* nm = iter.method(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetNMethod.cpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetNMethod.cpp index f110e91d80f99..150179c1e05f5 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetNMethod.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetNMethod.cpp @@ -63,8 +63,8 @@ bool ShenandoahBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) { // Heal oops ShenandoahNMethod::heal_nmethod(nm); - // CodeCache sweeper support - nm->mark_as_maybe_on_continuation(); + // CodeCache unloading support + nm->mark_as_maybe_on_stack(); // Disarm ShenandoahNMethod::disarm_nmethod(nm); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp index 3c6bfe00e21c6..237e080a584fa 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp @@ -90,13 +90,11 @@ void ShenandoahParallelCodeHeapIterator::parallel_blobs_do(CodeBlobClosure* f) { (Atomic::cmpxchg(&_claimed_idx, current, current + stride, memory_order_relaxed) == current); } if (process_block) { - if (cb->is_alive()) { - f->do_code_blob(cb); + f->do_code_blob(cb); #ifdef ASSERT - if (cb->is_nmethod()) - Universe::heap()->verify_nmethod((nmethod*)cb); + if (cb->is_nmethod()) + Universe::heap()->verify_nmethod((nmethod*)cb); #endif - } } } @@ -120,11 +118,6 @@ void ShenandoahCodeRoots::unregister_nmethod(nmethod* nm) { _nmethod_table->unregister_nmethod(nm); } -void ShenandoahCodeRoots::flush_nmethod(nmethod* nm) { - assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held"); - _nmethod_table->flush_nmethod(nm); -} - void ShenandoahCodeRoots::arm_nmethods() { assert(BarrierSet::barrier_set()->barrier_set_nmethod() != NULL, "Sanity"); BarrierSet::barrier_set()->barrier_set_nmethod()->arm_all_nmethods(); @@ -187,22 +180,6 @@ class ShenandoahNMethodUnlinkClosure : public NMethodClosure { Atomic::store(&_failed, true); } - void unlink(nmethod* nm) { - // Unlinking of the dependencies must happen before the - // handshake separating unlink and purge. - nm->flush_dependencies(false /* delete_immediately */); - - // unlink_from_method will take the CompiledMethod_lock. - // In this case we don't strictly need it when unlinking nmethods from - // the Method, because it is only concurrently unlinked by - // the entry barrier, which acquires the per nmethod lock. 
- nm->unlink_from_method(); - - if (nm->is_osr_method()) { - // Invalidate the osr nmethod only once - nm->invalidate_osr_method(); - } - } public: ShenandoahNMethodUnlinkClosure(bool unloading_occurred) : _unloading_occurred(unloading_occurred), @@ -219,13 +196,9 @@ class ShenandoahNMethodUnlinkClosure : public NMethodClosure { ShenandoahNMethod* nm_data = ShenandoahNMethod::gc_data(nm); assert(!nm_data->is_unregistered(), "Should not see unregistered entry"); - if (!nm->is_alive()) { - return; - } - if (nm->is_unloading()) { ShenandoahReentrantLocker locker(nm_data->lock()); - unlink(nm); + nm->unlink(); return; } @@ -235,13 +208,9 @@ class ShenandoahNMethodUnlinkClosure : public NMethodClosure { if (_bs->is_armed(nm)) { ShenandoahEvacOOMScope oom_evac_scope; ShenandoahNMethod::heal_nmethod_metadata(nm_data); - if (Continuations::enabled()) { - // Loom needs to know about visited nmethods. Arm the nmethods to get - // mark_as_maybe_on_continuation() callbacks when they are used again. - _bs->arm(nm, 0); - } else { - _bs->disarm(nm); - } + // Code cache unloading needs to know about on-stack nmethods. Arm the nmethods to get + // mark_as_maybe_on_stack() callbacks when they are used again. + _bs->arm(nm, 0); } // Clear compiled ICs and exception caches @@ -308,44 +277,10 @@ void ShenandoahCodeRoots::unlink(WorkerThreads* workers, bool unloading_occurred } } -class ShenandoahNMethodPurgeClosure : public NMethodClosure { -public: - virtual void do_nmethod(nmethod* nm) { - if (nm->is_alive() && nm->is_unloading()) { - nm->make_unloaded(); - } - } -}; - -class ShenandoahNMethodPurgeTask : public WorkerTask { -private: - ShenandoahNMethodPurgeClosure _cl; - ShenandoahConcurrentNMethodIterator _iterator; - -public: - ShenandoahNMethodPurgeTask() : - WorkerTask("Shenandoah Purge NMethods"), - _cl(), - _iterator(ShenandoahCodeRoots::table()) { - MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - _iterator.nmethods_do_begin(); - } - - ~ShenandoahNMethodPurgeTask() { - MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - _iterator.nmethods_do_end(); - } - - virtual void work(uint worker_id) { - _iterator.nmethods_do(&_cl); - } -}; - -void ShenandoahCodeRoots::purge(WorkerThreads* workers) { +void ShenandoahCodeRoots::purge() { assert(ShenandoahHeap::heap()->unload_classes(), "Only when running concurrent class unloading"); - ShenandoahNMethodPurgeTask task; - workers->run_task(&task); + CodeCache::flush_unlinked_nmethods(); } ShenandoahCodeRootsIterator::ShenandoahCodeRootsIterator() : diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.hpp b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.hpp index 377d768833337..3493d118a9bc6 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.hpp @@ -96,7 +96,7 @@ class ShenandoahCodeRoots : public AllStatic { // Concurrent nmethod unloading support static void unlink(WorkerThreads* workers, bool unloading_occurred); - static void purge(WorkerThreads* workers); + static void purge(); static void arm_nmethods(); static void disarm_nmethods(); static int disarmed_value() { return _disarmed_value; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp index 366f6c3480a52..3bcaa142763a2 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp @@ -479,6 +479,7 @@ void 
ShenandoahControlThread::request_gc(GCCause::Cause cause) { assert(GCCause::is_user_requested_gc(cause) || GCCause::is_serviceability_requested_gc(cause) || cause == GCCause::_metadata_GC_clear_soft_refs || + cause == GCCause::_codecache_GC_aggressive || cause == GCCause::_codecache_GC_threshold || cause == GCCause::_full_gc_alot || cause == GCCause::_wb_full_gc || diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index d241e3d5b50dd..1adb2d5637e12 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -1915,10 +1915,6 @@ void ShenandoahHeap::unregister_nmethod(nmethod* nm) { ShenandoahCodeRoots::unregister_nmethod(nm); } -void ShenandoahHeap::flush_nmethod(nmethod* nm) { - ShenandoahCodeRoots::flush_nmethod(nm); -} - oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) { heap_region_containing(o)->record_pin(); return o; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp index ec65a424d35d7..0b38d793998b9 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp @@ -502,7 +502,6 @@ class ShenandoahHeap : public CollectedHeap { public: void register_nmethod(nmethod* nm); void unregister_nmethod(nmethod* nm); - void flush_nmethod(nmethod* nm); void verify_nmethod(nmethod* nm) {} // ---------- Pinning hooks diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMark.cpp b/src/hotspot/share/gc/shenandoah/shenandoahMark.cpp index c7a9da36a4ddc..501182fb39e58 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMark.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMark.cpp @@ -33,7 +33,6 @@ #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp" #include "gc/shenandoah/shenandoahUtils.hpp" #include "gc/shenandoah/shenandoahVerifier.hpp" -#include "runtime/continuation.hpp" ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) : MetadataVisitingOopIterateClosure(rp), @@ -47,17 +46,15 @@ ShenandoahMark::ShenandoahMark() : } void ShenandoahMark::start_mark() { - // Tell the sweeper that we start a marking cycle. - if (!Continuations::is_gc_marking_cycle_active()) { - Continuations::on_gc_marking_cycle_start(); + if (!CodeCache::is_gc_marking_cycle_active()) { + CodeCache::on_gc_marking_cycle_start(); } } void ShenandoahMark::end_mark() { - // Tell the sweeper that we finished a marking cycle. // Unlike other GCs, we do not arm the nmethods // when marking terminates. - Continuations::on_gc_marking_cycle_finish(); + CodeCache::on_gc_marking_cycle_finish(); } void ShenandoahMark::clear() { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp index dbc36c5bf826e..781a52cd8963f 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp @@ -168,7 +168,6 @@ void ShenandoahNMethod::heal_nmethod(nmethod* nm) { // There is possibility that GC is cancelled when it arrives final mark. // In this case, concurrent root phase is skipped and degenerated GC should be // followed, where nmethods are disarmed. 
- assert(heap->cancelled_gc() || Continuations::enabled(), "What else?"); } } @@ -300,28 +299,10 @@ void ShenandoahNMethodTable::unregister_nmethod(nmethod* nm) { ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm); assert(data != NULL, "Sanity"); - if (Thread::current()->is_Code_cache_sweeper_thread()) { - wait_until_concurrent_iteration_done(); - } log_unregister_nmethod(nm); ShenandoahLocker locker(&_lock); assert(contain(nm), "Must have been registered"); - ShenandoahReentrantLocker data_locker(data->lock()); - data->mark_unregistered(); -} - -void ShenandoahNMethodTable::flush_nmethod(nmethod* nm) { - assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held"); - assert(Thread::current()->is_Code_cache_sweeper_thread(), "Must from Sweep thread"); - ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm); - assert(data != NULL, "Sanity"); - - // Can not alter the array when iteration is in progress - wait_until_concurrent_iteration_done(); - log_flush_nmethod(nm); - - ShenandoahLocker locker(&_lock); int idx = index_of(nm); assert(idx >= 0 && idx < _index, "Invalid index"); ShenandoahNMethod::attach_gc_data(nm, NULL); @@ -348,7 +329,6 @@ int ShenandoahNMethodTable::index_of(nmethod* nm) const { void ShenandoahNMethodTable::remove(int idx) { shenandoah_assert_locked_or_safepoint(CodeCache_lock); - assert(!iteration_in_progress(), "Can not happen"); assert(_index >= 0 && _index <= _list->size(), "Sanity"); assert(idx >= 0 && idx < _index, "Out of bound"); @@ -429,16 +409,6 @@ void ShenandoahNMethodTable::log_unregister_nmethod(nmethod* nm) { p2i(nm)); } -void ShenandoahNMethodTable::log_flush_nmethod(nmethod* nm) { - LogTarget(Debug, gc, nmethod) log; - if (!log.is_enabled()) { - return; - } - - ResourceMark rm; - log.print("Flush NMethod: (" PTR_FORMAT ")", p2i(nm)); -} - #ifdef ASSERT void ShenandoahNMethodTable::assert_nmethods_correct() { assert_locked_or_safepoint(CodeCache_lock); @@ -513,11 +483,8 @@ void ShenandoahNMethodTableSnapshot::parallel_blobs_do(CodeBlobClosure *f) { continue; } - // A nmethod can become a zombie before it is unregistered. 
- if (nmr->nm()->is_alive()) { - nmr->assert_correct(); - f->do_code_blob(nmr->nm()); - } + nmr->assert_correct(); + f->do_code_blob(nmr->nm()); } } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.hpp b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.hpp index ba67517fe2635..4fc90b03bedc9 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.hpp @@ -150,7 +150,6 @@ class ShenandoahNMethodTable : public CHeapObj { void register_nmethod(nmethod* nm); void unregister_nmethod(nmethod* nm); - void flush_nmethod(nmethod* nm); bool contain(nmethod* nm) const; int length() const { return _index; } @@ -180,7 +179,6 @@ class ShenandoahNMethodTable : public CHeapObj { // Logging support void log_register_nmethod(nmethod* nm); void log_unregister_nmethod(nmethod* nm); - void log_flush_nmethod(nmethod* nm); }; class ShenandoahConcurrentNMethodIterator { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp b/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp index f2b6b0761b15d..f96ade35d7d8d 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp @@ -76,7 +76,7 @@ class ShenandoahIsUnloadingOopClosure : public OopClosure { class ShenandoahIsUnloadingBehaviour : public IsUnloadingBehaviour { public: - virtual bool is_unloading(CompiledMethod* method) const { + virtual bool has_dead_oop(CompiledMethod* method) const { nmethod* const nm = method->as_nmethod(); assert(ShenandoahHeap::heap()->is_concurrent_weak_root_in_progress(), "Only for this phase"); ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm); @@ -176,7 +176,7 @@ void ShenandoahUnload::unload() { { ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_class_unload_purge_coderoots); SuspendibleThreadSetJoiner sts; - ShenandoahCodeRoots::purge(heap->workers()); + ShenandoahCodeRoots::purge(); } { diff --git a/src/hotspot/share/gc/z/zBarrier.inline.hpp b/src/hotspot/share/gc/z/zBarrier.inline.hpp index 3303e23ccea44..391dd09a8c99d 100644 --- a/src/hotspot/share/gc/z/zBarrier.inline.hpp +++ b/src/hotspot/share/gc/z/zBarrier.inline.hpp @@ -350,7 +350,7 @@ inline void ZBarrier::keep_alive_barrier_on_phantom_root_oop_field(oop* p) { // unlinking, to get a sense of what nmethods are alive. This will trigger // the keep alive barriers, but the oops are healed and the slow-paths // will not trigger. We have stronger checks in the slow-paths. 
- assert(ZResurrection::is_blocked() || (Continuations::enabled() && CodeCache::contains((void*)p)), + assert(ZResurrection::is_blocked() || (CodeCache::contains((void*)p)), "This operation is only valid when resurrection is blocked"); const oop o = *p; root_barrier(p, o); diff --git a/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp b/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp index 0e3557778a8ad..c40ba38f36f7c 100644 --- a/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp +++ b/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp @@ -59,8 +59,8 @@ bool ZBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) { ZNMethod::nmethod_oops_barrier(nm); - // CodeCache sweeper support - nm->mark_as_maybe_on_continuation(); + // CodeCache unloading support + nm->mark_as_maybe_on_stack(); // Disarm disarm(nm); diff --git a/src/hotspot/share/gc/z/zCollectedHeap.cpp b/src/hotspot/share/gc/z/zCollectedHeap.cpp index 0fbef3cc7437e..f60fa38da130f 100644 --- a/src/hotspot/share/gc/z/zCollectedHeap.cpp +++ b/src/hotspot/share/gc/z/zCollectedHeap.cpp @@ -265,10 +265,6 @@ void ZCollectedHeap::unregister_nmethod(nmethod* nm) { ZNMethod::unregister_nmethod(nm); } -void ZCollectedHeap::flush_nmethod(nmethod* nm) { - ZNMethod::flush_nmethod(nm); -} - void ZCollectedHeap::verify_nmethod(nmethod* nm) { // Does nothing } diff --git a/src/hotspot/share/gc/z/zCollectedHeap.hpp b/src/hotspot/share/gc/z/zCollectedHeap.hpp index b679a86f63539..dc95fe5c7e251 100644 --- a/src/hotspot/share/gc/z/zCollectedHeap.hpp +++ b/src/hotspot/share/gc/z/zCollectedHeap.hpp @@ -102,7 +102,6 @@ class ZCollectedHeap : public CollectedHeap { virtual void register_nmethod(nmethod* nm); virtual void unregister_nmethod(nmethod* nm); - virtual void flush_nmethod(nmethod* nm); virtual void verify_nmethod(nmethod* nmethod); virtual WorkerThreads* safepoint_workers(); diff --git a/src/hotspot/share/gc/z/zDriver.cpp b/src/hotspot/share/gc/z/zDriver.cpp index f216c71217dac..60daf185956c2 100644 --- a/src/hotspot/share/gc/z/zDriver.cpp +++ b/src/hotspot/share/gc/z/zDriver.cpp @@ -230,7 +230,7 @@ void ZDriver::collect(const ZDriverRequest& request) { case GCCause::_scavenge_alot: case GCCause::_jvmti_force_gc: case GCCause::_metadata_GC_clear_soft_refs: - case GCCause::_codecache_GC_threshold: + case GCCause::_codecache_GC_aggressive: // Start synchronous GC _gc_cycle_port.send_sync(request); break; @@ -241,6 +241,7 @@ void ZDriver::collect(const ZDriverRequest& request) { case GCCause::_z_allocation_stall: case GCCause::_z_proactive: case GCCause::_z_high_usage: + case GCCause::_codecache_GC_threshold: case GCCause::_metadata_GC_threshold: // Start asynchronous GC _gc_cycle_port.send_async(request); diff --git a/src/hotspot/share/gc/z/zMark.cpp b/src/hotspot/share/gc/z/zMark.cpp index d79717e9d121c..e5b157f3a51b5 100644 --- a/src/hotspot/share/gc/z/zMark.cpp +++ b/src/hotspot/share/gc/z/zMark.cpp @@ -111,11 +111,11 @@ void ZMark::start() { // marking information for all pages. ZGlobalSeqNum++; - // Tell the sweeper that we start a marking cycle. + // Note that we start a marking cycle. // Unlike other GCs, the color switch implicitly changes the nmethods // to be armed, and the thread-local disarm values are lazily updated // when JavaThreads wake up from safepoints. 
- Continuations::on_gc_marking_cycle_start(); + CodeCache::on_gc_marking_cycle_start(); // Reset flush/continue counters _nproactiveflush = 0; @@ -695,15 +695,11 @@ class ZMarkNMethodClosure : public NMethodClosure { virtual void do_nmethod(nmethod* nm) { ZLocker locker(ZNMethod::lock_for_nmethod(nm)); - if (!nm->is_alive()) { - return; - } - if (ZNMethod::is_armed(nm)) { ZNMethod::nmethod_oops_do_inner(nm, _cl); - // CodeCache sweeper support - nm->mark_as_maybe_on_continuation(); + // CodeCache unloading support + nm->mark_as_maybe_on_stack(); ZNMethod::disarm(nm); } @@ -826,10 +822,10 @@ bool ZMark::end() { // Update statistics ZStatMark::set_at_mark_end(_nproactiveflush, _nterminateflush, _ntrycomplete, _ncontinue); - // Tell the sweeper that we finished a marking cycle. + // Note that we finished a marking cycle. // Unlike other GCs, we do not arm the nmethods // when marking terminates. - Continuations::on_gc_marking_cycle_finish(); + CodeCache::on_gc_marking_cycle_finish(); // Mark completed return true; diff --git a/src/hotspot/share/gc/z/zNMethod.cpp b/src/hotspot/share/gc/z/zNMethod.cpp index f35b06e66699a..98073c465f32e 100644 --- a/src/hotspot/share/gc/z/zNMethod.cpp +++ b/src/hotspot/share/gc/z/zNMethod.cpp @@ -172,22 +172,12 @@ void ZNMethod::register_nmethod(nmethod* nm) { } void ZNMethod::unregister_nmethod(nmethod* nm) { - assert(CodeCache_lock->owned_by_self(), "Lock must be held"); - - if (Thread::current()->is_Code_cache_sweeper_thread()) { - // The sweeper must wait for any ongoing iteration to complete - // before it can unregister an nmethod. - ZNMethodTable::wait_until_iteration_done(); - } - ResourceMark rm; log_unregister(nm); ZNMethodTable::unregister_nmethod(nm); -} -void ZNMethod::flush_nmethod(nmethod* nm) { // Destroy GC data delete gc_data(nm); } @@ -216,10 +206,6 @@ void ZNMethod::arm(nmethod* nm, int arm_value) { void ZNMethod::nmethod_oops_do(nmethod* nm, OopClosure* cl) { ZLocker locker(ZNMethod::lock_for_nmethod(nm)); - if (!nm->is_alive()) { - return; - } - ZNMethod::nmethod_oops_do_inner(nm, cl); } @@ -295,25 +281,6 @@ class ZNMethodUnlinkClosure : public NMethodClosure { Atomic::store(&_failed, true); } - void unlink(nmethod* nm) { - // Unlinking of the dependencies must happen before the - // handshake separating unlink and purge. - nm->flush_dependencies(false /* delete_immediately */); - - // unlink_from_method will take the CompiledMethod_lock. - // In this case we don't strictly need it when unlinking nmethods from - // the Method, because it is only concurrently unlinked by - // the entry barrier, which acquires the per nmethod lock. - nm->unlink_from_method(); - - if (nm->is_osr_method()) { - // Invalidate the osr nmethod before the handshake. The nmethod - // will be made unloaded after the handshake. Then invalidate_osr_method() - // will be called again, which will be a no-op. - nm->invalidate_osr_method(); - } - } - public: ZNMethodUnlinkClosure(bool unloading_occurred) : _unloading_occurred(unloading_occurred), @@ -324,13 +291,9 @@ class ZNMethodUnlinkClosure : public NMethodClosure { return; } - if (!nm->is_alive()) { - return; - } - if (nm->is_unloading()) { ZLocker locker(ZNMethod::lock_for_nmethod(nm)); - unlink(nm); + nm->unlink(); return; } @@ -339,14 +302,7 @@ class ZNMethodUnlinkClosure : public NMethodClosure { if (ZNMethod::is_armed(nm)) { // Heal oops and disarm ZNMethod::nmethod_oops_barrier(nm); - - if (Continuations::enabled()) { - // Loom needs to know about visited nmethods. 
Arm the nmethods to get - // mark_as_maybe_on_continuation() callbacks when they are used again. - ZNMethod::arm(nm, 0); - } else { - ZNMethod::disarm(nm); - } + ZNMethod::arm(nm, 0); } // Clear compiled ICs and exception caches @@ -407,36 +363,6 @@ void ZNMethod::unlink(ZWorkers* workers, bool unloading_occurred) { } } -class ZNMethodPurgeClosure : public NMethodClosure { -public: - virtual void do_nmethod(nmethod* nm) { - if (nm->is_alive() && nm->is_unloading()) { - nm->make_unloaded(); - } - } -}; - -class ZNMethodPurgeTask : public ZTask { -private: - ZNMethodPurgeClosure _cl; - -public: - ZNMethodPurgeTask() : - ZTask("ZNMethodPurgeTask"), - _cl() { - ZNMethodTable::nmethods_do_begin(); - } - - ~ZNMethodPurgeTask() { - ZNMethodTable::nmethods_do_end(); - } - - virtual void work() { - ZNMethodTable::nmethods_do(&_cl); - } -}; - -void ZNMethod::purge(ZWorkers* workers) { - ZNMethodPurgeTask task; - workers->run(&task); +void ZNMethod::purge() { + CodeCache::flush_unlinked_nmethods(); } diff --git a/src/hotspot/share/gc/z/zNMethod.hpp b/src/hotspot/share/gc/z/zNMethod.hpp index 8969c807bf3a0..c577b864d7332 100644 --- a/src/hotspot/share/gc/z/zNMethod.hpp +++ b/src/hotspot/share/gc/z/zNMethod.hpp @@ -41,7 +41,6 @@ class ZNMethod : public AllStatic { public: static void register_nmethod(nmethod* nm); static void unregister_nmethod(nmethod* nm); - static void flush_nmethod(nmethod* nm); static bool supports_entry_barrier(nmethod* nm); @@ -61,7 +60,7 @@ class ZNMethod : public AllStatic { static ZReentrantLock* lock_for_nmethod(nmethod* nm); static void unlink(ZWorkers* workers, bool unloading_occurred); - static void purge(ZWorkers* workers); + static void purge(); }; #endif // SHARE_GC_Z_ZNMETHOD_HPP diff --git a/src/hotspot/share/gc/z/zUnload.cpp b/src/hotspot/share/gc/z/zUnload.cpp index 335b01721d977..0378466324e8a 100644 --- a/src/hotspot/share/gc/z/zUnload.cpp +++ b/src/hotspot/share/gc/z/zUnload.cpp @@ -75,7 +75,7 @@ class ZIsUnloadingOopClosure : public OopClosure { class ZIsUnloadingBehaviour : public IsUnloadingBehaviour { public: - virtual bool is_unloading(CompiledMethod* method) const { + virtual bool has_dead_oop(CompiledMethod* method) const { nmethod* const nm = method->as_nmethod(); ZReentrantLock* const lock = ZNMethod::lock_for_nmethod(nm); ZLocker locker(lock); @@ -162,7 +162,7 @@ void ZUnload::purge() { { SuspendibleThreadSetJoiner sts; - ZNMethod::purge(_workers); + ZNMethod::purge(); } ClassLoaderDataGraph::purge(/*at_safepoint*/false); diff --git a/src/hotspot/share/interpreter/rewriter.cpp b/src/hotspot/share/interpreter/rewriter.cpp index 2a2505eb5a08f..9dfdd853cf00d 100644 --- a/src/hotspot/share/interpreter/rewriter.cpp +++ b/src/hotspot/share/interpreter/rewriter.cpp @@ -111,7 +111,7 @@ void Rewriter::make_constant_pool_cache(TRAPS) { _pool->initialize_resolved_references(loader_data, _resolved_references_map, _resolved_reference_limit, THREAD); - +#if INCLUDE_CDS if (!HAS_PENDING_EXCEPTION && Arguments::is_dumping_archive()) { if (_pool->pool_holder()->is_shared()) { assert(DynamicDumpSharedSpaces, "must be"); @@ -122,6 +122,7 @@ void Rewriter::make_constant_pool_cache(TRAPS) { cache->save_for_archive(THREAD); } } +#endif // Clean up constant pool cache if initialize_resolved_references() failed. 
if (HAS_PENDING_EXCEPTION) { diff --git a/src/hotspot/share/jfr/leakprofiler/leakProfiler.cpp b/src/hotspot/share/jfr/leakprofiler/leakProfiler.cpp index 62ea206960a19..49c8d027168da 100644 --- a/src/hotspot/share/jfr/leakprofiler/leakProfiler.cpp +++ b/src/hotspot/share/jfr/leakprofiler/leakProfiler.cpp @@ -94,7 +94,7 @@ void LeakProfiler::sample(HeapWord* object, size_t size, JavaThread* thread) { assert(thread != NULL, "invariant"); assert(thread->thread_state() == _thread_in_vm, "invariant"); - // exclude compiler threads and code sweeper thread + // exclude compiler threads if (thread->is_hidden_from_external_view()) { return; } diff --git a/src/hotspot/share/jfr/metadata/metadata.xml b/src/hotspot/share/jfr/metadata/metadata.xml index a415e5115c2fb..d92107424886e 100644 --- a/src/hotspot/share/jfr/metadata/metadata.xml +++ b/src/hotspot/share/jfr/metadata/metadata.xml @@ -561,7 +561,7 @@ - + @@ -602,13 +602,6 @@ - - - - - - - @@ -930,20 +923,6 @@ - - - - - - - - - - - - - - diff --git a/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp b/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp index da859ab1b1b3f..19e4a8d3a41ec 100644 --- a/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp +++ b/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp @@ -59,7 +59,6 @@ #include "runtime/os_perf.hpp" #include "runtime/thread.inline.hpp" #include "runtime/threads.hpp" -#include "runtime/sweeper.hpp" #include "runtime/vmThread.hpp" #include "runtime/vm_version.hpp" #include "services/classLoadingService.hpp" @@ -608,24 +607,6 @@ TRACE_REQUEST_FUNC(CodeCacheConfiguration) { event.commit(); } -TRACE_REQUEST_FUNC(CodeSweeperStatistics) { - EventCodeSweeperStatistics event; - event.set_sweepCount(NMethodSweeper::traversal_count()); - event.set_methodReclaimedCount(NMethodSweeper::total_nof_methods_reclaimed()); - event.set_totalSweepTime(NMethodSweeper::total_time_sweeping()); - event.set_peakFractionTime(NMethodSweeper::peak_sweep_fraction_time()); - event.set_peakSweepTime(NMethodSweeper::peak_sweep_time()); - event.commit(); -} - -TRACE_REQUEST_FUNC(CodeSweeperConfiguration) { - EventCodeSweeperConfiguration event; - event.set_sweeperEnabled(MethodFlushing); - event.set_flushingEnabled(UseCodeCacheFlushing); - event.set_sweepThreshold(NMethodSweeper::sweep_threshold_bytes()); - event.commit(); -} - TRACE_REQUEST_FUNC(ShenandoahHeapRegionInformation) { #if INCLUDE_SHENANDOAHGC if (UseShenandoahGC) { diff --git a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp index abb46b975ac1f..496b1a84d3b31 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp @@ -199,6 +199,7 @@ traceid JfrTraceId::load_raw(jclass jc) { return load(jc, true); } +#if INCLUDE_CDS // used by CDS / APPCDS as part of "remove_unshareable_info" void JfrTraceId::remove(const Klass* k) { assert(k != NULL, "invariant"); @@ -230,6 +231,7 @@ void JfrTraceId::restore(const Klass* k) { next_class_id(); } } +#endif // INCLUDE_CDS bool JfrTraceId::in_visible_set(const jclass jc) { assert(jc != NULL, "invariant"); diff --git a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.hpp b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.hpp index e1f76a0e238f6..4c31d8af754e9 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.hpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.hpp 
@@ -102,9 +102,11 @@ class JfrTraceId : public AllStatic { static traceid load_raw(const PackageEntry* package); static traceid load_raw(const ClassLoaderData* cld); +#if INCLUDE_CDS static void remove(const Klass* klass); static void remove(const Method* method); static void restore(const Klass* klass); +#endif // set of event classes made visible to java static bool in_visible_set(const Klass* k); diff --git a/src/hotspot/share/jvmci/jvmci.hpp b/src/hotspot/share/jvmci/jvmci.hpp index 07bd1b391edf4..0ddfc76197e7a 100644 --- a/src/hotspot/share/jvmci/jvmci.hpp +++ b/src/hotspot/share/jvmci/jvmci.hpp @@ -107,7 +107,7 @@ class JVMCI : public AllStatic { ok, dependencies_failed, cache_full, - nmethod_reclaimed, // code cache sweeper reclaimed nmethod in between its creation and being marked "in_use" + nmethod_reclaimed, code_too_large, first_permanent_bailout = code_too_large }; diff --git a/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp b/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp index 31ac28c9790f6..4e07d0728a437 100644 --- a/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp +++ b/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp @@ -651,7 +651,6 @@ JVMCI::CodeInstallResult CodeInstaller::install(JVMCICompiler* compiler, JVMCIObject compiled_code, objArrayHandle object_pool, CodeBlob*& cb, - nmethodLocker& nmethod_handle, JVMCIObject installed_code, FailedSpeculation** failed_speculations, char* speculations, @@ -729,9 +728,10 @@ JVMCI::CodeInstallResult CodeInstaller::install(JVMCICompiler* compiler, } JVMCIObject mirror = installed_code; + nmethod* nm = NULL; // nm is an out parameter of register_method result = runtime()->register_method(jvmci_env(), method, - nmethod_handle, + nm, entry_bci, &_offsets, _orig_pc_offset, @@ -753,7 +753,6 @@ JVMCI::CodeInstallResult CodeInstaller::install(JVMCICompiler* compiler, speculations, speculations_len); if (result == JVMCI::ok) { - nmethod* nm = nmethod_handle.code()->as_nmethod_or_null(); cb = nm; if (compile_state == NULL) { // This compile didn't come through the CompileBroker so perform the printing here diff --git a/src/hotspot/share/jvmci/jvmciCodeInstaller.hpp b/src/hotspot/share/jvmci/jvmciCodeInstaller.hpp index d7c0f5087c8a7..0ecdf3171963a 100644 --- a/src/hotspot/share/jvmci/jvmciCodeInstaller.hpp +++ b/src/hotspot/share/jvmci/jvmciCodeInstaller.hpp @@ -328,7 +328,6 @@ class CodeInstaller : public StackObj { JVMCIObject compiled_code, objArrayHandle object_pool, CodeBlob*& cb, - nmethodLocker& nmethod_handle, JVMCIObject installed_code, FailedSpeculation** failed_speculations, char* speculations, diff --git a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp index 335ad25d4ef46..3177c60057558 100644 --- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp +++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp @@ -516,7 +516,7 @@ C2V_VMENTRY_NULL(jobject, lookupType, (JNIEnv* env, jobject, jstring jname, ARGU // This is a name from a signature. Strip off the trimmings. // Call recursive to keep scope of strippedsym. 
TempNewSymbol strippedsym = Signature::strip_envelope(class_name); - resolved_klass = SystemDictionary::find_instance_klass(strippedsym, + resolved_klass = SystemDictionary::find_instance_klass(THREAD, strippedsym, class_loader, protection_domain); } else if (Signature::is_array(class_name)) { @@ -524,7 +524,7 @@ C2V_VMENTRY_NULL(jobject, lookupType, (JNIEnv* env, jobject, jstring jname, ARGU int ndim = ss.skip_array_prefix(); if (ss.type() == T_OBJECT) { Symbol* strippedsym = ss.as_symbol(); - resolved_klass = SystemDictionary::find_instance_klass(strippedsym, + resolved_klass = SystemDictionary::find_instance_klass(THREAD, strippedsym, class_loader, protection_domain); if (!resolved_klass.is_null()) { @@ -534,7 +534,7 @@ C2V_VMENTRY_NULL(jobject, lookupType, (JNIEnv* env, jobject, jstring jname, ARGU resolved_klass = TypeArrayKlass::cast(Universe::typeArrayKlassObj(ss.type()))->array_klass(ndim, CHECK_NULL); } } else { - resolved_klass = SystemDictionary::find_instance_klass(class_name, + resolved_klass = SystemDictionary::find_instance_klass(THREAD, class_name, class_loader, protection_domain); } @@ -941,15 +941,14 @@ C2V_VMENTRY_0(jint, installCode0, (JNIEnv *env, jobject, timer->add_nanoseconds(serialization_ns); TraceTime install_time("installCode", timer); - nmethodLocker nmethod_handle; CodeInstaller installer(JVMCIENV); + JVMCI::CodeInstallResult result = installer.install(compiler, compiled_code_buffer, with_type_info, compiled_code_handle, object_pool_handle, cb, - nmethod_handle, installed_code_handle, (FailedSpeculation**)(address) failed_speculations_address, speculations, @@ -976,7 +975,7 @@ C2V_VMENTRY_0(jint, installCode0, (JNIEnv *env, jobject, assert(JVMCIENV->isa_HotSpotNmethod(installed_code_handle), "wrong type"); // Clear the link to an old nmethod first JVMCIObject nmethod_mirror = installed_code_handle; - JVMCIENV->invalidate_nmethod_mirror(nmethod_mirror, JVMCI_CHECK_0); + JVMCIENV->invalidate_nmethod_mirror(nmethod_mirror, true, JVMCI_CHECK_0); } else { assert(JVMCIENV->isa_InstalledCode(installed_code_handle), "wrong type"); } @@ -1002,8 +1001,7 @@ C2V_VMENTRY_NULL(jobject, disassembleCodeBlob, (JNIEnv* env, jobject, jobject in } JVMCIObject installedCodeObject = JVMCIENV->wrap(installedCode); - nmethodLocker locker; - CodeBlob* cb = JVMCIENV->get_code_blob(installedCodeObject, locker); + CodeBlob* cb = JVMCIENV->get_code_blob(installedCodeObject); if (cb == NULL) { return NULL; } @@ -1017,12 +1015,6 @@ C2V_VMENTRY_NULL(jobject, disassembleCodeBlob, (JNIEnv* env, jobject, jobject in int bufferSize = cb->code_size() * 20 + 1024; char* buffer = NEW_RESOURCE_ARRAY(char, bufferSize); stringStream st(buffer, bufferSize); - if (cb->is_nmethod()) { - nmethod* nm = (nmethod*) cb; - if (!nm->is_alive()) { - return NULL; - } - } Disassembler::decode(cb, &st); if (st.size() <= 0) { return NULL; @@ -1048,8 +1040,7 @@ C2V_VMENTRY_NULL(jobject, executeHotSpotNmethod, (JNIEnv* env, jobject, jobject HandleMark hm(THREAD); JVMCIObject nmethod_mirror = JVMCIENV->wrap(hs_nmethod); - nmethodLocker locker; - nmethod* nm = JVMCIENV->get_nmethod(nmethod_mirror, locker); + nmethod* nm = JVMCIENV->get_nmethod(nmethod_mirror); if (nm == NULL || !nm->is_in_use()) { JVMCI_THROW_NULL(InvalidInstalledCodeException); } @@ -1154,9 +1145,9 @@ C2V_VMENTRY(void, reprofile, (JNIEnv* env, jobject, ARGUMENT_PAIR(method))) C2V_END -C2V_VMENTRY(void, invalidateHotSpotNmethod, (JNIEnv* env, jobject, jobject hs_nmethod)) +C2V_VMENTRY(void, invalidateHotSpotNmethod, (JNIEnv* env, jobject, jobject 
hs_nmethod, jboolean deoptimize)) JVMCIObject nmethod_mirror = JVMCIENV->wrap(hs_nmethod); - JVMCIENV->invalidate_nmethod_mirror(nmethod_mirror, JVMCI_CHECK); + JVMCIENV->invalidate_nmethod_mirror(nmethod_mirror, deoptimize, JVMCI_CHECK); C2V_END C2V_VMENTRY_NULL(jlongArray, collectCounters, (JNIEnv* env, jobject)) @@ -2519,12 +2510,11 @@ C2V_VMENTRY_0(jlong, translate, (JNIEnv* env, jobject, jobject obj_handle, jbool Handle constant = thisEnv->asConstant(obj, JVMCI_CHECK_0); result = peerEnv->get_object_constant(constant()); } else if (thisEnv->isa_HotSpotNmethod(obj)) { - nmethodLocker locker; - nmethod* nm = JVMCIENV->get_nmethod(obj, locker); - if (nm != NULL) { - JVMCINMethodData* data = nm->jvmci_nmethod_data(); - if (data != NULL) { - if (peerEnv->is_hotspot()) { + if (peerEnv->is_hotspot()) { + nmethod* nm = JVMCIENV->get_nmethod(obj); + if (nm != NULL) { + JVMCINMethodData* data = nm->jvmci_nmethod_data(); + if (data != NULL) { // Only the mirror in the HotSpot heap is accessible // through JVMCINMethodData oop nmethod_mirror = data->get_nmethod_mirror(nm, /* phantom_ref */ true); @@ -2534,6 +2524,7 @@ C2V_VMENTRY_0(jlong, translate, (JNIEnv* env, jobject, jobject obj_handle, jbool } } } + if (result.is_null()) { JVMCIObject methodObject = thisEnv->get_HotSpotNmethod_method(obj); methodHandle mh(THREAD, thisEnv->asMethod(methodObject)); @@ -2543,6 +2534,7 @@ C2V_VMENTRY_0(jlong, translate, (JNIEnv* env, jobject, jobject obj_handle, jbool const char* cstring = name_string.is_null() ? NULL : thisEnv->as_utf8_string(name_string); // Create a new HotSpotNmethod instance in the peer runtime result = peerEnv->new_HotSpotNmethod(mh, cstring, isDefault, compileIdSnapshot, JVMCI_CHECK_0); + nmethod* nm = JVMCIENV->get_nmethod(obj); if (result.is_null()) { // exception occurred (e.g. 
OOME) creating a new HotSpotNmethod
      } else if (nm == NULL) {
@@ -2594,20 +2586,22 @@ C2V_VMENTRY_NULL(jobject, unhand, (JNIEnv* env, jobject, jlong obj_handle))
 
 C2V_VMENTRY(void, updateHotSpotNmethod, (JNIEnv* env, jobject, jobject code_handle))
   JVMCIObject code = JVMCIENV->wrap(code_handle);
   // Execute this operation for the side effect of updating the InstalledCode state
-  nmethodLocker locker;
-  JVMCIENV->get_nmethod(code, locker);
+  JVMCIENV->get_nmethod(code);
 }
 
 C2V_VMENTRY_NULL(jbyteArray, getCode, (JNIEnv* env, jobject, jobject code_handle))
   JVMCIObject code = JVMCIENV->wrap(code_handle);
-  nmethodLocker locker;
-  CodeBlob* cb = JVMCIENV->get_code_blob(code, locker);
+  CodeBlob* cb = JVMCIENV->get_code_blob(code);
   if (cb == NULL) {
     return NULL;
   }
+
+  // Make a resource copy of code before the allocation causes a safepoint
   int code_size = cb->code_size();
+  jbyte* code_bytes = NEW_RESOURCE_ARRAY(jbyte, code_size);
+  memcpy(code_bytes, (jbyte*) cb->code_begin(), code_size);
+
   JVMCIPrimitiveArray result = JVMCIENV->new_byteArray(code_size, JVMCI_CHECK_NULL);
-  JVMCIENV->copy_bytes_from((jbyte*) cb->code_begin(), result, 0, code_size);
+  JVMCIENV->copy_bytes_from(code_bytes, result, 0, code_size);
   return JVMCIENV->get_jbyteArray(result);
 }
 
@@ -2868,7 +2862,7 @@ JNINativeMethod CompilerToVM::methods[] = {
   {CC "getLocalVariableTableStart", CC "(" HS_METHOD2 ")J", FN_PTR(getLocalVariableTableStart)},
   {CC "getLocalVariableTableLength", CC "(" HS_METHOD2 ")I", FN_PTR(getLocalVariableTableLength)},
   {CC "reprofile", CC "(" HS_METHOD2 ")V", FN_PTR(reprofile)},
-  {CC "invalidateHotSpotNmethod", CC "(" HS_NMETHOD ")V", FN_PTR(invalidateHotSpotNmethod)},
+  {CC "invalidateHotSpotNmethod", CC "(" HS_NMETHOD "Z)V", FN_PTR(invalidateHotSpotNmethod)},
   {CC "collectCounters", CC "()[J", FN_PTR(collectCounters)},
   {CC "getCountersSize", CC "()I", FN_PTR(getCountersSize)},
   {CC "setCountersSize", CC "(I)Z", FN_PTR(setCountersSize)},
diff --git a/src/hotspot/share/jvmci/jvmciEnv.cpp b/src/hotspot/share/jvmci/jvmciEnv.cpp
index a588a664dc565..253d0c48b6f03 100644
--- a/src/hotspot/share/jvmci/jvmciEnv.cpp
+++ b/src/hotspot/share/jvmci/jvmciEnv.cpp
@@ -156,7 +156,7 @@ void JVMCIEnv::copy_saved_properties() {
   }
 }
 
-void JVMCIEnv::init_env_mode_runtime(JavaThread* thread, JNIEnv* parent_env) {
+void JVMCIEnv::init_env_mode_runtime(JavaThread* thread, JNIEnv* parent_env, bool attach_OOME_is_fatal) {
   assert(thread != NULL, "npe");
   _env = NULL;
   _pop_frame_on_close = false;
@@ -208,10 +208,16 @@ void JVMCIEnv::init_env_mode_runtime(JavaThread* thread, JNIEnv* parent_env) {
       attach_args.version = JNI_VERSION_1_2;
       attach_args.name = const_cast<char*>(thread->name());
       attach_args.group = NULL;
-      if (_runtime->AttachCurrentThread(thread, (void**) &_env, &attach_args) != JNI_OK) {
+      jint attach_result = _runtime->AttachCurrentThread(thread, (void**) &_env, &attach_args);
+      if (attach_result == JNI_OK) {
+        _detach_on_close = true;
+      } else if (!attach_OOME_is_fatal && attach_result == JNI_ENOMEM) {
+        _env = NULL;
+        _attach_threw_OOME = true;
+        return;
+      } else {
         fatal("Error attaching current thread (%s) to JVMCI shared library JNI interface", attach_args.name);
       }
-      _detach_on_close = true;
     }
   }
 
@@ -229,17 +235,22 @@ void JVMCIEnv::init_env_mode_runtime(JavaThread* thread, JNIEnv* parent_env) {
 }
 
 JVMCIEnv::JVMCIEnv(JavaThread* thread, JVMCICompileState* compile_state, const char* file, int line):
-    _throw_to_caller(false), _file(file), _line(line), _compile_state(compile_state) {
-  init_env_mode_runtime(thread, NULL);
+    _throw_to_caller(false), _file(file), _line(line), _attach_threw_OOME(false), _compile_state(compile_state) {
+  // In case of OOME, there's a good chance a subsequent attempt to attach might succeed.
+  // Other errors most likely indicate a non-recoverable error in the JVMCI runtime.
+  init_env_mode_runtime(thread, NULL, false);
+  if (_attach_threw_OOME) {
+    compile_state->set_failure(true, "Out of memory while attaching JVMCI compiler to current thread");
+  }
 }
 
 JVMCIEnv::JVMCIEnv(JavaThread* thread, const char* file, int line):
-    _throw_to_caller(false), _file(file), _line(line), _compile_state(NULL) {
+    _throw_to_caller(false), _file(file), _line(line), _attach_threw_OOME(false), _compile_state(NULL) {
   init_env_mode_runtime(thread, NULL);
 }
 
 JVMCIEnv::JVMCIEnv(JavaThread* thread, JNIEnv* parent_env, const char* file, int line):
-    _throw_to_caller(true), _file(file), _line(line), _compile_state(NULL) {
+    _throw_to_caller(true), _file(file), _line(line), _attach_threw_OOME(false), _compile_state(NULL) {
   init_env_mode_runtime(thread, parent_env);
   assert(_env == NULL || parent_env == _env, "mismatched JNIEnvironment");
 }
@@ -249,6 +260,7 @@ void JVMCIEnv::init(JavaThread* thread, bool is_hotspot, const char* file, int l
   _throw_to_caller = false;
   _file = file;
   _line = line;
+  _attach_threw_OOME = false;
   if (is_hotspot) {
     _env = NULL;
     _pop_frame_on_close = false;
@@ -415,6 +427,9 @@ jboolean JVMCIEnv::transfer_pending_exception(JavaThread* THREAD, JVMCIEnv* peer
 
 JVMCIEnv::~JVMCIEnv() {
+  if (_attach_threw_OOME) {
+    return;
+  }
   if (_throw_to_caller) {
     if (is_hotspot()) {
       // Nothing to do
@@ -1497,9 +1512,6 @@ void JVMCIEnv::initialize_installed_code(JVMCIObject installed_code, CodeBlob* c
   // Ignore the version which can stay at 0
   if (cb->is_nmethod()) {
     nmethod* nm = cb->as_nmethod_or_null();
-    if (!nm->is_alive()) {
-      JVMCI_THROW_MSG(InternalError, "nmethod has been reclaimed");
-    }
     if (nm->is_in_use()) {
       set_InstalledCode_entryPoint(installed_code, (jlong) nm->verified_entry_point());
     }
@@ -1513,13 +1525,12 @@ void JVMCIEnv::initialize_installed_code(JVMCIObject installed_code, CodeBlob* c
 }
 
-void JVMCIEnv::invalidate_nmethod_mirror(JVMCIObject mirror, JVMCI_TRAPS) {
+void JVMCIEnv::invalidate_nmethod_mirror(JVMCIObject mirror, bool deoptimize, JVMCI_TRAPS) {
   if (mirror.is_null()) {
     JVMCI_THROW(NullPointerException);
   }
 
-  nmethodLocker locker;
-  nmethod* nm = JVMCIENV->get_nmethod(mirror, locker);
+  nmethod* nm = JVMCIENV->get_nmethod(mirror);
   if (nm == NULL) {
     // Nothing to do
     return;
@@ -1533,9 +1544,11 @@ void JVMCIEnv::invalidate_nmethod_mirror(JVMCIObject mirror, JVMCI_TRAPS) {
            "Cannot invalidate HotSpotNmethod object in shared library VM heap from non-JavaThread");
   }
 
-  nmethodLocker nml(nm);
-  if (nm->is_alive()) {
-    // Invalidating the HotSpotNmethod means we want the nmethod to be deoptimized.
+  if (!deoptimize) {
+    // Prevent future executions of the nmethod but let current executions complete.
+    nm->make_not_entrant();
+  } else {
+    // We want the nmethod to be deoptimized immediately.
Deoptimization::deoptimize_all_marked(nm); } @@ -1558,57 +1571,39 @@ ConstantPool* JVMCIEnv::asConstantPool(JVMCIObject obj) { return *constantPoolHandle; } -CodeBlob* JVMCIEnv::get_code_blob(JVMCIObject obj, nmethodLocker& locker) { - address code = (address) get_InstalledCode_address(obj); + +// Lookup an nmethod with a matching base and compile id +nmethod* JVMCIEnv::lookup_nmethod(address code, jlong compile_id_snapshot) { if (code == NULL) { return NULL; } - if (isa_HotSpotNmethod(obj)) { - nmethod* nm = NULL; - { - // Lookup the CodeBlob while holding the CodeCache_lock to ensure the nmethod can't be freed - // by nmethod::flush while we're interrogating it. - MutexLocker cm_lock(CodeCache_lock, Mutex::_no_safepoint_check_flag); - CodeBlob* cb = CodeCache::find_blob_unsafe(code); - if (cb == (CodeBlob*) code) { - nmethod* the_nm = cb->as_nmethod_or_null(); - if (the_nm != NULL && the_nm->is_alive()) { - // Lock the nmethod to stop any further transitions by the sweeper. It's still possible - // for this code to execute in the middle of the sweeping of the nmethod but that will be - // handled below. - locker.set_code(nm, true); - nm = the_nm; - } - } - } - if (nm != NULL) { - // We found the nmethod but it could be in the process of being freed. Check the state of the - // nmethod while holding the CompiledMethod_lock. This ensures that any transitions by other - // threads have seen the is_locked_by_vm() update above. - MutexLocker cm_lock(CompiledMethod_lock, Mutex::_no_safepoint_check_flag); - if (!nm->is_alive()) { - // It was alive when we looked it up but it's no longer alive so release it. - locker.set_code(NULL); - nm = NULL; - } + CodeBlob* cb = CodeCache::find_blob(code); + if (cb == (CodeBlob*) code) { + nmethod* nm = cb->as_nmethod_or_null(); + if (nm != NULL && (compile_id_snapshot == 0 || nm->compile_id() == compile_id_snapshot)) { + return nm; } + } + return NULL; +} + +CodeBlob* JVMCIEnv::get_code_blob(JVMCIObject obj) { + address code = (address) get_InstalledCode_address(obj); + if (code == NULL) { + return NULL; + } + if (isa_HotSpotNmethod(obj)) { jlong compile_id_snapshot = get_HotSpotNmethod_compileIdSnapshot(obj); - if (compile_id_snapshot != 0L) { - // Found a live nmethod with the same address, make sure it's the same nmethod - if (nm == (nmethod*) code && nm->compile_id() == compile_id_snapshot && nm->is_alive()) { - if (nm->is_not_entrant()) { - // Zero the entry point so that the nmethod - // cannot be invoked by the mirror but can - // still be deoptimized. - set_InstalledCode_entryPoint(obj, 0); - } - return nm; - } - // The HotSpotNmethod no longer refers to a valid nmethod so clear the state - locker.set_code(NULL); - nm = NULL; + nmethod* nm = lookup_nmethod(code, compile_id_snapshot); + if (nm != NULL && compile_id_snapshot != 0L && nm->is_not_entrant()) { + // Zero the entry point so that the nmethod + // cannot be invoked by the mirror but can + // still be deoptimized. 
+ set_InstalledCode_entryPoint(obj, 0); + // Refetch the nmethod since the previous call will be a safepoint in libjvmci + nm = lookup_nmethod(code, compile_id_snapshot); } if (nm == NULL) { @@ -1626,8 +1621,8 @@ CodeBlob* JVMCIEnv::get_code_blob(JVMCIObject obj, nmethodLocker& locker) { return cb; } -nmethod* JVMCIEnv::get_nmethod(JVMCIObject obj, nmethodLocker& locker) { - CodeBlob* cb = get_code_blob(obj, locker); +nmethod* JVMCIEnv::get_nmethod(JVMCIObject obj) { + CodeBlob* cb = get_code_blob(obj); if (cb != NULL) { return cb->as_nmethod_or_null(); } diff --git a/src/hotspot/share/jvmci/jvmciEnv.hpp b/src/hotspot/share/jvmci/jvmciEnv.hpp index 7774f166261df..fa76dfae3a2fd 100644 --- a/src/hotspot/share/jvmci/jvmciEnv.hpp +++ b/src/hotspot/share/jvmci/jvmciEnv.hpp @@ -36,7 +36,6 @@ class JVMCIObjectArray; class JVMCIPrimitiveArray; class JVMCICompiler; class JVMCIRuntime; -class nmethodLocker; #define JVMCI_EXCEPTION_CONTEXT \ JavaThread* thread = JavaThread::current(); \ @@ -157,7 +156,7 @@ class JVMCIEnv : public ResourceObj { friend class JNIAccessMark; // Initializes the _env, _mode and _runtime fields. - void init_env_mode_runtime(JavaThread* thread, JNIEnv* parent_env); + void init_env_mode_runtime(JavaThread* thread, JNIEnv* parent_env, bool attach_OOME_is_fatal = true); void init(JavaThread* thread, bool is_hotspot, const char* file, int line); @@ -169,6 +168,7 @@ class JVMCIEnv : public ResourceObj { bool _throw_to_caller; // Propagate an exception raised in this env to the caller? const char* _file; // The file and ... int _line; // ... line where this JNIEnv was created + bool _attach_threw_OOME; // Failed to attach thread due to OutOfMemoryError, the JVMCIEnv is invalid // Translates an exception on the HotSpot heap (i.e., hotspot_env) to an exception on // the shared library heap (i.e., jni_env). The translation includes the stack and cause(s) of `throwable`. @@ -296,6 +296,8 @@ class JVMCIEnv : public ResourceObj { JVMCIPrimitiveArray wrap(jbyteArray obj) { return (JVMCIPrimitiveArray) wrap((jobject) obj); } JVMCIPrimitiveArray wrap(jlongArray obj) { return (JVMCIPrimitiveArray) wrap((jobject) obj); } + nmethod* lookup_nmethod(address code, jlong compile_id_snapshot); + private: JVMCIObject wrap(oop obj) { assert(is_hotspot(), "must be"); return wrap(JNIHandles::make_local(obj)); } JVMCIObjectArray wrap(objArrayOop obj) { assert(is_hotspot(), "must be"); return (JVMCIObjectArray) wrap(JNIHandles::make_local(obj)); } @@ -344,13 +346,11 @@ class JVMCIEnv : public ResourceObj { void fthrow_error(const char* file, int line, const char* format, ...) ATTRIBUTE_PRINTF(4, 5); - // Given an instance of HotSpotInstalledCode return the corresponding CodeBlob*. The - // nmethodLocker is required to keep the CodeBlob alive in the case where it's an nmethod. - CodeBlob* get_code_blob(JVMCIObject code, nmethodLocker& locker); + // Given an instance of HotSpotInstalledCode return the corresponding CodeBlob*. + CodeBlob* get_code_blob(JVMCIObject code); - // Given an instance of HotSpotInstalledCode return the corresponding nmethod. The - // nmethodLocker is required to keep the nmethod alive. - nmethod* get_nmethod(JVMCIObject code, nmethodLocker& locker); + // Given an instance of HotSpotInstalledCode return the corresponding nmethod. + nmethod* get_nmethod(JVMCIObject code); const char* klass_name(JVMCIObject object); @@ -413,9 +413,11 @@ class JVMCIEnv : public ResourceObj { // Destroys a JNI global handle created by JVMCIEnv::make_global. 
  void destroy_global(JVMCIObject object);
 
-  // Deoptimizes the nmethod (if any) in the HotSpotNmethod.address
-  // field of mirror. The field is subsequently zeroed.
-  void invalidate_nmethod_mirror(JVMCIObject mirror, JVMCI_TRAPS);
+  // Updates the nmethod (if any) in the HotSpotNmethod.address
+  // field of `mirror` to prevent it from being called.
+  // If `deoptimize` is true, the nmethod is immediately deoptimized.
+  // The HotSpotNmethod.address field is zero upon returning.
+  void invalidate_nmethod_mirror(JVMCIObject mirror, bool deoptimize, JVMCI_TRAPS);
 
   void initialize_installed_code(JVMCIObject installed_code, CodeBlob* cb, JVMCI_TRAPS);
 
diff --git a/src/hotspot/share/jvmci/jvmciRuntime.cpp b/src/hotspot/share/jvmci/jvmciRuntime.cpp
index 096ffea9e2966..b0f6df3a79e63 100644
--- a/src/hotspot/share/jvmci/jvmciRuntime.cpp
+++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp
@@ -802,13 +802,6 @@ void JVMCINMethodData::set_nmethod_mirror(nmethod* nm, oop new_mirror) {
   Universe::heap()->register_nmethod(nm);
 }
 
-void JVMCINMethodData::clear_nmethod_mirror(nmethod* nm) {
-  if (_nmethod_mirror_index != -1) {
-    oop* addr = nm->oop_addr_at(_nmethod_mirror_index);
-    *addr = NULL;
-  }
-}
-
 void JVMCINMethodData::invalidate_nmethod_mirror(nmethod* nm) {
   oop nmethod_mirror = get_nmethod_mirror(nm, /* phantom_ref */ false);
   if (nmethod_mirror == NULL) {
@@ -821,7 +814,7 @@ void JVMCINMethodData::invalidate_nmethod_mirror(nmethod* nm) {
   JVMCIEnv* jvmciEnv = NULL;
   nmethod* current = (nmethod*) HotSpotJVMCI::InstalledCode::address(jvmciEnv, nmethod_mirror);
   if (nm == current) {
-    if (!nm->is_alive()) {
+    if (nm->is_unloading()) {
       // Break the link from the mirror to nm such that
       // future invocations via the mirror will result in
       // an InvalidInstalledCodeException.
@@ -835,7 +828,7 @@ void JVMCINMethodData::invalidate_nmethod_mirror(nmethod* nm) {
     }
   }
 
-  if (_nmethod_mirror_index != -1 && nm->is_unloaded()) {
+  if (_nmethod_mirror_index != -1 && nm->is_unloading()) {
    // Drop the reference to the nmethod mirror object but don't clear the actual oop reference. Otherwise
    // it would appear that the nmethod didn't need to be unloaded in the first place.
_nmethod_mirror_index = -1; @@ -1663,7 +1656,7 @@ Klass* JVMCIRuntime::get_klass_by_name_impl(Klass*& accessing_klass, if (!require_local) { found_klass = SystemDictionary::find_constrained_instance_or_array_klass(THREAD, sym, loader); } else { - found_klass = SystemDictionary::find_instance_or_array_klass(sym, loader, domain); + found_klass = SystemDictionary::find_instance_or_array_klass(THREAD, sym, loader, domain); } } @@ -2008,7 +2001,7 @@ void JVMCIRuntime::compile_method(JVMCIEnv* JVMCIENV, JVMCICompiler* compiler, c bool retryable = JVMCIENV->get_HotSpotCompilationRequestResult_retry(result_object) != 0; compile_state->set_failure(retryable, failure_reason, true); } else { - if (compile_state->task()->code() == nullptr) { + if (!compile_state->task()->is_success()) { compile_state->set_failure(true, "no nmethod produced"); } else { compile_state->task()->set_num_inlined_bytecodes(JVMCIENV->get_HotSpotCompilationRequestResult_inlinedBytecodes(result_object)); @@ -2040,30 +2033,29 @@ bool JVMCIRuntime::is_gc_supported(JVMCIEnv* JVMCIENV, CollectedHeap::Name name) // ------------------------------------------------------------------ JVMCI::CodeInstallResult JVMCIRuntime::register_method(JVMCIEnv* JVMCIENV, - const methodHandle& method, - nmethodLocker& code_handle, - int entry_bci, - CodeOffsets* offsets, - int orig_pc_offset, - CodeBuffer* code_buffer, - int frame_words, - OopMapSet* oop_map_set, - ExceptionHandlerTable* handler_table, - ImplicitExceptionTable* implicit_exception_table, - AbstractCompiler* compiler, - DebugInformationRecorder* debug_info, - Dependencies* dependencies, - int compile_id, - bool has_monitors, - bool has_unsafe_access, - bool has_wide_vector, - JVMCIObject compiled_code, - JVMCIObject nmethod_mirror, - FailedSpeculation** failed_speculations, - char* speculations, - int speculations_len) { + const methodHandle& method, + nmethod*& nm, + int entry_bci, + CodeOffsets* offsets, + int orig_pc_offset, + CodeBuffer* code_buffer, + int frame_words, + OopMapSet* oop_map_set, + ExceptionHandlerTable* handler_table, + ImplicitExceptionTable* implicit_exception_table, + AbstractCompiler* compiler, + DebugInformationRecorder* debug_info, + Dependencies* dependencies, + int compile_id, + bool has_monitors, + bool has_unsafe_access, + bool has_wide_vector, + JVMCIObject compiled_code, + JVMCIObject nmethod_mirror, + FailedSpeculation** failed_speculations, + char* speculations, + int speculations_len) { JVMCI_EXCEPTION_CONTEXT; - nmethod* nm = NULL; CompLevel comp_level = CompLevel_full_optimization; char* failure_detail = NULL; @@ -2090,6 +2082,9 @@ JVMCI::CodeInstallResult JVMCIRuntime::register_method(JVMCIEnv* JVMCIENV, } if (result == JVMCI::ok) { + // Check if memory should be freed before allocation + CodeCache::gc_on_allocation(); + // To prevent compile queue updates. MutexLocker locker(THREAD, MethodCompileQueue_lock); @@ -2154,12 +2149,6 @@ JVMCI::CodeInstallResult JVMCIRuntime::register_method(JVMCIEnv* JVMCIENV, nm->set_has_wide_vectors(has_wide_vector); nm->set_has_monitors(has_monitors); - // Record successful registration. - // (Put nm into the task handle *before* publishing to the Java heap.) 
- if (JVMCIENV->compile_state() != NULL) { - JVMCIENV->compile_state()->task()->set_code(nm); - } - JVMCINMethodData* data = nm->jvmci_nmethod_data(); assert(data != NULL, "must be"); if (install_default) { @@ -2214,9 +2203,6 @@ JVMCI::CodeInstallResult JVMCIRuntime::register_method(JVMCIEnv* JVMCIENV, } } } - if (result == JVMCI::ok) { - code_handle.set_code(nm); - } } // String creation must be done outside lock @@ -2227,8 +2213,11 @@ JVMCI::CodeInstallResult JVMCIRuntime::register_method(JVMCIEnv* JVMCIENV, } if (result == JVMCI::ok) { - // JVMTI -- compiled method notification (must be done outside lock) - nm->post_compiled_method_load_event(); + JVMCICompileState* state = JVMCIENV->compile_state(); + if (state != NULL) { + // Compilation succeeded, post what we know about it + nm->post_compiled_method(state->task()); + } } return result; diff --git a/src/hotspot/share/jvmci/jvmciRuntime.hpp b/src/hotspot/share/jvmci/jvmciRuntime.hpp index d1be337df9525..d982e15015514 100644 --- a/src/hotspot/share/jvmci/jvmciRuntime.hpp +++ b/src/hotspot/share/jvmci/jvmciRuntime.hpp @@ -94,9 +94,6 @@ class JVMCINMethodData { // Sets the mirror in nm's oops table. void set_nmethod_mirror(nmethod* nm, oop mirror); - - // Clears the mirror in nm's oops table. - void clear_nmethod_mirror(nmethod* nm); }; // A top level class that represents an initialized JVMCI runtime. @@ -399,28 +396,28 @@ class JVMCIRuntime: public CHeapObj { // Register the result of a compilation. JVMCI::CodeInstallResult register_method(JVMCIEnv* JVMCIENV, - const methodHandle& target, - nmethodLocker& code_handle, - int entry_bci, - CodeOffsets* offsets, - int orig_pc_offset, - CodeBuffer* code_buffer, - int frame_words, - OopMapSet* oop_map_set, - ExceptionHandlerTable* handler_table, - ImplicitExceptionTable* implicit_exception_table, - AbstractCompiler* compiler, - DebugInformationRecorder* debug_info, - Dependencies* dependencies, - int compile_id, - bool has_monitors, - bool has_unsafe_access, - bool has_wide_vector, - JVMCIObject compiled_code, - JVMCIObject nmethod_mirror, - FailedSpeculation** failed_speculations, - char* speculations, - int speculations_len); + const methodHandle& target, + nmethod*& nm, + int entry_bci, + CodeOffsets* offsets, + int orig_pc_offset, + CodeBuffer* code_buffer, + int frame_words, + OopMapSet* oop_map_set, + ExceptionHandlerTable* handler_table, + ImplicitExceptionTable* implicit_exception_table, + AbstractCompiler* compiler, + DebugInformationRecorder* debug_info, + Dependencies* dependencies, + int compile_id, + bool has_monitors, + bool has_unsafe_access, + bool has_wide_vector, + JVMCIObject compiled_code, + JVMCIObject nmethod_mirror, + FailedSpeculation** failed_speculations, + char* speculations, + int speculations_len); // Detach `thread` from this runtime and destroy this runtime's JavaVM // if using one JavaVM per JVMCI compilation . 
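A side note on the two invalidation modes introduced above: passing deoptimize=false only blocks future entries into the machine code (via make_not_entrant) while activations already running it complete normally, whereas deoptimize=true also forces those activations out via deoptimization. The standalone C++ sketch below models just that distinction; ToyNmethod, invalidate and the field names are illustrative stand-ins invented here, not HotSpot's API.

#include <cstdio>

// Toy stand-in for an nmethod: only the two pieces of state
// that the deoptimize flag distinguishes.
struct ToyNmethod {
  bool entrant = true;     // may new calls enter through the entry point?
  int  active_frames = 2;  // activations currently on some thread's stack
};

void invalidate(ToyNmethod& nm, bool deoptimize) {
  nm.entrant = false;      // roughly nm->make_not_entrant(): no new entries
  if (deoptimize) {
    // roughly Deoptimization::deoptimize_all_marked(nm): existing
    // activations are forced back into the interpreter as well
    nm.active_frames = 0;
  }
}

int main() {
  ToyNmethod gentle, eager;
  invalidate(gentle, /*deoptimize=*/ false);  // in-flight executions finish
  invalidate(eager,  /*deoptimize=*/ true);   // in-flight executions deopt
  std::printf("gentle: entrant=%d active_frames=%d\n", gentle.entrant, gentle.active_frames);
  std::printf("eager:  entrant=%d active_frames=%d\n", eager.entrant, eager.active_frames);
  return 0;
}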
diff --git a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp index 25262f0f16271..9ffe987d4e0dc 100644 --- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp +++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp @@ -234,7 +234,6 @@ volatile_nonstatic_field(Method, _code, CompiledMethod*) \ volatile_nonstatic_field(Method, _from_compiled_entry, address) \ \ - nonstatic_field(MethodCounters, _nmethod_age, int) \ nonstatic_field(MethodCounters, _invoke_mask, int) \ nonstatic_field(MethodCounters, _backedge_mask, int) \ nonstatic_field(MethodCounters, _interpreter_throwout_count, u2) \ diff --git a/src/hotspot/share/memory/allocation.hpp b/src/hotspot/share/memory/allocation.hpp index f64cc1b3942e8..6c6e99a064415 100644 --- a/src/hotspot/share/memory/allocation.hpp +++ b/src/hotspot/share/memory/allocation.hpp @@ -270,6 +270,7 @@ class MetaspaceObj { // non-shared or shared metaspace. static bool is_valid(const MetaspaceObj* p); +#if INCLUDE_CDS static bool is_shared(const MetaspaceObj* p) { // If no shared metaspace regions are mapped, _shared_metaspace_{base,top} will // both be NULL and all values of p will be rejected quickly. @@ -277,6 +278,10 @@ class MetaspaceObj { ((void*)p) >= _shared_metaspace_base); } bool is_shared() const { return MetaspaceObj::is_shared(this); } +#else + static bool is_shared(const MetaspaceObj* p) { return false; } + bool is_shared() const { return false; } +#endif void print_address_on(outputStream* st) const; // nonvirtual address printing diff --git a/src/hotspot/share/memory/heap.cpp b/src/hotspot/share/memory/heap.cpp index 6d5947d4bb25e..6b6a8c9cf2743 100644 --- a/src/hotspot/share/memory/heap.cpp +++ b/src/hotspot/share/memory/heap.cpp @@ -483,7 +483,7 @@ void* CodeHeap::find_start(void* p) const { // Find block which contains the passed pointer. // Same as find_start(p), but with additional safety net. -CodeBlob* CodeHeap::find_blob_unsafe(void* start) const { +CodeBlob* CodeHeap::find_blob(void* start) const { CodeBlob* result = (CodeBlob*)CodeHeap::find_start(start); return (result != NULL && result->blob_contains((address)start)) ? 
result : NULL; } diff --git a/src/hotspot/share/memory/heap.hpp b/src/hotspot/share/memory/heap.hpp index f68fa3df7562d..4b25d3bdba001 100644 --- a/src/hotspot/share/memory/heap.hpp +++ b/src/hotspot/share/memory/heap.hpp @@ -176,7 +176,7 @@ class CodeHeap : public CHeapObj { } virtual void* find_start(void* p) const; // returns the block containing p or NULL - virtual CodeBlob* find_blob_unsafe(void* start) const; + virtual CodeBlob* find_blob(void* start) const; size_t alignment_unit() const; // alignment of any block size_t alignment_offset() const; // offset of first byte of any block, within the enclosing alignment unit static size_t header_size() { return sizeof(HeapBlock); } // returns the header size for each heap block diff --git a/src/hotspot/share/memory/iterator.cpp b/src/hotspot/share/memory/iterator.cpp index 08470b4ed97b9..d2f28151d0105 100644 --- a/src/hotspot/share/memory/iterator.cpp +++ b/src/hotspot/share/memory/iterator.cpp @@ -62,8 +62,8 @@ void MarkingCodeBlobClosure::do_code_blob(CodeBlob* cb) { nm->oops_do(_cl); if (_keepalive_nmethods) { - // CodeCache sweeper support - nm->mark_as_maybe_on_continuation(); + // CodeCache unloading support + nm->mark_as_maybe_on_stack(); BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); if (bs_nm != NULL) { diff --git a/src/hotspot/share/memory/iterator.hpp b/src/hotspot/share/memory/iterator.hpp index 797bd7abcc4f5..b6a3a393842e0 100644 --- a/src/hotspot/share/memory/iterator.hpp +++ b/src/hotspot/share/memory/iterator.hpp @@ -107,7 +107,7 @@ class OopIterateClosure : public OopClosure { // Class redefinition needs to get notified about methods from stackChunkOops virtual void do_method(Method* m) = 0; - // The code cache sweeper needs to get notified about methods from stackChunkOops + // The code cache unloading needs to get notified about methods from stackChunkOops virtual void do_nmethod(nmethod* nm) = 0; }; diff --git a/src/hotspot/share/memory/metaspace.cpp b/src/hotspot/share/memory/metaspace.cpp index d54ad73e6939f..545b1fa3abbe9 100644 --- a/src/hotspot/share/memory/metaspace.cpp +++ b/src/hotspot/share/memory/metaspace.cpp @@ -1002,23 +1002,30 @@ const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) { } } -void Metaspace::purge() { +void Metaspace::purge(bool classes_unloaded) { // The MetaspaceCritical_lock is used by a concurrent GC to block out concurrent metaspace // allocations, that would starve critical metaspace allocations, that are about to throw // OOM if they fail; they need precedence for correctness. MutexLocker ml(MetaspaceCritical_lock, Mutex::_no_safepoint_check_flag); - ChunkManager* cm = ChunkManager::chunkmanager_nonclass(); - if (cm != NULL) { - cm->purge(); - } - if (using_class_space()) { - cm = ChunkManager::chunkmanager_class(); + if (classes_unloaded) { + ChunkManager* cm = ChunkManager::chunkmanager_nonclass(); if (cm != NULL) { cm->purge(); } + if (using_class_space()) { + cm = ChunkManager::chunkmanager_class(); + if (cm != NULL) { + cm->purge(); + } + } } - MetaspaceCriticalAllocation::satisfy(); + // Try to satisfy queued metaspace allocation requests. + // + // It might seem unnecessary to try to process allocation requests if no + // classes have been unloaded. However, this call is required for the code + // in MetaspaceCriticalAllocation::try_allocate_critical to work. 
+  MetaspaceCriticalAllocation::process();
 }
 
 bool Metaspace::contains(const void* ptr) {
diff --git a/src/hotspot/share/memory/metaspace.hpp b/src/hotspot/share/memory/metaspace.hpp
index c9cf6bd5f8860..d3f9a3721ac08 100644
--- a/src/hotspot/share/memory/metaspace.hpp
+++ b/src/hotspot/share/memory/metaspace.hpp
@@ -121,7 +121,7 @@ class Metaspace : public AllStatic {
   static bool contains_non_shared(const void* ptr);
 
   // Free empty virtualspaces
-  static void purge();
+  static void purge(bool classes_unloaded);
 
   static void report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS);
 
diff --git a/src/hotspot/share/memory/metaspaceCriticalAllocation.cpp b/src/hotspot/share/memory/metaspaceCriticalAllocation.cpp
index 08400e5b5d4a7..26e95238477db 100644
--- a/src/hotspot/share/memory/metaspaceCriticalAllocation.cpp
+++ b/src/hotspot/share/memory/metaspaceCriticalAllocation.cpp
@@ -33,12 +33,12 @@
 #include "runtime/mutexLocker.hpp"
 
 class MetadataAllocationRequest {
-  ClassLoaderData*       _loader_data;
-  size_t                 _word_size;
-  Metaspace::MetadataType _type;
-  MetadataAllocationRequest* _next;
-  MetaWord*              _result;
-  bool                   _has_result;
+  ClassLoaderData* const _loader_data;
+  const size_t           _word_size;
+  const Metaspace::MetadataType _type;
+  MetadataAllocationRequest* _next;
+  MetaWord*              _result;
+  bool                   _is_processed;
 
 public:
   MetadataAllocationRequest(ClassLoaderData* loader_data,
@@ -49,7 +49,7 @@ class MetadataAllocationRequest {
     _type(type),
     _next(NULL),
     _result(NULL),
-    _has_result(false) {
+    _is_processed(false) {
     MetaspaceCriticalAllocation::add(this);
   }
 
@@ -57,17 +57,17 @@ class MetadataAllocationRequest {
     MetaspaceCriticalAllocation::remove(this);
   }
 
-  ClassLoaderData* loader_data() const { return _loader_data; }
-  size_t word_size() const { return _word_size; }
-  Metaspace::MetadataType type() const { return _type; }
-  MetadataAllocationRequest* next() const { return _next; }
-  MetaWord* result() const { return _result; }
-  bool has_result() const { return _has_result; }
+  ClassLoaderData* loader_data() const { return _loader_data; }
+  size_t word_size() const { return _word_size; }
+  Metaspace::MetadataType type() const { return _type; }
+  MetadataAllocationRequest* next() const { return _next; }
+  MetaWord* result() const { return _result; }
+  bool is_processed() const { return _is_processed; }
 
   void set_next(MetadataAllocationRequest* next) { _next = next; }
   void set_result(MetaWord* result) {
     _result = result;
-    _has_result = true;
+    _is_processed = true;
   }
 };
 
@@ -113,13 +113,47 @@ void MetaspaceCriticalAllocation::remove(MetadataAllocationRequest* request) {
 }
 
 bool MetaspaceCriticalAllocation::try_allocate_critical(MetadataAllocationRequest* request) {
+  // This function uses an optimized scheme to limit the number of triggered
+  // GCs. The idea is that only one request in the list is responsible for
+  // triggering a GC, and later requests will try to piggy-back on that
+  // request.
+  //
+  // For this to work, it is important that we can tell which requests were
+  // seen by the GC's call to process(), and which requests were added after
+  // the last process() call. The property '_is_processed' tells this. Because
+  // the logic below relies on that property, it is important that the GC calls
+  // process() even when the GC didn't unload any classes.
+  //
+  // Note that process() leaves the requests in the queue, so that threads
+  // in wait_for_purge which had their requests processed, but didn't get any
+  // memory, can exit that function and trigger a new GC as a last effort to
+  // get memory before throwing an OOME.
+  //
+  // Requests that have been processed once will not trigger new GCs; we
+  // therefore filter them out when we determine if the current 'request'
+  // needs to trigger a GC, or if there are earlier requests that will
+  // trigger a GC.
+
   {
     MutexLocker ml(MetaspaceCritical_lock, Mutex::_no_safepoint_check_flag);
-    if (_requests_head == request) {
-      // The first request can't opportunistically ride on a previous GC
+    auto is_first_unprocessed = [&]() {
+      for (MetadataAllocationRequest* curr = _requests_head; curr != NULL; curr = curr->next()) {
+        if (!curr->is_processed()) {
+          // curr is the first not satisfied request
+          return curr == request;
+        }
+      }
+
+      return false;
+    };
+
+    if (is_first_unprocessed()) {
+      // The first non-processed request takes ownership of triggering the GC
+      // on behalf of itself, and all trailing requests in the list.
       return false;
     }
   }
 
+  // Try to ride on a previous GC and hope for early satisfaction
   wait_for_purge(request);
 
   return request->result() != NULL;
@@ -129,7 +163,9 @@ void MetaspaceCriticalAllocation::wait_for_purge(MetadataAllocationRequest* requ
   ThreadBlockInVM tbivm(JavaThread::current());
   MutexLocker ml(MetaspaceCritical_lock, Mutex::_no_safepoint_check_flag);
   for (;;) {
-    if (request->has_result()) {
+    if (request->is_processed()) {
+      // The GC has processed this request during the purge.
+      // Return and check the result, and potentially call a last-effort GC.
       break;
     }
     MetaspaceCritical_lock->wait_without_safepoint_check();
@@ -144,12 +180,12 @@ void MetaspaceCriticalAllocation::block_if_concurrent_purge() {
   }
 }
 
-void MetaspaceCriticalAllocation::satisfy() {
+void MetaspaceCriticalAllocation::process() {
   assert_lock_strong(MetaspaceCritical_lock);
   bool all_satisfied = true;
   for (MetadataAllocationRequest* curr = _requests_head; curr != NULL; curr = curr->next()) {
     if (curr->result() != NULL) {
-      // Don't satisfy twice
+      // Don't satisfy twice (can still be processed twice)
       continue;
     }
     // Try to allocate metadata.
diff --git a/src/hotspot/share/memory/metaspaceCriticalAllocation.hpp b/src/hotspot/share/memory/metaspaceCriticalAllocation.hpp
index bdc9ad8cf2c4a..4ef0478d3f382 100644
--- a/src/hotspot/share/memory/metaspaceCriticalAllocation.hpp
+++ b/src/hotspot/share/memory/metaspaceCriticalAllocation.hpp
@@ -52,7 +52,7 @@ class ClassLoaderData;
 // survived that situation in theory. The motivation is that we are at this point so close
 // to being out of memory, and the VM is not having a good time, so the user really ought
 // to increase the amount of available metaspace anyway, instead of GC:ing around more
-// to satisfy a very small number of additional allocations. But it does solve pathologial
+// to satisfy a very small number of additional allocations. But it does solve pathological
 // unbounded starvation scenarios where OOM can get thrown even though most of metaspace
 // is full of dead metadata.
// @@ -77,7 +77,7 @@ class MetaspaceCriticalAllocation : public AllStatic { public: static void block_if_concurrent_purge(); - static void satisfy(); + static void process(); static MetaWord* allocate(ClassLoaderData* loader_data, size_t word_size, Metaspace::MetadataType type); }; diff --git a/src/hotspot/share/oops/arrayKlass.cpp b/src/hotspot/share/oops/arrayKlass.cpp index c3b0182114b27..9d7cd13177e83 100644 --- a/src/hotspot/share/oops/arrayKlass.cpp +++ b/src/hotspot/share/oops/arrayKlass.cpp @@ -156,6 +156,7 @@ void ArrayKlass::metaspace_pointers_do(MetaspaceClosure* it) { it->push((Klass**)&_lower_dimension); } +#if INCLUDE_CDS void ArrayKlass::remove_unshareable_info() { Klass::remove_unshareable_info(); if (_higher_dimension != NULL) { @@ -182,6 +183,7 @@ void ArrayKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handle p ak->restore_unshareable_info(loader_data, protection_domain, CHECK); } } +#endif // INCLUDE_CDS // Printing diff --git a/src/hotspot/share/oops/arrayKlass.hpp b/src/hotspot/share/oops/arrayKlass.hpp index 62138ce862df3..f94e038510bfe 100644 --- a/src/hotspot/share/oops/arrayKlass.hpp +++ b/src/hotspot/share/oops/arrayKlass.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -114,10 +114,12 @@ class ArrayKlass: public Klass { // JVMTI support jint jvmti_class_status() const; +#if INCLUDE_CDS // CDS support - remove and restore oops from metadata. Oops are not shared. virtual void remove_unshareable_info(); virtual void remove_java_mirror(); void restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS); +#endif // Printing void print_on(outputStream* st) const; diff --git a/src/hotspot/share/oops/compressedOops.hpp b/src/hotspot/share/oops/compressedOops.hpp index 2755b1eef9222..bdcc1289899e7 100644 --- a/src/hotspot/share/oops/compressedOops.hpp +++ b/src/hotspot/share/oops/compressedOops.hpp @@ -94,7 +94,7 @@ class CompressedOops : public AllStatic { static int shift() { return _narrow_oop._shift; } static bool use_implicit_null_checks() { return _narrow_oop._use_implicit_null_checks; } - static address* ptrs_base_addr() { return &_narrow_oop._base; } + static address ptrs_base_addr() { return (address)&_narrow_oop._base; } static address ptrs_base() { return _narrow_oop._base; } static bool is_in(void* addr); diff --git a/src/hotspot/share/oops/constantPool.cpp b/src/hotspot/share/oops/constantPool.cpp index 9b20ea0dd9cf2..c8ec68a21ae55 100644 --- a/src/hotspot/share/oops/constantPool.cpp +++ b/src/hotspot/share/oops/constantPool.cpp @@ -329,6 +329,7 @@ void ConstantPool::add_dumped_interned_strings() { } #endif +#if INCLUDE_CDS // CDS support. Create a new resolved_references array. 
void ConstantPool::restore_unshareable_info(TRAPS) { if (!_pool_holder->is_linked() && !_pool_holder->is_rewritten()) { @@ -342,9 +343,6 @@ void ConstantPool::restore_unshareable_info(TRAPS) { // Only create the new resolved references array if it hasn't been attempted before if (resolved_references() != NULL) return; - // restore the C++ vtable from the shared archive - restore_vtable(); - if (vmClasses::Object_klass_loaded()) { ClassLoaderData* loader_data = pool_holder()->class_loader_data(); #if INCLUDE_CDS_JAVA_HEAP @@ -427,6 +425,7 @@ void ConstantPool::remove_unshareable_info() { cache()->remove_unshareable_info(); } } +#endif // INCLUDE_CDS int ConstantPool::cp_to_object_index(int cp_index) { // this is harder don't do this so much. @@ -584,7 +583,7 @@ Klass* ConstantPool::klass_at_if_loaded(const constantPoolHandle& this_cp, int w oop protection_domain = this_cp->pool_holder()->protection_domain(); Handle h_prot (current, protection_domain); Handle h_loader (current, loader); - Klass* k = SystemDictionary::find_instance_klass(name, h_loader, h_prot); + Klass* k = SystemDictionary::find_instance_klass(current, name, h_loader, h_prot); // Avoid constant pool verification at a safepoint, as it takes the Module_lock. if (k != NULL && current->is_Java_thread()) { @@ -2214,15 +2213,15 @@ int ConstantPool::copy_cpool_bytes(int cpool_size, #undef DBG -bool ConstantPool::is_maybe_on_continuation_stack() const { - // This method uses the similar logic as nmethod::is_maybe_on_continuation_stack() +bool ConstantPool::is_maybe_on_stack() const { + // This method uses the similar logic as nmethod::is_maybe_on_stack() if (!Continuations::enabled()) { return false; } // If the condition below is true, it means that the nmethod was found to // be alive the previous completed marking cycle. - return cache()->gc_epoch() >= Continuations::previous_completed_gc_marking_cycle(); + return cache()->gc_epoch() >= CodeCache::previous_completed_gc_marking_cycle(); } // For redefinition, if any methods found in loom stack chunks, the gc_epoch is @@ -2237,7 +2236,7 @@ bool ConstantPool::on_stack() const { return false; } - return is_maybe_on_continuation_stack(); + return is_maybe_on_stack(); } void ConstantPool::set_on_stack(const bool value) { @@ -2271,9 +2270,10 @@ void ConstantPool::print_on(outputStream* st) const { st->print_cr(" - holder: " INTPTR_FORMAT, p2i(pool_holder())); } st->print_cr(" - cache: " INTPTR_FORMAT, p2i(cache())); - st->print_cr(" - resolved_references: " INTPTR_FORMAT, p2i(resolved_references())); + st->print_cr(" - resolved_references: " INTPTR_FORMAT, p2i(resolved_references_or_null())); st->print_cr(" - reference_map: " INTPTR_FORMAT, p2i(reference_map())); st->print_cr(" - resolved_klasses: " INTPTR_FORMAT, p2i(resolved_klasses())); + st->print_cr(" - cp length: %d", length()); for (int index = 1; index < length(); index++) { // Index 0 is unused ((ConstantPool*)this)->print_entry_on(index, st); diff --git a/src/hotspot/share/oops/constantPool.hpp b/src/hotspot/share/oops/constantPool.hpp index aa0b4ee592f3d..c768928712ab5 100644 --- a/src/hotspot/share/oops/constantPool.hpp +++ b/src/hotspot/share/oops/constantPool.hpp @@ -205,7 +205,7 @@ class ConstantPool : public Metadata { // can't be removed from the set of previous versions saved in the instance // class. 
   bool on_stack() const;
-  bool is_maybe_on_continuation_stack() const;
+  bool is_maybe_on_stack() const;
   void set_on_stack(const bool value);
 
   // Faster than MetaspaceObj::is_shared() - used by set_on_stack()
@@ -691,17 +691,14 @@ class ConstantPool : public Metadata {
     resolve_string_constants_impl(h_this, CHECK);
   }
 
+#if INCLUDE_CDS
   // CDS support
   void archive_resolved_references() NOT_CDS_JAVA_HEAP_RETURN;
   void add_dumped_interned_strings() NOT_CDS_JAVA_HEAP_RETURN;
   void resolve_class_constants(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
   void remove_unshareable_info();
   void restore_unshareable_info(TRAPS);
-  // The ConstantPool vtable is restored by this call when the ConstantPool is
-  // in the shared archive. See patch_klass_vtables() in metaspaceShared.cpp for
-  // all the gory details. SA, dtrace and pstack helpers distinguish metadata
-  // by their vtable.
-  void restore_vtable() { guarantee(is_constantPool(), "vtable restored by this call"); }
+#endif
 
  private:
   enum { _no_index_sentinel = -1, _possible_index_sentinel = -2 };
diff --git a/src/hotspot/share/oops/cpCache.cpp b/src/hotspot/share/oops/cpCache.cpp
index 9a64039d4e4d4..ef66ea6e26166 100644
--- a/src/hotspot/share/oops/cpCache.cpp
+++ b/src/hotspot/share/oops/cpCache.cpp
@@ -29,6 +29,7 @@
 #include "classfile/systemDictionary.hpp"
 #include "classfile/systemDictionaryShared.hpp"
 #include "classfile/vmClasses.hpp"
+#include "code/codeCache.hpp"
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/bytecodes.hpp"
 #include "interpreter/interpreter.hpp"
@@ -48,7 +49,6 @@
 #include "prims/methodHandles.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/atomic.hpp"
-#include "runtime/continuation.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/vm_version.hpp"
@@ -681,21 +681,19 @@ void ConstantPoolCache::initialize(const intArray& inverse_index_map,
 
 // Record the GC marking cycle when redefined vs. when found in the loom stack chunks.
 void ConstantPoolCache::record_gc_epoch() {
-  _gc_epoch = Continuations::gc_epoch();
+  _gc_epoch = CodeCache::gc_epoch();
 }
 
-void ConstantPoolCache::save_for_archive(TRAPS) {
 #if INCLUDE_CDS
+void ConstantPoolCache::save_for_archive(TRAPS) {
   ClassLoaderData* loader_data = constant_pool()->pool_holder()->class_loader_data();
   _initial_entries = MetadataFactory::new_array<ConstantPoolCacheEntry>(loader_data, length(), CHECK);
   for (int i = 0; i < length(); i++) {
     _initial_entries->at_put(i, *entry_at(i));
   }
-#endif
 }
 
 void ConstantPoolCache::remove_unshareable_info() {
-#if INCLUDE_CDS
   Arguments::assert_is_dumping_archive();
   // <this> is the copy to be written into the archive. It's in the ArchiveBuilder's "buffer space".
// However, this->_initial_entries was not copied/relocated by the ArchiveBuilder, so it's @@ -708,8 +706,8 @@ void ConstantPoolCache::remove_unshareable_info() { *entry_at(i) = _initial_entries->at(i); } _initial_entries = NULL; -#endif } +#endif // INCLUDE_CDS void ConstantPoolCache::deallocate_contents(ClassLoaderData* data) { assert(!is_shared(), "shared caches are not deallocated"); diff --git a/src/hotspot/share/oops/cpCache.hpp b/src/hotspot/share/oops/cpCache.hpp index 3e9a607031456..369c6953a507d 100644 --- a/src/hotspot/share/oops/cpCache.hpp +++ b/src/hotspot/share/oops/cpCache.hpp @@ -455,9 +455,11 @@ class ConstantPoolCache: public MetaspaceObj { // Assembly code support static int resolved_references_offset_in_bytes() { return offset_of(ConstantPoolCache, _resolved_references); } - // CDS support +#if INCLUDE_CDS void remove_unshareable_info(); void save_for_archive(TRAPS); +#endif + private: void walk_entries_for_initialization(bool check_only); void set_length(int length) { _length = length; } diff --git a/src/hotspot/share/oops/instanceKlass.cpp b/src/hotspot/share/oops/instanceKlass.cpp index 9083153320c2d..a29800c9c1a55 100644 --- a/src/hotspot/share/oops/instanceKlass.cpp +++ b/src/hotspot/share/oops/instanceKlass.cpp @@ -2341,10 +2341,6 @@ void InstanceKlass::add_dependent_nmethod(nmethod* nm) { dependencies().add_dependent_nmethod(nm); } -void InstanceKlass::remove_dependent_nmethod(nmethod* nm) { - dependencies().remove_dependent_nmethod(nm); -} - void InstanceKlass::clean_dependency_context() { dependencies().clean_unloading_dependents(); } @@ -2455,6 +2451,7 @@ void InstanceKlass::metaspace_pointers_do(MetaspaceClosure* it) { it->push(&_record_components); } +#if INCLUDE_CDS void InstanceKlass::remove_unshareable_info() { if (is_linked()) { @@ -2655,6 +2652,7 @@ void InstanceKlass::assign_class_loader_type() { set_shared_class_loader_type(ClassLoader::APP_LOADER); } } +#endif // INCLUDE_CDS #if INCLUDE_JVMTI static void clear_all_breakpoints(Method* m) { diff --git a/src/hotspot/share/oops/instanceKlass.hpp b/src/hotspot/share/oops/instanceKlass.hpp index a0b5210926c7a..b149931ee958e 100644 --- a/src/hotspot/share/oops/instanceKlass.hpp +++ b/src/hotspot/share/oops/instanceKlass.hpp @@ -939,7 +939,6 @@ class InstanceKlass: public Klass { inline DependencyContext dependencies(); int mark_dependent_nmethods(KlassDepChange& changes); void add_dependent_nmethod(nmethod* nm); - void remove_dependent_nmethod(nmethod* nm); void clean_dependency_context(); // On-stack replacement support @@ -1212,12 +1211,15 @@ class InstanceKlass: public Klass { // log class name to classlist void log_to_classlist() const; public: + +#if INCLUDE_CDS // CDS support - remove and restore oops from metadata. Oops are not shared. 
virtual void remove_unshareable_info(); virtual void remove_java_mirror(); void restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, PackageEntry* pkg_entry, TRAPS); void init_shared_package_entry(); bool can_be_verified_at_dumptime() const; +#endif jint compute_modifier_flags() const; diff --git a/src/hotspot/share/oops/klass.cpp b/src/hotspot/share/oops/klass.cpp index 4f2e4dea2a020..56b6650300344 100644 --- a/src/hotspot/share/oops/klass.cpp +++ b/src/hotspot/share/oops/klass.cpp @@ -537,6 +537,7 @@ void Klass::metaspace_pointers_do(MetaspaceClosure* it) { } } +#if INCLUDE_CDS void Klass::remove_unshareable_info() { assert (Arguments::is_dumping_archive(), "only called during CDS dump time"); @@ -627,6 +628,7 @@ void Klass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protec java_lang_Class::create_mirror(this, loader, module_handle, protection_domain, Handle(), CHECK); } } +#endif // INCLUDE_CDS #if INCLUDE_CDS_JAVA_HEAP oop Klass::archived_java_mirror() { diff --git a/src/hotspot/share/oops/klass.hpp b/src/hotspot/share/oops/klass.hpp index 845f949740596..d624acbcb15bc 100644 --- a/src/hotspot/share/oops/klass.hpp +++ b/src/hotspot/share/oops/klass.hpp @@ -545,7 +545,9 @@ class Klass : public Metadata { void set_vtable_length(int len) { _vtable_len= len; } vtableEntry* start_of_vtable() const; +#if INCLUDE_CDS void restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS); +#endif public: Method* method_at_vtable(int index); @@ -554,6 +556,7 @@ class Klass : public Metadata { return byte_offset_of(Klass, _vtable_len); } +#if INCLUDE_CDS // CDS support - remove and restore oops from metadata. Oops are not shared. virtual void remove_unshareable_info(); virtual void remove_java_mirror(); @@ -569,6 +572,7 @@ class Klass : public Metadata { return true; } } +#endif // INCLUDE_CDS public: // ALL FUNCTIONS BELOW THIS POINT ARE DISPATCHED FROM AN OOP diff --git a/src/hotspot/share/oops/method.cpp b/src/hotspot/share/oops/method.cpp index 73975cd31a7cc..b70f8c74f4ae1 100644 --- a/src/hotspot/share/oops/method.cpp +++ b/src/hotspot/share/oops/method.cpp @@ -401,6 +401,7 @@ void Method::metaspace_pointers_do(MetaspaceClosure* it) { NOT_PRODUCT(it->push(&_name);) } +#if INCLUDE_CDS // Attempt to return method to original state. Clear any pointers // (to objects outside the shared spaces). We won't be able to predict // where they should point in a new JVM. 
Further initialize some @@ -411,6 +412,11 @@ void Method::remove_unshareable_info() { JFR_ONLY(REMOVE_METHOD_ID(this);) } +void Method::restore_unshareable_info(TRAPS) { + assert(is_method() && is_valid_method(this), "ensure C++ vtable is restored"); +} +#endif + void Method::set_vtable_index(int index) { if (is_shared() && !MetaspaceShared::remapped_readwrite() && method_holder()->verified_at_dump_time()) { // At runtime initialize_vtable is rerun as part of link_class_impl() @@ -639,10 +645,6 @@ MethodCounters* Method::build_method_counters(Thread* current, Method* m) { MetadataFactory::free_metadata(mh->method_holder()->class_loader_data(), counters); } - if (LogTouchedMethods) { - mh->log_touched(current); - } - return mh->method_counters(); } @@ -971,7 +973,7 @@ bool Method::is_klass_loaded_by_klass_index(int klass_index) const { Symbol* klass_name = constants()->klass_name_at(klass_index); Handle loader(thread, method_holder()->class_loader()); Handle prot (thread, method_holder()->protection_domain()); - return SystemDictionary::find_instance_klass(klass_name, loader, prot) != NULL; + return SystemDictionary::find_instance_klass(thread, klass_name, loader, prot) != NULL; } else { return true; } @@ -1165,9 +1167,6 @@ void Method::unlink_code(CompiledMethod *compare) { // We need to check if either the _code or _from_compiled_code_entry_point // refer to this nmethod because there is a race in setting these two fields // in Method* as seen in bugid 4947125. - // If the vep() points to the zombie nmethod, the memory for the nmethod - // could be flushed and the compiler and vtable stubs could still call - // through it. if (code() == compare || from_compiled_entry() == compare->verified_entry_point()) { clear_code(); @@ -1266,10 +1265,6 @@ address Method::make_adapters(const methodHandle& mh, TRAPS) { return adapter->get_c2i_entry(); } -void Method::restore_unshareable_info(TRAPS) { - assert(is_method() && is_valid_method(this), "ensure C++ vtable is restored"); -} - address Method::from_compiled_entry_no_trampoline() const { CompiledMethod *code = Atomic::load_acquire(&_code); if (code) { @@ -2448,85 +2443,6 @@ void Method::print_value_on(outputStream* st) const { if (WizardMode && code() != NULL) st->print(" ((nmethod*)%p)", code()); } -// LogTouchedMethods and PrintTouchedMethods - -// TouchedMethodRecord -- we can't use a HashtableEntry because -// the Method may be garbage collected. Let's roll our own hash table. -class TouchedMethodRecord : CHeapObj { -public: - // It's OK to store Symbols here because they will NOT be GC'ed if - // LogTouchedMethods is enabled. 
- TouchedMethodRecord* _next; - Symbol* _class_name; - Symbol* _method_name; - Symbol* _method_signature; -}; - -static const int TOUCHED_METHOD_TABLE_SIZE = 20011; -static TouchedMethodRecord** _touched_method_table = NULL; - -void Method::log_touched(Thread* current) { - - const int table_size = TOUCHED_METHOD_TABLE_SIZE; - Symbol* my_class = klass_name(); - Symbol* my_name = name(); - Symbol* my_sig = signature(); - - unsigned int hash = my_class->identity_hash() + - my_name->identity_hash() + - my_sig->identity_hash(); - juint index = juint(hash) % table_size; - - MutexLocker ml(current, TouchedMethodLog_lock); - if (_touched_method_table == NULL) { - _touched_method_table = NEW_C_HEAP_ARRAY2(TouchedMethodRecord*, table_size, - mtTracing, CURRENT_PC); - memset(_touched_method_table, 0, sizeof(TouchedMethodRecord*)*table_size); - } - - TouchedMethodRecord* ptr = _touched_method_table[index]; - while (ptr) { - if (ptr->_class_name == my_class && - ptr->_method_name == my_name && - ptr->_method_signature == my_sig) { - return; - } - if (ptr->_next == NULL) break; - ptr = ptr->_next; - } - TouchedMethodRecord* nptr = NEW_C_HEAP_OBJ(TouchedMethodRecord, mtTracing); - my_class->increment_refcount(); - my_name->increment_refcount(); - my_sig->increment_refcount(); - nptr->_class_name = my_class; - nptr->_method_name = my_name; - nptr->_method_signature = my_sig; - nptr->_next = NULL; - - if (ptr == NULL) { - // first - _touched_method_table[index] = nptr; - } else { - ptr->_next = nptr; - } -} - -void Method::print_touched_methods(outputStream* out) { - MutexLocker ml(Thread::current()->is_VM_thread() ? NULL : TouchedMethodLog_lock); - out->print_cr("# Method::print_touched_methods version 1"); - if (_touched_method_table) { - for (int i = 0; i < TOUCHED_METHOD_TABLE_SIZE; i++) { - TouchedMethodRecord* ptr = _touched_method_table[i]; - while(ptr) { - ptr->_class_name->print_symbol_on(out); out->print("."); - ptr->_method_name->print_symbol_on(out); out->print(":"); - ptr->_method_signature->print_symbol_on(out); out->cr(); - ptr = ptr->_next; - } - } - } -} - // Verification void Method::verify_on(outputStream* st) { diff --git a/src/hotspot/share/oops/method.hpp b/src/hotspot/share/oops/method.hpp index 9f10ad219b235..8855032a25abc 100644 --- a/src/hotspot/share/oops/method.hpp +++ b/src/hotspot/share/oops/method.hpp @@ -134,7 +134,10 @@ class Method : public Metadata { virtual bool is_method() const { return true; } +#if INCLUDE_CDS + void remove_unshareable_info(); void restore_unshareable_info(TRAPS); +#endif // accessors for instance variables @@ -404,14 +407,6 @@ class Method : public Metadata { } } - int nmethod_age() const { - if (method_counters() == NULL) { - return INT_MAX; - } else { - return method_counters()->nmethod_age(); - } - } - int invocation_count() const; int backedge_count() const; @@ -432,10 +427,6 @@ class Method : public Metadata { int64_t compiled_invocation_count() const { return 0; } #endif // not PRODUCT - // Clear (non-shared space) pointers which could not be relevant - // if this (shared) method were mapped into another JVM. 
- void remove_unshareable_info(); - // nmethod/verified compiler entry address verified_code_entry(); bool check_code() const; // Not inline to avoid circular ref diff --git a/src/hotspot/share/oops/methodCounters.cpp b/src/hotspot/share/oops/methodCounters.cpp index 67b2ef96060a4..e9237b18e6009 100644 --- a/src/hotspot/share/oops/methodCounters.cpp +++ b/src/hotspot/share/oops/methodCounters.cpp @@ -30,7 +30,6 @@ MethodCounters::MethodCounters(const methodHandle& mh) : _prev_time(0), _rate(0), - _nmethod_age(INT_MAX), _highest_comp_level(0), _highest_osr_comp_level(0) { @@ -39,10 +38,6 @@ MethodCounters::MethodCounters(const methodHandle& mh) : invocation_counter()->init(); backedge_counter()->init(); - if (StressCodeAging) { - set_nmethod_age(HotMethodDetectionLimit); - } - // Set per-method thresholds. double scale = 1.0; CompilerOracle::has_option_value(mh, CompileCommand::CompileThresholdScaling, scale); @@ -65,7 +60,6 @@ void MethodCounters::clear_counters() { invocation_counter()->reset(); backedge_counter()->reset(); set_interpreter_throwout_count(0); - set_nmethod_age(INT_MAX); set_prev_time(0); set_prev_event_count(0); set_rate(0); diff --git a/src/hotspot/share/oops/methodCounters.hpp b/src/hotspot/share/oops/methodCounters.hpp index 2698ce3d3ced3..aac1cd834dbe7 100644 --- a/src/hotspot/share/oops/methodCounters.hpp +++ b/src/hotspot/share/oops/methodCounters.hpp @@ -39,7 +39,6 @@ class MethodCounters : public Metadata { InvocationCounter _backedge_counter; // Incremented before each backedge taken - used to trigger frequency-based optimizations jlong _prev_time; // Previous time the rate was acquired float _rate; // Events (invocation and backedge counter increments) per millisecond - int _nmethod_age; int _invoke_mask; // per-method Tier0InvokeNotifyFreqLog int _backedge_mask; // per-method Tier0BackedgeNotifyFreqLog int _prev_event_count; // Total number of events saved at previous callback @@ -49,14 +48,6 @@ class MethodCounters : public Metadata { #if INCLUDE_JVMTI u2 _number_of_breakpoints; // fullspeed debugging support #endif - // NMethod age is a counter for warm methods detection in the code cache sweeper. - // The counter is reset by the sweeper and is decremented by some of the compiled - // code. The counter values are interpreted as follows: - // 1. (HotMethodDetection..INT_MAX] - initial value, no counters inserted - // 2. [1..HotMethodDetectionLimit) - the method is warm, the counter is used - // to figure out which methods can be flushed. - // 3. (INT_MIN..0] - method is hot and will deopt and get - // recompiled without the counters u1 _highest_comp_level; // Highest compile level this method has ever seen. 
u1 _highest_osr_comp_level; // Same for OSR level @@ -122,24 +113,6 @@ class MethodCounters : public Metadata { InvocationCounter* invocation_counter() { return &_invocation_counter; } InvocationCounter* backedge_counter() { return &_backedge_counter; } - int nmethod_age() { - return _nmethod_age; - } - void set_nmethod_age(int age) { - _nmethod_age = age; - } - void reset_nmethod_age() { - set_nmethod_age(HotMethodDetectionLimit); - } - - static bool is_nmethod_hot(int age) { return age <= 0; } - static bool is_nmethod_warm(int age) { return age < HotMethodDetectionLimit; } - static bool is_nmethod_age_unset(int age) { return age > HotMethodDetectionLimit; } - - static ByteSize nmethod_age_offset() { - return byte_offset_of(MethodCounters, _nmethod_age); - } - static ByteSize invocation_counter_offset() { return byte_offset_of(MethodCounters, _invocation_counter); } diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp index d7d947ca2589e..caf01f63e4d7c 100644 --- a/src/hotspot/share/opto/compile.cpp +++ b/src/hotspot/share/opto/compile.cpp @@ -1017,7 +1017,6 @@ void Compile::Init(int aliaslevel) { set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); //TODO: consider do_vector_loop() mandate use_cmove unconditionally NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n", method()->name()->as_quoted_ascii());}) - set_age_code(has_method() && method()->profile_aging()); set_rtm_state(NoRTM); // No RTM lock eliding by default _max_node_limit = _directive->MaxNodeLimitOption; diff --git a/src/hotspot/share/opto/compile.hpp b/src/hotspot/share/opto/compile.hpp index a622f13694480..3d4863f5c0bfd 100644 --- a/src/hotspot/share/opto/compile.hpp +++ b/src/hotspot/share/opto/compile.hpp @@ -327,7 +327,6 @@ class Compile : public Phase { bool _do_freq_based_layout; // True if we intend to do frequency based block layout bool _do_vector_loop; // True if allowed to execute loop in parallel iterations bool _use_cmove; // True if CMove should be used without profitability analysis - bool _age_code; // True if we need to profile code age (decrement the aging counter) int _AliasLevel; // Locally-adjusted version of AliasLevel flag. 
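For context on the deletions above: the removed _nmethod_age counter encoded three states in a single int, which the sweeper classified with the predicates being deleted here. A compilable restatement of those removed semantics, assuming the former HotMethodDetectionLimit default of 100000 (the flag itself is removed later in this patch):

    #include <climits>

    // Restates the removed nmethod-age classification. The counter starts above
    // the limit ("unset"), compiled code decrements it toward zero, and:
    //   age >  limit     : no counters inserted yet
    //   0 < age < limit  : method is warm
    //   age <= 0         : method is hot (deopts and recompiles without counters)
    constexpr int kHotMethodDetectionLimit = 100000;  // former flag default

    constexpr bool is_nmethod_hot(int age)       { return age <= 0; }
    constexpr bool is_nmethod_warm(int age)      { return age < kHotMethodDetectionLimit; }
    constexpr bool is_nmethod_age_unset(int age) { return age > kHotMethodDetectionLimit; }

    static_assert(is_nmethod_age_unset(INT_MAX), "freshly created counters start unset");
    static_assert(is_nmethod_warm(1) && !is_nmethod_hot(1), "small positive age is warm");
    static_assert(is_nmethod_hot(0), "zero or below means hot");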
bool _print_assembly; // True if we should dump assembly code for this compilation bool _print_inlining; // True if we should print inlining for this compilation @@ -617,8 +616,6 @@ class Compile : public Phase { void set_do_vector_loop(bool z) { _do_vector_loop = z; } bool use_cmove() const { return _use_cmove; } void set_use_cmove(bool z) { _use_cmove = z; } - bool age_code() const { return _age_code; } - void set_age_code(bool z) { _age_code = z; } int AliasLevel() const { return _AliasLevel; } bool print_assembly() const { return _print_assembly; } void set_print_assembly(bool z) { _print_assembly = z; } diff --git a/src/hotspot/share/opto/loopopts.cpp b/src/hotspot/share/opto/loopopts.cpp index 787931782ae99..4843671e22548 100644 --- a/src/hotspot/share/opto/loopopts.cpp +++ b/src/hotspot/share/opto/loopopts.cpp @@ -2160,11 +2160,18 @@ static void clone_outer_loop_helper(Node* n, const IdealLoopTree *loop, const Id Node* c = phase->get_ctrl(u); IdealLoopTree* u_loop = phase->get_loop(c); assert(!loop->is_member(u_loop), "can be in outer loop or out of both loops only"); - if (outer_loop->is_member(u_loop) || - // nodes pinned with control in the outer loop but not referenced from the safepoint must be moved out of - // the outer loop too - (u->in(0) != NULL && outer_loop->is_member(phase->get_loop(u->in(0))))) { + if (outer_loop->is_member(u_loop)) { wq.push(u); + } else { + // nodes pinned with control in the outer loop but not referenced from the safepoint must be moved out of + // the outer loop too + Node* u_c = u->in(0); + if (u_c != NULL) { + IdealLoopTree* u_c_loop = phase->get_loop(u_c); + if (outer_loop->is_member(u_c_loop) && !loop->is_member(u_c_loop)) { + wq.push(u); + } + } } } } diff --git a/src/hotspot/share/opto/mulnode.cpp b/src/hotspot/share/opto/mulnode.cpp index 9f2fb9a3d1a8f..1a8c7a32f4a6d 100644 --- a/src/hotspot/share/opto/mulnode.cpp +++ b/src/hotspot/share/opto/mulnode.cpp @@ -239,18 +239,18 @@ MulNode* MulNode::make(Node* in1, Node* in2, BasicType bt) { //------------------------------Ideal------------------------------------------ // Check for power-of-2 multiply, then try the regular MulNode::Ideal Node *MulINode::Ideal(PhaseGVN *phase, bool can_reshape) { - // Swap constant to right - jint con; - if ((con = in(1)->find_int_con(0)) != 0) { - swap_edges(1, 2); - // Finish rest of method to use info in 'con' - } else if ((con = in(2)->find_int_con(0)) == 0) { + const jint con = in(2)->find_int_con(0); + if (con == 0) { + // If in(2) is not a constant, call Ideal() of the parent class to + // try to move constant to the right side. return MulNode::Ideal(phase, can_reshape); } - // Now we have a constant Node on the right and the constant in con - if (con == 0) return NULL; // By zero is handled by Value call - if (con == 1) return NULL; // By one is handled by Identity call + // Now we have a constant Node on the right and the constant in con. + if (con == 1) { + // By one is handled by Identity call + return NULL; + } // Check for negative constant; if so negate the final result bool sign_flip = false; @@ -262,7 +262,7 @@ Node *MulINode::Ideal(PhaseGVN *phase, bool can_reshape) { // Get low bit; check for being the only bit Node *res = NULL; - unsigned int bit1 = abs_con & (0-abs_con); // Extract low bit + unsigned int bit1 = submultiple_power_of_2(abs_con); if (bit1 == abs_con) { // Found a power of 2? 
res = new LShiftINode(in(1), phase->intcon(log2i_exact(bit1))); } else { @@ -334,18 +334,18 @@ const Type *MulINode::mul_ring(const Type *t0, const Type *t1) const { //------------------------------Ideal------------------------------------------ // Check for power-of-2 multiply, then try the regular MulNode::Ideal Node *MulLNode::Ideal(PhaseGVN *phase, bool can_reshape) { - // Swap constant to right - jlong con; - if ((con = in(1)->find_long_con(0)) != 0) { - swap_edges(1, 2); - // Finish rest of method to use info in 'con' - } else if ((con = in(2)->find_long_con(0)) == 0) { + const jlong con = in(2)->find_long_con(0); + if (con == 0) { + // If in(2) is not a constant, call Ideal() of the parent class to + // try to move constant to the right side. return MulNode::Ideal(phase, can_reshape); } - // Now we have a constant Node on the right and the constant in con - if (con == CONST64(0)) return NULL; // By zero is handled by Value call - if (con == CONST64(1)) return NULL; // By one is handled by Identity call + // Now we have a constant Node on the right and the constant in con. + if (con == 1) { + // By one is handled by Identity call + return NULL; + } // Check for negative constant; if so negate the final result bool sign_flip = false; @@ -356,7 +356,7 @@ Node *MulLNode::Ideal(PhaseGVN *phase, bool can_reshape) { // Get low bit; check for being the only bit Node *res = NULL; - julong bit1 = abs_con & (0-abs_con); // Extract low bit + julong bit1 = submultiple_power_of_2(abs_con); if (bit1 == abs_con) { // Found a power of 2? res = new LShiftLNode(in(1), phase->intcon(log2i_exact(bit1))); } else { diff --git a/src/hotspot/share/opto/output.hpp b/src/hotspot/share/opto/output.hpp index 7393f4facec34..713fbd14f808e 100644 --- a/src/hotspot/share/opto/output.hpp +++ b/src/hotspot/share/opto/output.hpp @@ -118,7 +118,7 @@ class C2SafepointPollStubTable { class C2EntryBarrierStub: public ResourceObj { Label _slow_path; Label _continuation; - Label _guard; // Used on AArch64 + Label _guard; // Used on AArch64 and RISCV public: C2EntryBarrierStub() : diff --git a/src/hotspot/share/opto/parse.hpp b/src/hotspot/share/opto/parse.hpp index 14724ed6995c5..d4c9f75b09112 100644 --- a/src/hotspot/share/opto/parse.hpp +++ b/src/hotspot/share/opto/parse.hpp @@ -561,8 +561,6 @@ class Parse : public GraphKit { bool create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi); void linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi); - void decrement_age(); - // helper function for call statistics void count_compiled_calls(bool at_method_entry, bool is_inline) PRODUCT_RETURN; diff --git a/src/hotspot/share/opto/parse1.cpp b/src/hotspot/share/opto/parse1.cpp index 6c171da7c767c..f3b7668df007e 100644 --- a/src/hotspot/share/opto/parse1.cpp +++ b/src/hotspot/share/opto/parse1.cpp @@ -577,9 +577,6 @@ Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses) } else { set_map(entry_map); do_method_entry(); - if (depth() == 1 && C->age_code()) { - decrement_age(); - } } if (depth() == 1 && !failing()) { @@ -2174,31 +2171,6 @@ void Parse::rtm_deopt() { #endif } -void Parse::decrement_age() { - MethodCounters* mc = method()->ensure_method_counters(); - if (mc == NULL) { - C->record_failure("Must have MCs"); - return; - } - assert(!is_osr_parse(), "Not doing this for OSRs"); - - // Set starting bci for uncommon trap. 
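The rewritten MulINode::Ideal and MulLNode::Ideal above key off submultiple_power_of_2(abs_con), which isolates the lowest set bit exactly as the replaced expression abs_con & (0-abs_con) did. A self-contained sketch of the decomposition the transform performs on the constant operand; only the case analysis follows the code above, the helper names and scalar evaluation are illustrative:

    #include <cassert>
    #include <cstdint>

    // Lowest set bit of x, i.e. the largest power of two dividing x; this is
    // the role submultiple_power_of_2() plays in the hunks above.
    constexpr uint32_t lowest_bit(uint32_t x) { return x & (0u - x); }

    int log2_exact(uint32_t pow2) {          // pow2 must be a power of two
      int n = 0;
      while (pow2 > 1) { pow2 >>= 1; ++n; }
      return n;
    }

    // Evaluate x * con with shifts and one add/sub, following the same case
    // analysis as MulINode::Ideal (illustrative scalar code, not HotSpot IR):
    //   x * 2^k         -> x << k
    //   x * (2^k + 2^m) -> (x << k) + (x << m)
    //   x * (2^k - 2^m) -> (x << k) - (x << m)
    uint32_t mul_by_shifts(uint32_t x, uint32_t con) {
      uint32_t bit1 = lowest_bit(con);
      if (bit1 == con) {                     // con is a power of two
        return x << log2_exact(bit1);
      }
      uint32_t bit2 = lowest_bit(con - bit1);
      if (bit1 + bit2 == con) {              // sum of two powers of two
        return (x << log2_exact(bit2)) + (x << log2_exact(bit1));
      }
      uint32_t temp = con + bit1;            // try 2^k - 2^m, e.g. 7 = 8 - 1
      if (lowest_bit(temp) == temp) {
        return (x << log2_exact(temp)) - (x << log2_exact(bit1));
      }
      return x * con;                        // no cheap decomposition
    }

    int main() {
      assert(mul_by_shifts(5, 32) == 160);   // power of two
      assert(mul_by_shifts(5, 24) == 120);   // 24 = 16 + 8
      assert(mul_by_shifts(5, 7)  == 35);    // 7  = 8 - 1
    }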
- set_parse_bci(0); - - const TypePtr* adr_type = TypeRawPtr::make((address)mc); - Node* mc_adr = makecon(adr_type); - Node* cnt_adr = basic_plus_adr(mc_adr, mc_adr, in_bytes(MethodCounters::nmethod_age_offset())); - Node* cnt = make_load(control(), cnt_adr, TypeInt::INT, T_INT, adr_type, MemNode::unordered); - Node* decr = _gvn.transform(new SubINode(cnt, makecon(TypeInt::ONE))); - store_to_memory(control(), cnt_adr, decr, T_INT, adr_type, MemNode::unordered); - Node *chk = _gvn.transform(new CmpINode(decr, makecon(TypeInt::ZERO))); - Node* tst = _gvn.transform(new BoolNode(chk, BoolTest::gt)); - { BuildCutout unless(this, tst, PROB_ALWAYS); - uncommon_trap(Deoptimization::Reason_tenured, - Deoptimization::Action_make_not_entrant); - } -} - //------------------------------return_current--------------------------------- // Append current _map to _exit_return void Parse::return_current(Node* value) { diff --git a/src/hotspot/share/opto/phaseX.cpp b/src/hotspot/share/opto/phaseX.cpp index 902cbed7d24b3..b54b509ebf034 100644 --- a/src/hotspot/share/opto/phaseX.cpp +++ b/src/hotspot/share/opto/phaseX.cpp @@ -1772,6 +1772,10 @@ void PhaseCCP::analyze() { // This loop is the meat of CCP. while (worklist.size() != 0) { Node* n = fetch_next_node(worklist); + if (n->is_SafePoint()) { + // Keep track of SafePoint nodes for PhaseCCP::transform() + _safepoints.push(n); + } const Type* new_type = n->Value(this); if (new_type != type(n)) { assert(ccp_type_widens(new_type, type(n)), "ccp type must widen"); @@ -1955,6 +1959,23 @@ Node *PhaseCCP::transform( Node *n ) { GrowableArray <Node *> trstack(C->live_nodes() >> 1); trstack.push(new_node); // Process children of cloned node + + // This CCP pass may prove that no exit test for a loop ever succeeds (i.e. the loop is infinite). In that case, + // the logic below doesn't follow any path from Root to the loop body: there's at least one such path but it's proven + // never taken (its type is TOP). As a consequence the node on the exit path that's input to Root (let's call it n) is + // replaced by the top node and the inputs of that node n are not enqueued for further processing. If CCP only works + // through the graph from Root, this causes the loop body to never be processed here even when it's not dead (that + // is, reachable from Root by following its uses). To prevent that issue, transform() starts walking the graph from Root + // and all safepoints. + for (uint i = 0; i < _safepoints.size(); ++i) { + Node* nn = _safepoints.at(i); + Node* new_node = _nodes[nn->_idx]; + assert(new_node == NULL, ""); + new_node = transform_once(nn); + _nodes.map(nn->_idx, new_node); + trstack.push(new_node); + } + while ( trstack.is_nonempty() ) { Node *clone = trstack.pop(); uint cnt = clone->req(); diff --git a/src/hotspot/share/opto/phaseX.hpp b/src/hotspot/share/opto/phaseX.hpp index d2637a8102fd8..d273485ba0cda 100644 --- a/src/hotspot/share/opto/phaseX.hpp +++ b/src/hotspot/share/opto/phaseX.hpp @@ -566,6 +566,7 @@ class PhaseIterGVN : public PhaseGVN { // Phase for performing global Conditional Constant Propagation. // Should be replaced with combined CCP & GVN someday. class PhaseCCP : public PhaseIterGVN { + Unique_Node_List _safepoints; // Non-recursive. Use analysis to transform single Node.
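The _safepoints bookkeeping added above closes a reachability gap: if CCP proves every exit test of a loop constantly false, a walk that starts only at Root never reaches the still-live loop body. A toy worklist walk showing the multi-root seeding idea, with hypothetical graph types rather than C2's:

    #include <vector>

    // Toy graph walk showing why PhaseCCP::transform() now seeds the worklist
    // with Root *and* every recorded SafePoint: nodes reachable only through a
    // path that constant propagation proved dead would otherwise be skipped.
    struct Node {
      std::vector<Node*> inputs;
      bool transformed = false;
    };

    void transform_from(const std::vector<Node*>& roots) {
      std::vector<Node*> worklist(roots.begin(), roots.end());
      while (!worklist.empty()) {
        Node* n = worklist.back();
        worklist.pop_back();
        if (n == nullptr || n->transformed) continue;
        n->transformed = true;               // stand-in for transform_once()
        for (Node* in : n->inputs) {
          worklist.push_back(in);            // process children of visited node
        }
      }
    }

    // Usage, mirroring the loop over _safepoints added in the patch:
    // transform_from({root, safepoint1, safepoint2});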
virtual Node* transform_once(Node* n); diff --git a/src/hotspot/share/opto/subnode.cpp b/src/hotspot/share/opto/subnode.cpp index a687ea2b6cbb7..292717bdd1a31 100644 --- a/src/hotspot/share/opto/subnode.cpp +++ b/src/hotspot/share/opto/subnode.cpp @@ -38,6 +38,7 @@ #include "opto/phaseX.hpp" #include "opto/subnode.hpp" #include "runtime/sharedRuntime.hpp" +#include "utilities/moveBits.hpp" // Portions of code courtesy of Clifford Click @@ -1900,13 +1901,6 @@ const Type* SqrtFNode::Value(PhaseGVN* phase) const { return TypeF::make( (float)sqrt( (double)f ) ); } -static jlong reverse_bits(jlong val) { - jlong res = ((val & 0xF0F0F0F0F0F0F0F0L) >> 4) | ((val & 0x0F0F0F0F0F0F0F0F) << 4); - res = ((res & 0xCCCCCCCCCCCCCCCCL) >> 2) | ((res & 0x3333333333333333L) << 2); - res = ((res & 0xAAAAAAAAAAAAAAAAL) >> 1) | ((res & 0x5555555555555555L) << 1); - return res; -} - const Type* ReverseINode::Value(PhaseGVN* phase) const { const Type *t1 = phase->type( in(1) ); if (t1 == Type::TOP) { @@ -1917,7 +1911,7 @@ const Type* ReverseINode::Value(PhaseGVN* phase) const { jint res = reverse_bits(t1int->get_con()); return TypeInt::make(res); } - return t1int; + return bottom_type(); } const Type* ReverseLNode::Value(PhaseGVN* phase) const { @@ -1927,10 +1921,10 @@ const Type* ReverseLNode::Value(PhaseGVN* phase) const { } const TypeLong* t1long = t1->isa_long(); if (t1long && t1long->is_con()) { - jint res = reverse_bits(t1long->get_con()); + jlong res = reverse_bits(t1long->get_con()); return TypeLong::make(res); } - return t1long; + return bottom_type(); } Node* ReverseINode::Identity(PhaseGVN* phase) { diff --git a/src/hotspot/share/prims/jvm.cpp b/src/hotspot/share/prims/jvm.cpp index 3a0fb784c31c4..a2e464d2f27cd 100644 --- a/src/hotspot/share/prims/jvm.cpp +++ b/src/hotspot/share/prims/jvm.cpp @@ -1095,9 +1095,9 @@ JVM_ENTRY(jclass, JVM_FindLoadedClass(JNIEnv *env, jobject loader, jstring name) // The Java level wrapper will perform the necessary security check allowing // us to pass the NULL as the initiating class loader. Handle h_loader(THREAD, JNIHandles::resolve(loader)); - Klass* k = SystemDictionary::find_instance_or_array_klass(klass_name, - h_loader, - Handle()); + Klass* k = SystemDictionary::find_instance_or_array_klass(THREAD, klass_name, + h_loader, + Handle()); #if INCLUDE_CDS if (k == NULL) { // If the class is not already loaded, try to see if it's in the shared diff --git a/src/hotspot/share/prims/jvmtiCodeBlobEvents.cpp b/src/hotspot/share/prims/jvmtiCodeBlobEvents.cpp index acf15956de8b1..44d6bee6609b3 100644 --- a/src/hotspot/share/prims/jvmtiCodeBlobEvents.cpp +++ b/src/hotspot/share/prims/jvmtiCodeBlobEvents.cpp @@ -234,7 +234,7 @@ jvmtiError JvmtiCodeBlobEvents::generate_compiled_method_load_events(JvmtiEnv* e // Save events to the queue for posting outside the CodeCache_lock. 
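The ReverseINode/ReverseLNode::Value changes above switch to the shared reverse_bits() from utilities/moveBits.hpp and return bottom_type() for non-constant inputs, since bit reversal does not preserve an input's value range. Note that the deleted local helper stopped after the 4/2/1-bit swaps, which only reverses bits within each byte; a full reversal must also swap bytes and half-words. A standalone 32-bit version of the complete mask-and-swap ladder (illustrative, not the moveBits.hpp implementation):

    #include <cassert>
    #include <cstdint>

    // Full 32-bit bit reversal. Each stage swaps neighbors at one granularity;
    // together the five stages complement every bit of a bit's address, which
    // is exactly a reversal. The first three lines match the deleted helper.
    uint32_t reverse_bits32(uint32_t v) {
      v = ((v & 0xF0F0F0F0u) >> 4) | ((v & 0x0F0F0F0Fu) << 4);
      v = ((v & 0xCCCCCCCCu) >> 2) | ((v & 0x33333333u) << 2);
      v = ((v & 0xAAAAAAAAu) >> 1) | ((v & 0x55555555u) << 1);
      v = ((v & 0xFF00FF00u) >> 8) | ((v & 0x00FF00FFu) << 8);   // swap bytes...
      v = (v >> 16) | (v << 16);                                 // ...and halves
      return v;
    }

    int main() {
      assert(reverse_bits32(0x00000001u) == 0x80000000u);
      assert(reverse_bits32(0x12345678u) == 0x1E6A2C48u);
    }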
MutexLocker mu(java_thread, CodeCache_lock, Mutex::_no_safepoint_check_flag); // Iterate over non-profiled and profiled nmethods - NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading); + NMethodIterator iter(NMethodIterator::only_not_unloading); while(iter.next()) { nmethod* current = iter.method(); current->post_compiled_method_load_event(state); diff --git a/src/hotspot/share/prims/jvmtiEnv.cpp b/src/hotspot/share/prims/jvmtiEnv.cpp index c0a70b7282c54..a9efe01c92eff 100644 --- a/src/hotspot/share/prims/jvmtiEnv.cpp +++ b/src/hotspot/share/prims/jvmtiEnv.cpp @@ -2246,7 +2246,7 @@ JvmtiEnv::GetLocalObject(jthread thread, jint depth, jint slot, jobject* value_p JvmtiVTMSTransitionDisabler disabler; jvmtiError err = JVMTI_ERROR_NONE; - oop thread_obj = JNIHandles::resolve_external_guard(thread); + oop thread_obj = current_thread_obj_or_resolve_external_guard(thread); if (java_lang_VirtualThread::is_instance(thread_obj)) { VM_VirtualThreadGetOrSetLocal op(this, Handle(current_thread, thread_obj), current_thread, depth, slot); @@ -2286,7 +2286,7 @@ JvmtiEnv::GetLocalInstance(jthread thread, jint depth, jobject* value_ptr){ JvmtiVTMSTransitionDisabler disabler; jvmtiError err = JVMTI_ERROR_NONE; - oop thread_obj = JNIHandles::resolve_external_guard(thread); + oop thread_obj = current_thread_obj_or_resolve_external_guard(thread); if (java_lang_VirtualThread::is_instance(thread_obj)) { VM_VirtualThreadGetReceiver op(this, Handle(current_thread, thread_obj), current_thread, depth); @@ -2327,7 +2327,7 @@ JvmtiEnv::GetLocalInt(jthread thread, jint depth, jint slot, jint* value_ptr) { JvmtiVTMSTransitionDisabler disabler; jvmtiError err = JVMTI_ERROR_NONE; - oop thread_obj = JNIHandles::resolve_external_guard(thread); + oop thread_obj = current_thread_obj_or_resolve_external_guard(thread); if (java_lang_VirtualThread::is_instance(thread_obj)) { VM_VirtualThreadGetOrSetLocal op(this, Handle(current_thread, thread_obj), depth, slot, T_INT); @@ -2368,7 +2368,7 @@ JvmtiEnv::GetLocalLong(jthread thread, jint depth, jint slot, jlong* value_ptr) JvmtiVTMSTransitionDisabler disabler; jvmtiError err = JVMTI_ERROR_NONE; - oop thread_obj = JNIHandles::resolve_external_guard(thread); + oop thread_obj = current_thread_obj_or_resolve_external_guard(thread); if (java_lang_VirtualThread::is_instance(thread_obj)) { VM_VirtualThreadGetOrSetLocal op(this, Handle(current_thread, thread_obj), depth, slot, T_LONG); @@ -2409,7 +2409,7 @@ JvmtiEnv::GetLocalFloat(jthread thread, jint depth, jint slot, jfloat* value_ptr JvmtiVTMSTransitionDisabler disabler; jvmtiError err = JVMTI_ERROR_NONE; - oop thread_obj = JNIHandles::resolve_external_guard(thread); + oop thread_obj = current_thread_obj_or_resolve_external_guard(thread); if (java_lang_VirtualThread::is_instance(thread_obj)) { VM_VirtualThreadGetOrSetLocal op(this, Handle(current_thread, thread_obj), depth, slot, T_FLOAT); @@ -2450,7 +2450,7 @@ JvmtiEnv::GetLocalDouble(jthread thread, jint depth, jint slot, jdouble* value_p JvmtiVTMSTransitionDisabler disabler; jvmtiError err = JVMTI_ERROR_NONE; - oop thread_obj = JNIHandles::resolve_external_guard(thread); + oop thread_obj = current_thread_obj_or_resolve_external_guard(thread); if (java_lang_VirtualThread::is_instance(thread_obj)) { VM_VirtualThreadGetOrSetLocal op(this, Handle(current_thread, thread_obj), depth, slot, T_DOUBLE); @@ -2492,7 +2492,7 @@ JvmtiEnv::SetLocalObject(jthread thread, jint depth, jint slot, jobject value) { val.l = value; jvmtiError err = JVMTI_ERROR_NONE; - oop thread_obj = 
JNIHandles::resolve_external_guard(thread); + oop thread_obj = current_thread_obj_or_resolve_external_guard(thread); if (java_lang_VirtualThread::is_instance(thread_obj)) { VM_VirtualThreadGetOrSetLocal op(this, Handle(current_thread, thread_obj), depth, slot, T_OBJECT, val); @@ -2528,7 +2528,7 @@ JvmtiEnv::SetLocalInt(jthread thread, jint depth, jint slot, jint value) { val.i = value; jvmtiError err = JVMTI_ERROR_NONE; - oop thread_obj = JNIHandles::resolve_external_guard(thread); + oop thread_obj = current_thread_obj_or_resolve_external_guard(thread); if (java_lang_VirtualThread::is_instance(thread_obj)) { VM_VirtualThreadGetOrSetLocal op(this, Handle(current_thread, thread_obj), depth, slot, T_INT, val); @@ -2564,7 +2564,7 @@ JvmtiEnv::SetLocalLong(jthread thread, jint depth, jint slot, jlong value) { val.j = value; jvmtiError err = JVMTI_ERROR_NONE; - oop thread_obj = JNIHandles::resolve_external_guard(thread); + oop thread_obj = current_thread_obj_or_resolve_external_guard(thread); if (java_lang_VirtualThread::is_instance(thread_obj)) { VM_VirtualThreadGetOrSetLocal op(this, Handle(current_thread, thread_obj), depth, slot, T_LONG, val); @@ -2600,7 +2600,7 @@ JvmtiEnv::SetLocalFloat(jthread thread, jint depth, jint slot, jfloat value) { val.f = value; jvmtiError err = JVMTI_ERROR_NONE; - oop thread_obj = JNIHandles::resolve_external_guard(thread); + oop thread_obj = current_thread_obj_or_resolve_external_guard(thread); if (java_lang_VirtualThread::is_instance(thread_obj)) { VM_VirtualThreadGetOrSetLocal op(this, Handle(current_thread, thread_obj), depth, slot, T_FLOAT, val); @@ -2636,7 +2636,7 @@ JvmtiEnv::SetLocalDouble(jthread thread, jint depth, jint slot, jdouble value) { val.d = value; jvmtiError err = JVMTI_ERROR_NONE; - oop thread_obj = JNIHandles::resolve_external_guard(thread); + oop thread_obj = current_thread_obj_or_resolve_external_guard(thread); if (java_lang_VirtualThread::is_instance(thread_obj)) { VM_VirtualThreadGetOrSetLocal op(this, Handle(current_thread, thread_obj), depth, slot, T_DOUBLE, val); diff --git a/src/hotspot/share/prims/jvmtiEnvBase.cpp b/src/hotspot/share/prims/jvmtiEnvBase.cpp index 99798462537cc..11f65ea941de3 100644 --- a/src/hotspot/share/prims/jvmtiEnvBase.cpp +++ b/src/hotspot/share/prims/jvmtiEnvBase.cpp @@ -1310,6 +1310,17 @@ JvmtiEnvBase::is_cthread_with_continuation(JavaThread* jt) { return cont_entry != NULL && is_cthread_with_mounted_vthread(jt); } +// If (thread == NULL) then return current thread object. +// Otherwise return JNIHandles::resolve_external_guard(thread). +oop +JvmtiEnvBase::current_thread_obj_or_resolve_external_guard(jthread thread) { + oop thread_obj = JNIHandles::resolve_external_guard(thread); + if (thread == NULL) { + thread_obj = get_vthread_or_thread_oop(JavaThread::current()); + } + return thread_obj; +} + jvmtiError JvmtiEnvBase::get_threadOop_and_JavaThread(ThreadsList* t_list, jthread thread, JavaThread** jt_pp, oop* thread_oop_p) { diff --git a/src/hotspot/share/prims/jvmtiEnvBase.hpp b/src/hotspot/share/prims/jvmtiEnvBase.hpp index 98e8feca789dc..6ab9dfe629fac 100644 --- a/src/hotspot/share/prims/jvmtiEnvBase.hpp +++ b/src/hotspot/share/prims/jvmtiEnvBase.hpp @@ -167,6 +167,10 @@ class JvmtiEnvBase : public CHeapObj { return byte_offset_of(JvmtiEnvBase, _jvmti_external); }; + // If (thread == NULL) then return current thread object. + // Otherwise return JNIHandles::resolve_external_guard(thread). 
+ static oop current_thread_obj_or_resolve_external_guard(jthread thread); + static jvmtiError get_JavaThread(ThreadsList* tlist, jthread thread, JavaThread** jt_pp) { jvmtiError err = JVMTI_ERROR_NONE; if (thread == NULL) { diff --git a/src/hotspot/share/prims/jvmtiExport.cpp b/src/hotspot/share/prims/jvmtiExport.cpp index cb85ddc3b5878..fb9439588ef8f 100644 --- a/src/hotspot/share/prims/jvmtiExport.cpp +++ b/src/hotspot/share/prims/jvmtiExport.cpp @@ -2462,7 +2462,6 @@ void JvmtiExport::post_compiled_method_load(JvmtiEnv* env, nmethod *nm) { ResourceMark rm(thread); HandleMark hm(thread); - assert(!nm->is_zombie(), "nmethod zombie in post_compiled_method_load"); // Add inlining information jvmtiCompiledMethodLoadInlineRecord* inlinerecord = create_inline_record(nm); // Pass inlining information through the void pointer diff --git a/src/hotspot/share/prims/jvmtiExtensions.cpp b/src/hotspot/share/prims/jvmtiExtensions.cpp index 99d6c542bcb1c..8fcb1a64e62f4 100644 --- a/src/hotspot/share/prims/jvmtiExtensions.cpp +++ b/src/hotspot/share/prims/jvmtiExtensions.cpp @@ -136,6 +136,10 @@ static jvmtiError JNICALL GetCarrierThread(const jvmtiEnv* env, ...) { ThreadsListHandle tlh(current_thread); JavaThread* java_thread; oop vthread_oop = NULL; + + if (vthread == NULL) { + vthread = (jthread)JNIHandles::make_local(current_thread, JvmtiEnvBase::get_vthread_or_thread_oop(current_thread)); + } jvmtiError err = JvmtiExport::cv_external_thread_to_JavaThread(tlh.list(), vthread, &java_thread, &vthread_oop); if (err != JVMTI_ERROR_NONE) { // We got an error code so we don't have a JavaThread *, but diff --git a/src/hotspot/share/prims/jvmtiImpl.cpp b/src/hotspot/share/prims/jvmtiImpl.cpp index bb985ac6d85e4..2f7d94032aaae 100644 --- a/src/hotspot/share/prims/jvmtiImpl.cpp +++ b/src/hotspot/share/prims/jvmtiImpl.cpp @@ -1018,8 +1018,8 @@ void JvmtiDeferredEvent::oops_do(OopClosure* f, CodeBlobClosure* cf) { } } -// The sweeper calls this and marks the nmethods here on the stack so that -// they cannot be turned into zombies while in the queue. +// The GC calls this and marks the nmethods here on the stack so that +// they cannot be unloaded while in the queue. void JvmtiDeferredEvent::nmethods_do(CodeBlobClosure* cf) { if (cf != NULL && _type == TYPE_COMPILED_METHOD_LOAD) { cf->do_code_blob(_event_data.compiled_method_load); @@ -1076,7 +1076,7 @@ JvmtiDeferredEvent JvmtiDeferredEventQueue::dequeue() { } void JvmtiDeferredEventQueue::post(JvmtiEnv* env) { - // Post events while nmethods are still in the queue and can't be unloaded or made zombie + // Post events while nmethods are still in the queue and can't be unloaded. while (_queue_head != NULL) { _queue_head->event().post_compiled_method_load_event(env); dequeue(); diff --git a/src/hotspot/share/prims/jvmtiImpl.hpp b/src/hotspot/share/prims/jvmtiImpl.hpp index 29cc2c87a86c2..9e75ccc57b7ab 100644 --- a/src/hotspot/share/prims/jvmtiImpl.hpp +++ b/src/hotspot/share/prims/jvmtiImpl.hpp @@ -500,7 +500,7 @@ class JvmtiDeferredEvent { void post() NOT_JVMTI_RETURN; void post_compiled_method_load_event(JvmtiEnv* env) NOT_JVMTI_RETURN; void run_nmethod_entry_barriers() NOT_JVMTI_RETURN; - // Sweeper support to keep nmethods from being zombied while in the queue. + // GC support to keep nmethods from unloading while in the queue. void nmethods_do(CodeBlobClosure* cf) NOT_JVMTI_RETURN; // GC support to keep nmethod from being unloaded while in the queue. 
void oops_do(OopClosure* f, CodeBlobClosure* cf) NOT_JVMTI_RETURN; @@ -543,7 +543,7 @@ class JvmtiDeferredEventQueue : public CHeapObj { void enqueue(JvmtiDeferredEvent event) NOT_JVMTI_RETURN; void run_nmethod_entry_barriers(); - // Sweeper support to keep nmethods from being zombied while in the queue. + // GC support to keep nmethods from unloading while in the queue. void nmethods_do(CodeBlobClosure* cf) NOT_JVMTI_RETURN; // GC support to keep nmethod from being unloaded while in the queue. void oops_do(OopClosure* f, CodeBlobClosure* cf) NOT_JVMTI_RETURN; diff --git a/src/hotspot/share/prims/methodHandles.cpp b/src/hotspot/share/prims/methodHandles.cpp index e365c96dc9cfb..2b43b008cd49a 100644 --- a/src/hotspot/share/prims/methodHandles.cpp +++ b/src/hotspot/share/prims/methodHandles.cpp @@ -1055,14 +1055,6 @@ void MethodHandles::add_dependent_nmethod(oop call_site, nmethod* nm) { deps.add_dependent_nmethod(nm); } -void MethodHandles::remove_dependent_nmethod(oop call_site, nmethod* nm) { - assert_locked_or_safepoint(CodeCache_lock); - - oop context = java_lang_invoke_CallSite::context_no_keepalive(call_site); - DependencyContext deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context); - deps.remove_dependent_nmethod(nm); -} - void MethodHandles::clean_dependency_context(oop call_site) { oop context = java_lang_invoke_CallSite::context_no_keepalive(call_site); DependencyContext deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context); diff --git a/src/hotspot/share/prims/methodHandles.hpp b/src/hotspot/share/prims/methodHandles.hpp index ad8c935c64bb2..49348a19d919c 100644 --- a/src/hotspot/share/prims/methodHandles.hpp +++ b/src/hotspot/share/prims/methodHandles.hpp @@ -80,7 +80,6 @@ class MethodHandles: AllStatic { // CallSite support static void add_dependent_nmethod(oop call_site, nmethod* nm); - static void remove_dependent_nmethod(oop call_site, nmethod* nm); static void clean_dependency_context(oop call_site); static void flush_dependent_nmethods(Handle call_site, Handle target); diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp index f100f48de3b92..27e9feb1e19e5 100644 --- a/src/hotspot/share/prims/whitebox.cpp +++ b/src/hotspot/share/prims/whitebox.cpp @@ -82,7 +82,6 @@ #include "runtime/jniHandles.inline.hpp" #include "runtime/os.hpp" #include "runtime/stackFrameStream.inline.hpp" -#include "runtime/sweeper.hpp" #include "runtime/synchronizer.hpp" #include "runtime/threadSMR.hpp" #include "runtime/vframe.hpp" @@ -809,7 +808,7 @@ WB_ENTRY(jboolean, WB_IsMethodCompiled(JNIEnv* env, jobject o, jobject method, j if (code == NULL) { return JNI_FALSE; } - return (code->is_alive() && !code->is_marked_for_deoptimization()); + return !code->is_marked_for_deoptimization(); WB_END static bool is_excluded_for_compiler(AbstractCompiler* comp, methodHandle& mh) { @@ -1420,11 +1419,6 @@ WB_ENTRY(void, WB_UnlockCompilation(JNIEnv* env, jobject o)) mo.notify_all(); WB_END -WB_ENTRY(void, WB_ForceNMethodSweep(JNIEnv* env, jobject o)) - // Force a code cache sweep and block until it finished - NMethodSweeper::force_sweep(); -WB_END - WB_ENTRY(jboolean, WB_IsInStringTable(JNIEnv* env, jobject o, jstring javaString)) ResourceMark rm(THREAD); int len; @@ -2659,7 +2653,6 @@ static JNINativeMethod methods[] = { {CC"getCPUFeatures", CC"()Ljava/lang/String;", (void*)&WB_GetCPUFeatures }, {CC"getNMethod0", CC"(Ljava/lang/reflect/Executable;Z)[Ljava/lang/Object;", (void*)&WB_GetNMethod }, - 
{CC"forceNMethodSweep", CC"()V", (void*)&WB_ForceNMethodSweep }, {CC"allocateCodeBlob", CC"(II)J", (void*)&WB_AllocateCodeBlob }, {CC"freeCodeBlob", CC"(J)V", (void*)&WB_FreeCodeBlob }, {CC"getCodeHeapEntries", CC"(I)[Ljava/lang/Object;",(void*)&WB_GetCodeHeapEntries }, diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp index af98cef64892e..01ff707bff445 100644 --- a/src/hotspot/share/runtime/arguments.cpp +++ b/src/hotspot/share/runtime/arguments.cpp @@ -555,6 +555,7 @@ static SpecialFlag const special_jvm_flags[] = { { "UseContainerCpuShares", JDK_Version::jdk(19), JDK_Version::jdk(20), JDK_Version::jdk(21) }, { "PreferContainerQuotaForCPUCount", JDK_Version::jdk(19), JDK_Version::jdk(20), JDK_Version::jdk(21) }, { "AliasLevel", JDK_Version::jdk(19), JDK_Version::jdk(20), JDK_Version::jdk(21) }, + { "UseCodeAging", JDK_Version::undefined(), JDK_Version::jdk(20), JDK_Version::jdk(21) }, #ifdef ASSERT { "DummyObsoleteTestFlag", JDK_Version::undefined(), JDK_Version::jdk(18), JDK_Version::undefined() }, @@ -3885,11 +3886,13 @@ static void apply_debugger_ergo() { } #endif +#ifndef PRODUCT if (UseDebuggerErgo) { // Turn on sub-flags FLAG_SET_ERGO_IF_DEFAULT(UseDebuggerErgo1, true); FLAG_SET_ERGO_IF_DEFAULT(UseDebuggerErgo2, true); } +#endif if (UseDebuggerErgo2) { // Debugging with limited number of CPUs @@ -4152,11 +4155,6 @@ jint Arguments::apply_ergo() { #ifdef ZERO // Clear flags not supported on zero. FLAG_SET_DEFAULT(ProfileInterpreter, false); - - if (LogTouchedMethods) { - warning("LogTouchedMethods is not supported for Zero"); - FLAG_SET_DEFAULT(LogTouchedMethods, false); - } #endif // ZERO if (PrintAssembly && FLAG_IS_DEFAULT(DebugNonSafepoints)) { diff --git a/src/hotspot/share/runtime/continuation.cpp b/src/hotspot/share/runtime/continuation.cpp index e8ab655792d09..eebb77d03bfd6 100644 --- a/src/hotspot/share/runtime/continuation.cpp +++ b/src/hotspot/share/runtime/continuation.cpp @@ -422,52 +422,10 @@ void Continuations::init() { } // While virtual threads are in Preview, there are some VM mechanisms we disable if continuations aren't used -// See NMethodSweeper::do_stack_scanning and nmethod::is_not_on_continuation_stack bool Continuations::enabled() { return VMContinuations && Arguments::enable_preview(); } -// We initialize the _gc_epoch to 2, because previous_completed_gc_marking_cycle -// subtracts the value by 2, and the type is unsigned. We don't want underflow. -// -// Odd values mean that marking is in progress, and even values mean that no -// marking is currently active. 
-uint64_t Continuations::_gc_epoch = 2; - -uint64_t Continuations::gc_epoch() { - return _gc_epoch; -} - -bool Continuations::is_gc_marking_cycle_active() { - // Odd means that marking is active - return (_gc_epoch % 2) == 1; -} - -uint64_t Continuations::previous_completed_gc_marking_cycle() { - if (is_gc_marking_cycle_active()) { - return _gc_epoch - 2; - } else { - return _gc_epoch - 1; - } -} - -void Continuations::on_gc_marking_cycle_start() { - assert(!is_gc_marking_cycle_active(), "Previous marking cycle never ended"); - ++_gc_epoch; -} - -void Continuations::on_gc_marking_cycle_finish() { - assert(is_gc_marking_cycle_active(), "Marking cycle started before last one finished"); - ++_gc_epoch; -} - -void Continuations::arm_all_nmethods() { - BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); - if (bs_nm != NULL) { - bs_nm->arm_all_nmethods(); - } -} - #define CC (char*) /*cast a literal from (const char*)*/ #define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f) diff --git a/src/hotspot/share/runtime/continuation.hpp b/src/hotspot/share/runtime/continuation.hpp index 8cade260d260e..607dc2fda9a99 100644 --- a/src/hotspot/share/runtime/continuation.hpp +++ b/src/hotspot/share/runtime/continuation.hpp @@ -37,21 +37,9 @@ class outputStream; class RegisterMap; class Continuations : public AllStatic { -private: - static uint64_t _gc_epoch; - public: static void init(); static bool enabled(); // TODO: used while virtual threads are in Preview; remove when GA - - // The GC epoch and marking_cycle code below is there to support sweeping - // nmethods in loom stack chunks. - static uint64_t gc_epoch(); - static bool is_gc_marking_cycle_active(); - static uint64_t previous_completed_gc_marking_cycle(); - static void on_gc_marking_cycle_start(); - static void on_gc_marking_cycle_finish(); - static void arm_all_nmethods(); }; void continuations_init(); diff --git a/src/hotspot/share/runtime/deoptimization.cpp b/src/hotspot/share/runtime/deoptimization.cpp index 5f05fdd6efec5..7ac64d5989577 100644 --- a/src/hotspot/share/runtime/deoptimization.cpp +++ b/src/hotspot/share/runtime/deoptimization.cpp @@ -620,7 +620,7 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread ContinuationEntry::from_frame(deopt_sender)->set_argsize(0); } - assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc"); + assert(CodeCache::find_blob(frame_pcs[0]) != NULL, "bad pc"); #if INCLUDE_JVMCI if (exceptionObject() != NULL) { @@ -958,10 +958,10 @@ Deoptimization::DeoptAction Deoptimization::_unloaded_action template<typename CacheType> class BoxCacheBase : public CHeapObj<mtCompiler> { protected: - static InstanceKlass* find_cache_klass(Symbol* klass_name) { - ResourceMark rm; + static InstanceKlass* find_cache_klass(Thread* thread, Symbol* klass_name) { + ResourceMark rm(thread); char* klass_name_str = klass_name->as_C_string(); - InstanceKlass* ik = SystemDictionary::find_instance_klass(klass_name, Handle(), Handle()); + InstanceKlass* ik = SystemDictionary::find_instance_klass(thread, klass_name, Handle(), Handle()); guarantee(ik != NULL, "%s must be loaded", klass_name_str); guarantee(ik->is_initialized(), "%s must be initialized", klass_name_str); CacheType::compute_offsets(ik); @@ -976,7 +976,7 @@ template<typename PrimitiveType, typename CacheType, typename BoxType> class Box protected: static BoxCache<PrimitiveType, CacheType, BoxType> *_singleton; BoxCache(Thread* thread) { - InstanceKlass* ik = BoxCacheBase<CacheType>::find_cache_klass(CacheType::symbol()); + InstanceKlass* ik = BoxCacheBase<CacheType>::find_cache_klass(thread, CacheType::symbol()); objArrayOop cache = CacheType::cache(ik);
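The epoch machinery deleted above now belongs to CodeCache (see the record_gc_epoch change earlier in this patch). It packs two facts into one counter: parity says whether marking is in progress, magnitude orders cycles, and starting at 2 keeps the subtraction in previous_completed_gc_marking_cycle() from underflowing. A compilable restatement of the removed logic:

    #include <cassert>
    #include <cstdint>

    // Restates the epoch encoding removed from Continuations: even means no
    // marking in progress, odd means marking in progress. Starting at 2 avoids
    // unsigned underflow in previous_completed_cycle().
    static uint64_t g_epoch = 2;

    bool is_marking_active()       { return (g_epoch % 2) == 1; }
    void on_marking_cycle_start()  { assert(!is_marking_active()); ++g_epoch; }
    void on_marking_cycle_finish() { assert(is_marking_active());  ++g_epoch; }

    uint64_t previous_completed_cycle() {
      // While marking, the last *completed* cycle finished two ticks ago;
      // otherwise it finished one tick ago.
      return is_marking_active() ? g_epoch - 2 : g_epoch - 1;
    }

    int main() {
      assert(previous_completed_cycle() == 1);   // nothing has run yet
      on_marking_cycle_start();                  // epoch -> 3 (odd: active)
      assert(previous_completed_cycle() == 1);
      on_marking_cycle_finish();                 // epoch -> 4 (even: idle)
      assert(previous_completed_cycle() == 3);
    }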
assert(cache->length() > 0, "Empty cache"); _low = BoxType::value(cache->obj_at(0)); @@ -1032,7 +1032,7 @@ class BooleanBoxCache : public BoxCacheBase<java_lang_Boolean> { protected: static BooleanBoxCache *_singleton; BooleanBoxCache(Thread *thread) { - InstanceKlass* ik = find_cache_klass(java_lang_Boolean::symbol()); + InstanceKlass* ik = find_cache_klass(thread, java_lang_Boolean::symbol()); _true_cache = JNIHandles::make_global(Handle(thread, java_lang_Boolean::get_TRUE(ik))); _false_cache = JNIHandles::make_global(Handle(thread, java_lang_Boolean::get_FALSE(ik))); } @@ -1896,9 +1896,6 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr #endif frame stub_frame = current->last_frame(); frame fr = stub_frame.sender(&reg_map); - // Make sure the calling nmethod is not getting deoptimized and removed - // before we are done with it. - nmethodLocker nl(fr.pc()); // Log a message Events::log_deopt_message(current, "Uncommon trap: trap_request=" PTR32_FORMAT " fr.pc=" INTPTR_FORMAT " relative=" INTPTR_FORMAT, @@ -1955,7 +1952,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr // Ensure that we can record deopt. history: // Need MDO to record RTM code generation state. - bool create_if_missing = ProfileTraps || UseCodeAging RTM_OPT_ONLY( || UseRTMLocking ); + bool create_if_missing = ProfileTraps RTM_OPT_ONLY( || UseRTMLocking ); methodHandle profiled_method; #if INCLUDE_JVMCI diff --git a/src/hotspot/share/runtime/frame.cpp b/src/hotspot/share/runtime/frame.cpp index 30f326c1b2634..7971390116217 100644 --- a/src/hotspot/share/runtime/frame.cpp +++ b/src/hotspot/share/runtime/frame.cpp @@ -227,12 +227,12 @@ void frame::set_pc(address newpc ) { // Unsafe to use the is_deoptimized tester after changing pc _deopt_state = unknown; _pc = newpc; - _cb = CodeCache::find_blob_unsafe(_pc); + _cb = CodeCache::find_blob(_pc); } void frame::set_pc_preserve_deopt(address newpc) { - set_pc_preserve_deopt(newpc, CodeCache::find_blob_unsafe(newpc)); + set_pc_preserve_deopt(newpc, CodeCache::find_blob(newpc)); } void frame::set_pc_preserve_deopt(address newpc, CodeBlob* cb) { diff --git a/src/hotspot/share/runtime/globals.hpp b/src/hotspot/share/runtime/globals.hpp index b50a1ebb5c83e..398e1a7254cc5 100644 --- a/src/hotspot/share/runtime/globals.hpp +++ b/src/hotspot/share/runtime/globals.hpp @@ -270,7 +270,7 @@ const int ObjectAlignmentInBytes = 8; "compilation") \ \ product(bool, MethodFlushing, true, \ - "Reclamation of zombie and not-entrant methods") \ + "Reclamation of compiled methods") \ \ develop(bool, VerifyStack, false, \ "Verify stack of each thread when it is entering a runtime call") \ @@ -379,7 +379,7 @@ const int ObjectAlignmentInBytes = 8; "Deoptimize random frames on random exit from the runtime system")\ \ notproduct(bool, ZombieALot, false, \ - "Create zombies (non-entrant) at exit from the runtime system") \ + "Create non-entrant nmethods at exit from the runtime system") \ \ notproduct(bool, WalkStackALot, false, \ "Trace stack (no print) at every exit from the runtime system") \ @@ -977,37 +977,17 @@ const int ObjectAlignmentInBytes = 8; product(bool, UsePopCountInstruction, false, \ "Use population count instruction") \ \ - product(bool, LogTouchedMethods, false, DIAGNOSTIC, \ - "Log methods which have been ever touched in runtime") \ - \ - product(bool, PrintTouchedMethodsAtExit, false, DIAGNOSTIC, \ - "Print all methods that have been ever touched in runtime") \ - \ develop(bool, TraceMethodReplacement, false, \ "Print when methods are
replaced due to recompilation") \ \ - develop(bool, PrintMethodFlushing, false, \ - "Print the nmethods being flushed") \ - \ product(bool, PrintMethodFlushingStatistics, false, DIAGNOSTIC, \ "print statistics about method flushing") \ \ - product(intx, HotMethodDetectionLimit, 100000, DIAGNOSTIC, \ - "Number of compiled code invocations after which " \ - "the method is considered as hot by the flusher") \ - range(1, max_jint) \ - \ product(intx, MinPassesBeforeFlush, 10, DIAGNOSTIC, \ "Minimum number of sweeper passes before an nmethod " \ "can be flushed") \ range(0, max_intx) \ \ - product(bool, UseCodeAging, true, \ - "Insert counter to detect warm methods") \ - \ - product(bool, StressCodeAging, false, DIAGNOSTIC, \ - "Start with counters compiled in") \ - \ develop(bool, StressCodeBuffers, false, \ "Exercise code buffer expansion and other rare state changes") \ \ @@ -1098,9 +1078,6 @@ const int ObjectAlignmentInBytes = 8; develop(bool, DebugVtables, false, \ "add debugging code to vtable dispatch") \ \ - develop(bool, TraceCreateZombies, false, \ - "trace creation of zombie nmethods") \ - \ product(bool, RangeCheckElimination, true, \ "Eliminate range checks") \ \ @@ -1317,17 +1294,11 @@ const int ObjectAlignmentInBytes = 8; "Delay in milliseconds for option SafepointTimeout") \ range(0, max_intx LP64_ONLY(/MICROUNITS)) \ \ - product(intx, NmethodSweepActivity, 10, \ + product(intx, NmethodSweepActivity, 4, \ "Removes cold nmethods from code cache if > 0. Higher values " \ "result in more aggressive sweeping") \ range(0, 2000) \ \ - notproduct(bool, LogSweeper, false, \ - "Keep a ring buffer of sweeper activity") \ - \ - notproduct(intx, SweeperLogEntries, 1024, \ - "Number of records in the ring buffer of sweeper activity") \ - \ develop(intx, MallocCatchPtr, -1, \ "Hit breakpoint when mallocing/freeing this pointer") \ \ @@ -1598,8 +1569,8 @@ const int ObjectAlignmentInBytes = 8; product(bool, UseCodeCacheFlushing, true, \ "Remove cold/old nmethods from the code cache") \ \ - product(double, SweeperThreshold, 0.5, \ - "Threshold controlling when code cache sweeper is invoked." \ + product(double, SweeperThreshold, 15.0, \ + "Threshold when a code cache unloading GC is invoked. " \ "Value is percentage of ReservedCodeCacheSize.") \ range(0.0, 100.0) \ \ diff --git a/src/hotspot/share/runtime/globals_extension.hpp b/src/hotspot/share/runtime/globals_extension.hpp index fd7b83c22a3d4..2e0683ef18b01 100644 --- a/src/hotspot/share/runtime/globals_extension.hpp +++ b/src/hotspot/share/runtime/globals_extension.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -59,6 +59,15 @@ enum JVMFlagsEnum : int { #define DEFINE_FLAG_MEMBER_SETTER(type, name, ...)
FLAG_MEMBER_SETTER_(type, name) +#ifdef PRODUCT +ALL_FLAGS(IGNORE_FLAG, // develop : declared as const + IGNORE_FLAG, // develop-pd : declared as const + DEFINE_FLAG_MEMBER_SETTER, + DEFINE_FLAG_MEMBER_SETTER, + IGNORE_FLAG, // not-product : is not declared + IGNORE_RANGE, + IGNORE_CONSTRAINT) +#else ALL_FLAGS(DEFINE_FLAG_MEMBER_SETTER, DEFINE_FLAG_MEMBER_SETTER, DEFINE_FLAG_MEMBER_SETTER, @@ -66,6 +75,7 @@ ALL_FLAGS(DEFINE_FLAG_MEMBER_SETTER, DEFINE_FLAG_MEMBER_SETTER, IGNORE_RANGE, IGNORE_CONSTRAINT) +#endif #define FLAG_IS_DEFAULT(name) (JVMFlag::is_default(FLAG_MEMBER_ENUM(name))) #define FLAG_IS_ERGO(name) (JVMFlag::is_ergo(FLAG_MEMBER_ENUM(name))) diff --git a/src/hotspot/share/runtime/java.cpp b/src/hotspot/share/runtime/java.cpp index 8ddddb7acbe9d..6a1704f7249b7 100644 --- a/src/hotspot/share/runtime/java.cpp +++ b/src/hotspot/share/runtime/java.cpp @@ -68,7 +68,6 @@ #include "runtime/sharedRuntime.hpp" #include "runtime/statSampler.hpp" #include "runtime/stubRoutines.hpp" -#include "runtime/sweeper.hpp" #include "runtime/task.hpp" #include "runtime/threads.hpp" #include "runtime/timer.hpp" @@ -294,11 +293,8 @@ void print_statistics() { } // CodeHeap State Analytics. - // Does also call NMethodSweeper::print(tty) if (PrintCodeHeapAnalytics) { CompileBroker::print_heapinfo(NULL, "all", 4096); // details - } else if (PrintMethodFlushingStatistics) { - NMethodSweeper::print(tty); } if (PrintCodeCache2) { @@ -324,10 +320,6 @@ void print_statistics() { ClassLoaderDataGraph::print(); } - if (LogTouchedMethods && PrintTouchedMethodsAtExit) { - Method::print_touched_methods(tty); - } - // Native memory tracking data if (PrintNMTStatistics) { MemTracker::final_report(tty); @@ -366,11 +358,8 @@ void print_statistics() { } // CodeHeap State Analytics. 
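The globals_extension.hpp hunk above specializes the ALL_FLAGS X-macro expansion for PRODUCT builds, where develop and notproduct flags are constants and must not get setter members. A toy X-macro showing the shape of that technique; the flag list and all names below are invented for illustration:

    #include <cstdio>

    // Toy X-macro in the style of ALL_FLAGS: the list is defined once, and each
    // expansion site picks what to emit per flag kind. Names are illustrative.
    #define MY_FLAGS(develop_flag, product_flag) \
      develop_flag(int, DebugKnob, 7)            \
      product_flag(int, CacheSize, 128)

    #define DEFINE_SETTER(type, name, dflt) \
      void set_##name(type v) { std::printf("set " #name " = %d\n", v); }
    #define IGNORE_FLAG(type, name, dflt)  /* emit nothing */

    #ifdef PRODUCT
    // develop flags are compile-time constants in product builds: no setters.
    MY_FLAGS(IGNORE_FLAG, DEFINE_SETTER)
    #else
    MY_FLAGS(DEFINE_SETTER, DEFINE_SETTER)
    #endif

    int main() {
      set_CacheSize(256);   // always available
    #ifndef PRODUCT
      set_DebugKnob(1);     // only exists in non-product builds
    #endif
    }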
- // Does also call NMethodSweeper::print(tty) if (PrintCodeHeapAnalytics) { CompileBroker::print_heapinfo(NULL, "all", 4096); // details - } else if (PrintMethodFlushingStatistics) { - NMethodSweeper::print(tty); } #ifdef COMPILER2 @@ -388,10 +377,6 @@ void print_statistics() { MetaspaceUtils::print_basic_report(tty, 0); } - if (LogTouchedMethods && PrintTouchedMethodsAtExit) { - Method::print_touched_methods(tty); - } - ThreadsSMRSupport::log_statistics(); } diff --git a/src/hotspot/share/runtime/javaThread.hpp b/src/hotspot/share/runtime/javaThread.hpp index 91b7a930e1f34..9527183b57a9d 100644 --- a/src/hotspot/share/runtime/javaThread.hpp +++ b/src/hotspot/share/runtime/javaThread.hpp @@ -893,7 +893,7 @@ class JavaThread: public Thread { void oops_do_frames(OopClosure* f, CodeBlobClosure* cf); void oops_do_no_frames(OopClosure* f, CodeBlobClosure* cf); - // Sweeper operations + // GC operations virtual void nmethods_do(CodeBlobClosure* cf); // RedefineClasses Support diff --git a/src/hotspot/share/runtime/mutexLocker.cpp b/src/hotspot/share/runtime/mutexLocker.cpp index 2fb4d8ccb27d2..d384d3a702f9a 100644 --- a/src/hotspot/share/runtime/mutexLocker.cpp +++ b/src/hotspot/share/runtime/mutexLocker.cpp @@ -66,7 +66,6 @@ Mutex* SymbolArena_lock = NULL; Monitor* StringDedup_lock = NULL; Mutex* StringDedupIntern_lock = NULL; Monitor* CodeCache_lock = NULL; -Monitor* CodeSweeper_lock = NULL; Mutex* MethodData_lock = NULL; Mutex* TouchedMethodLog_lock = NULL; Mutex* RetData_lock = NULL; @@ -96,7 +95,6 @@ Monitor* InitCompleted_lock = NULL; Monitor* BeforeExit_lock = NULL; Monitor* Notify_lock = NULL; Mutex* ExceptionCache_lock = NULL; -Mutex* NMethodSweeperStats_lock = NULL; #ifndef PRODUCT Mutex* FullGCALot_lock = NULL; #endif @@ -321,7 +319,6 @@ void mutex_init() { def(ContinuationRelativize_lock , PaddedMonitor, nosafepoint-3); def(CodeHeapStateAnalytics_lock , PaddedMutex , safepoint); - def(NMethodSweeperStats_lock , PaddedMutex , nosafepoint); def(ThreadsSMRDelete_lock , PaddedMonitor, nosafepoint-3); // Holds ConcurrentHashTableResize_lock def(ThreadIdTableCreate_lock , PaddedMutex , safepoint); def(SharedDecoder_lock , PaddedMutex , tty-1); @@ -350,17 +347,16 @@ void mutex_init() { defl(VtableStubs_lock , PaddedMutex , CompiledIC_lock); // Also holds DumpTimeTable_lock defl(CodeCache_lock , PaddedMonitor, VtableStubs_lock); defl(CompiledMethod_lock , PaddedMutex , CodeCache_lock); - defl(CodeSweeper_lock , PaddedMonitor, CompiledMethod_lock); defl(Threads_lock , PaddedMonitor, CompileThread_lock, true); - defl(Heap_lock , PaddedMonitor, MultiArray_lock); defl(Compile_lock , PaddedMutex , MethodCompileQueue_lock); defl(AdapterHandlerLibrary_lock , PaddedMutex , InvokeMethodTable_lock); + defl(Heap_lock , PaddedMonitor, AdapterHandlerLibrary_lock); defl(PerfDataMemAlloc_lock , PaddedMutex , Heap_lock); defl(PerfDataManager_lock , PaddedMutex , Heap_lock); defl(ClassLoaderDataGraph_lock , PaddedMutex , MultiArray_lock); - defl(VMOperation_lock , PaddedMonitor, Compile_lock, true); + defl(VMOperation_lock , PaddedMonitor, Heap_lock, true); defl(ClassInitError_lock , PaddedMonitor, Threads_lock); if (UseG1GC) { diff --git a/src/hotspot/share/runtime/mutexLocker.hpp b/src/hotspot/share/runtime/mutexLocker.hpp index e858a87d2c8f8..baeeffcba229e 100644 --- a/src/hotspot/share/runtime/mutexLocker.hpp +++ b/src/hotspot/share/runtime/mutexLocker.hpp @@ -58,7 +58,6 @@ extern Mutex* SymbolArena_lock; // a lock on the symbol table a extern Monitor* StringDedup_lock; // a lock on the string 
deduplication facility extern Mutex* StringDedupIntern_lock; // a lock on StringTable notification of StringDedup extern Monitor* CodeCache_lock; // a lock on the CodeCache -extern Monitor* CodeSweeper_lock; // a lock used by the sweeper only for wait notify extern Mutex* MethodData_lock; // a lock on installation of method data extern Mutex* TouchedMethodLog_lock; // a lock on allocation of LogExecutedMethods info extern Mutex* RetData_lock; // a lock on installation of RetData inside method data @@ -90,7 +89,6 @@ extern Monitor* InitCompleted_lock; // a lock used to signal thread extern Monitor* BeforeExit_lock; // a lock used to guard cleanups and shutdown hooks extern Monitor* Notify_lock; // a lock used to synchronize the start-up of the vm extern Mutex* ExceptionCache_lock; // a lock used to synchronize exception cache updates -extern Mutex* NMethodSweeperStats_lock; // a lock used to serialize access to sweeper statistics #ifndef PRODUCT extern Mutex* FullGCALot_lock; // a lock to make FullGCALot MT safe diff --git a/src/hotspot/share/runtime/os.cpp b/src/hotspot/share/runtime/os.cpp index b256219a1181c..ff93716607c4b 100644 --- a/src/hotspot/share/runtime/os.cpp +++ b/src/hotspot/share/runtime/os.cpp @@ -1085,7 +1085,7 @@ void os::print_location(outputStream* st, intptr_t x, bool verbose) { } // Check if addr points into a code blob. - CodeBlob* b = CodeCache::find_blob_unsafe(addr); + CodeBlob* b = CodeCache::find_blob(addr); if (b != NULL) { b->dump_for_addr(addr, st, verbose); return; diff --git a/src/hotspot/share/runtime/os.hpp b/src/hotspot/share/runtime/os.hpp index d21bbef627249..2e67d513530ea 100644 --- a/src/hotspot/share/runtime/os.hpp +++ b/src/hotspot/share/runtime/os.hpp @@ -528,7 +528,7 @@ class os: AllStatic { enum ThreadType { vm_thread, gc_thread, // GC thread - java_thread, // Java, CodeCacheSweeper, JVMTIAgent and Service threads. + java_thread, // Java, JVMTIAgent and Service threads. 
compiler_thread, watcher_thread, asynclog_thread, // dedicated to flushing logs diff --git a/src/hotspot/share/runtime/safepoint.cpp b/src/hotspot/share/runtime/safepoint.cpp index c1ac777753930..2d31f73a8bd83 100644 --- a/src/hotspot/share/runtime/safepoint.cpp +++ b/src/hotspot/share/runtime/safepoint.cpp @@ -24,7 +24,6 @@ #include "precompiled.hpp" #include "classfile/classLoaderDataGraph.hpp" -#include "classfile/dictionary.hpp" #include "classfile/stringTable.hpp" #include "classfile/symbolTable.hpp" #include "code/codeCache.hpp" @@ -62,7 +61,6 @@ #include "runtime/stackWatermarkSet.inline.hpp" #include "runtime/stubCodeGenerator.hpp" #include "runtime/stubRoutines.hpp" -#include "runtime/sweeper.hpp" #include "runtime/synchronizer.hpp" #include "runtime/threads.hpp" #include "runtime/threadSMR.hpp" @@ -560,13 +558,6 @@ class ParallelCleanupTask : public WorkerTask { } } - if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_SYSTEM_DICTIONARY_RESIZE)) { - if (Dictionary::does_any_dictionary_needs_resizing()) { - Tracer t("resizing system dictionaries"); - ClassLoaderDataGraph::resize_dictionaries(); - } - } - if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_LAZY_ROOT_PROCESSING)) { if (_do_lazy_roots) { Tracer t("lazy partial thread root processing"); diff --git a/src/hotspot/share/runtime/safepoint.hpp b/src/hotspot/share/runtime/safepoint.hpp index f323252074244..697feaa66afbd 100644 --- a/src/hotspot/share/runtime/safepoint.hpp +++ b/src/hotspot/share/runtime/safepoint.hpp @@ -74,7 +74,6 @@ class SafepointSynchronize : AllStatic { SAFEPOINT_CLEANUP_UPDATE_INLINE_CACHES, SAFEPOINT_CLEANUP_SYMBOL_TABLE_REHASH, SAFEPOINT_CLEANUP_STRING_TABLE_REHASH, - SAFEPOINT_CLEANUP_SYSTEM_DICTIONARY_RESIZE, SAFEPOINT_CLEANUP_REQUEST_OOPSTORAGE_CLEANUP, // Leave this one last. SAFEPOINT_CLEANUP_NUM_TASKS diff --git a/src/hotspot/share/runtime/serviceThread.cpp b/src/hotspot/share/runtime/serviceThread.cpp index 6b23755f7c329..a3a034d13a4ba 100644 --- a/src/hotspot/share/runtime/serviceThread.cpp +++ b/src/hotspot/share/runtime/serviceThread.cpp @@ -230,7 +230,7 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) { void ServiceThread::enqueue_deferred_event(JvmtiDeferredEvent* event) { MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag); - // If you enqueue events before the service thread runs, gc and the sweeper + // If you enqueue events before the service thread runs, gc // cannot keep the nmethod alive. This could be restricted to compiled method // load and unload events, if we wanted to be picky. assert(_instance != NULL, "cannot enqueue events before the service thread runs"); diff --git a/src/hotspot/share/runtime/sharedRuntime.cpp b/src/hotspot/share/runtime/sharedRuntime.cpp index 4ee529996741f..b5effe319c4e6 100644 --- a/src/hotspot/share/runtime/sharedRuntime.cpp +++ b/src/hotspot/share/runtime/sharedRuntime.cpp @@ -1296,7 +1296,6 @@ bool SharedRuntime::resolve_sub_helper_internal(methodHandle callee_method, cons // Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded. callee = NULL; } - nmethodLocker nl_callee(callee); #ifdef ASSERT address dest_entry_point = callee == NULL ? 
0 : callee->entry_point(); // used below #endif @@ -1392,7 +1391,7 @@ methodHandle SharedRuntime::resolve_sub_helper(bool is_virtual, bool is_optimize (!is_virtual && invoke_code == Bytecodes::_invokedynamic) || ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode"); - assert(caller_nm->is_alive() && !caller_nm->is_unloading(), "It should be alive"); + assert(!caller_nm->is_unloading(), "It should not be unloading"); #ifndef PRODUCT // tracing/debugging/statistics @@ -2300,7 +2299,7 @@ class MethodArityHistogram { static void add_method_to_histogram(nmethod* nm) { Method* method = (nm == NULL) ? NULL : nm->method(); - if ((method != NULL) && nm->is_alive()) { + if (method != NULL) { ArgumentCount args(method->signature()); int arity = args.size() + (method->is_static() ? 0 : 1); int argsize = method->size_of_parameters(); @@ -3011,6 +3010,9 @@ void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) { ResourceMark rm; nmethod* nm = NULL; + // Check if memory should be freed before allocation + CodeCache::gc_on_allocation(); + assert(method->is_native(), "must be native"); assert(method->is_special_native_intrinsic() || method->has_native_function(), "must have something valid to call!"); diff --git a/src/hotspot/share/runtime/sharedRuntime.hpp b/src/hotspot/share/runtime/sharedRuntime.hpp index 892941ffdf619..920c856d23185 100644 --- a/src/hotspot/share/runtime/sharedRuntime.hpp +++ b/src/hotspot/share/runtime/sharedRuntime.hpp @@ -498,7 +498,7 @@ class SharedRuntime: AllStatic { jint length, JavaThread* thread); // handle ic miss with caller being compiled code - // wrong method handling (inline cache misses, zombie methods) + // wrong method handling (inline cache misses) static address handle_wrong_method(JavaThread* current); static address handle_wrong_method_abstract(JavaThread* current); static address handle_wrong_method_ic_miss(JavaThread* current); diff --git a/src/hotspot/share/runtime/signature.cpp b/src/hotspot/share/runtime/signature.cpp index a08b0e16eba2e..9600b50a9b0e2 100644 --- a/src/hotspot/share/runtime/signature.cpp +++ b/src/hotspot/share/runtime/signature.cpp @@ -338,6 +338,11 @@ inline int SignatureStream::scan_type(BasicType type) { case T_ARRAY: while ((end < limit) && ((char)base[end] == JVM_SIGNATURE_ARRAY)) { end++; } + // If we discovered only the string of '[', this means something is wrong. + if (end >= limit) { + assert(false, "Invalid type detected"); + return limit; + } _array_prefix = end - _end; // number of '[' chars just skipped if (Signature::has_envelope(base[end])) { tem = (const u1 *) memchr(&base[end], JVM_SIGNATURE_ENDCLASS, limit - end); @@ -507,7 +512,7 @@ Klass* SignatureStream::as_klass(Handle class_loader, Handle protection_domain, } else if (failure_mode == CachedOrNull) { NoSafepointVerifier nsv; // no loading, now, we mean it! assert(!HAS_PENDING_EXCEPTION, ""); - k = SystemDictionary::find_instance_klass(name, class_loader, protection_domain); + k = SystemDictionary::find_instance_klass(THREAD, name, class_loader, protection_domain); // SD::find does not trigger loading, so there should be no throws // Still, bad things can happen, so we CHECK_NULL and ask callers // to do likewise. 
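The scan_type change above rejects a type descriptor that consists of nothing but '[' characters. A minimal standalone sketch of that guard (hypothetical helper, simplified from the HotSpot code; the name scan_array_prefix is invented for illustration):

#include <cassert>
#include <cstddef>

// Skip the '[' prefix of a JVM array descriptor such as "[[I".
// Returns the index of the element type, or 'limit' when the descriptor
// is malformed, i.e. it ends before any element type appears.
static size_t scan_array_prefix(const char* base, size_t pos, size_t limit) {
  while (pos < limit && base[pos] == '[') { pos++; }
  if (pos >= limit) {               // e.g. "[[[": only '[' was found
    assert(false && "Invalid type detected");
    return limit;                   // tell the caller everything was consumed
  }
  return pos;                       // e.g. for "[[I" this is the index of 'I'
}

For "[[I" the scan stops at 'I'; for "[[[" it runs off the end of the signature, which is exactly the case the new end >= limit check catches.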
diff --git a/src/hotspot/share/runtime/stackChunkFrameStream.inline.hpp b/src/hotspot/share/runtime/stackChunkFrameStream.inline.hpp index a9b376a98b2a1..36f81f62f4542 100644 --- a/src/hotspot/share/runtime/stackChunkFrameStream.inline.hpp +++ b/src/hotspot/share/runtime/stackChunkFrameStream.inline.hpp @@ -206,8 +206,6 @@ inline void StackChunkFrameStream::get_cb() { assert(_cb != nullptr, ""); assert(is_interpreted() || ((is_stub() || is_compiled()) && _cb->frame_size() > 0), ""); - assert(is_interpreted() || cb()->is_alive(), - "not alive - not_entrant: %d zombie: %d unloaded: %d", _cb->is_not_entrant(), _cb->is_zombie(), _cb->is_unloaded()); } template diff --git a/src/hotspot/share/runtime/stubCodeGenerator.cpp b/src/hotspot/share/runtime/stubCodeGenerator.cpp index fb546bc8ebee7..8134258caface 100644 --- a/src/hotspot/share/runtime/stubCodeGenerator.cpp +++ b/src/hotspot/share/runtime/stubCodeGenerator.cpp @@ -76,7 +76,7 @@ StubCodeGenerator::StubCodeGenerator(CodeBuffer* code, bool print_code) { StubCodeGenerator::~StubCodeGenerator() { #ifndef PRODUCT CodeBuffer* cbuf = _masm->code(); - CodeBlob* blob = CodeCache::find_blob_unsafe(cbuf->insts()->start()); + CodeBlob* blob = CodeCache::find_blob(cbuf->insts()->start()); if (blob != NULL) { blob->use_remarks(cbuf->asm_remarks()); blob->use_strings(cbuf->dbg_strings()); diff --git a/src/hotspot/share/runtime/sweeper.cpp b/src/hotspot/share/runtime/sweeper.cpp deleted file mode 100644 index b16ec713a6871..0000000000000 --- a/src/hotspot/share/runtime/sweeper.cpp +++ /dev/null @@ -1,680 +0,0 @@ -/* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#include "precompiled.hpp" -#include "code/codeCache.hpp" -#include "code/compiledIC.hpp" -#include "code/icBuffer.hpp" -#include "code/nmethod.hpp" -#include "compiler/compileBroker.hpp" -#include "gc/shared/collectedHeap.hpp" -#include "gc/shared/workerThread.hpp" -#include "jfr/jfrEvents.hpp" -#include "logging/log.hpp" -#include "logging/logStream.hpp" -#include "memory/allocation.inline.hpp" -#include "memory/resourceArea.hpp" -#include "memory/universe.hpp" -#include "oops/method.hpp" -#include "runtime/interfaceSupport.inline.hpp" -#include "runtime/handshake.hpp" -#include "runtime/javaThread.hpp" -#include "runtime/mutexLocker.hpp" -#include "runtime/orderAccess.hpp" -#include "runtime/os.hpp" -#include "runtime/sweeper.hpp" -#include "runtime/vmOperations.hpp" -#include "runtime/vmThread.hpp" -#include "utilities/events.hpp" -#include "utilities/xmlstream.hpp" - -#ifdef ASSERT - -#define SWEEP(nm) record_sweep(nm, __LINE__) -// Sweeper logging code -class SweeperRecord { - public: - int64_t traversal; - int compile_id; - int64_t traversal_mark; - int state; - const char* kind; - address vep; - address uep; - int line; - - void print() { - tty->print_cr("traversal = " INT64_FORMAT " compile_id = %d %s uep = " PTR_FORMAT " vep = " - PTR_FORMAT " state = %d traversal_mark " INT64_FORMAT " line = %d", - traversal, - compile_id, - kind == NULL ? "" : kind, - p2i(uep), - p2i(vep), - state, - traversal_mark, - line); - } -}; - -static int _sweep_index = 0; -static SweeperRecord* _records = NULL; - -void NMethodSweeper::record_sweep(CompiledMethod* nm, int line) { - if (_records != NULL) { - _records[_sweep_index].traversal = _traversals; - _records[_sweep_index].traversal_mark = nm->is_nmethod() ? ((nmethod*)nm)->stack_traversal_mark() : 0; - _records[_sweep_index].compile_id = nm->compile_id(); - _records[_sweep_index].kind = nm->compile_kind(); - _records[_sweep_index].state = nm->get_state(); - _records[_sweep_index].vep = nm->verified_entry_point(); - _records[_sweep_index].uep = nm->entry_point(); - _records[_sweep_index].line = line; - _sweep_index = (_sweep_index + 1) % SweeperLogEntries; - } -} - -void NMethodSweeper::init_sweeper_log() { - if (LogSweeper && _records == NULL) { - // Create the ring buffer for the logging code - _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC); - memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries); - } -} -#else -#define SWEEP(nm) -#endif - -CompiledMethodIterator NMethodSweeper::_current(CompiledMethodIterator::all_blobs); // Current compiled method -int64_t NMethodSweeper::_traversals = 0; // Stack scan count, also sweep ID. -int64_t NMethodSweeper::_total_nof_code_cache_sweeps = 0; // Total number of full sweeps of the code cache -int NMethodSweeper::_seen = 0; // Nof. nmethod we have currently processed in current pass of CodeCache -size_t NMethodSweeper::_sweep_threshold_bytes = 0; // Threshold for when to sweep. 
Updated after ergonomics - -volatile bool NMethodSweeper::_should_sweep = false;// Indicates if a normal sweep will be done -volatile bool NMethodSweeper::_force_sweep = false;// Indicates if a forced sweep will be done -volatile size_t NMethodSweeper::_bytes_changed = 0; // Counts the total nmethod size if the nmethod changed from: - // 1) alive -> not_entrant - // 2) not_entrant -> zombie -int NMethodSweeper::_hotness_counter_reset_val = 0; - -int64_t NMethodSweeper::_total_nof_methods_reclaimed = 0; // Accumulated nof methods flushed -int64_t NMethodSweeper::_total_nof_c2_methods_reclaimed = 0; // Accumulated nof methods flushed -size_t NMethodSweeper::_total_flushed_size = 0; // Total number of bytes flushed from the code cache -Tickspan NMethodSweeper::_total_time_sweeping; // Accumulated time sweeping -Tickspan NMethodSweeper::_total_time_this_sweep; // Total time this sweep -Tickspan NMethodSweeper::_peak_sweep_time; // Peak time for a full sweep -Tickspan NMethodSweeper::_peak_sweep_fraction_time; // Peak time sweeping one fraction - -class MarkActivationClosure: public CodeBlobClosure { -public: - virtual void do_code_blob(CodeBlob* cb) { - nmethod* nm = cb->as_nmethod(); - nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val()); - // If we see an activation belonging to a non_entrant nmethod, we mark it. - if (nm->is_not_entrant()) { - nm->mark_as_seen_on_stack(); - } - } -}; -static MarkActivationClosure mark_activation_closure; - -int NMethodSweeper::hotness_counter_reset_val() { - if (_hotness_counter_reset_val == 0) { - _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2; - } - return _hotness_counter_reset_val; -} -bool NMethodSweeper::wait_for_stack_scanning() { - return _current.end(); -} - -class NMethodMarkingClosure : public HandshakeClosure { -private: - CodeBlobClosure* _cl; -public: - NMethodMarkingClosure(CodeBlobClosure* cl) : HandshakeClosure("NMethodMarking"), _cl(cl) {} - void do_thread(Thread* thread) { - if (thread->is_Java_thread() && ! thread->is_Code_cache_sweeper_thread()) { - JavaThread::cast(thread)->nmethods_do(_cl); - } - } -}; - -CodeBlobClosure* NMethodSweeper::prepare_mark_active_nmethods() { -#ifdef ASSERT - assert(Thread::current()->is_Code_cache_sweeper_thread(), "must be executed under CodeCache_lock and in sweeper thread"); - assert_lock_strong(CodeCache_lock); -#endif - - // If we do not want to reclaim not-entrant or zombie methods there is no need - // to scan stacks - if (!MethodFlushing) { - return NULL; - } - - // Check for restart - assert(_current.method() == NULL, "should only happen between sweeper cycles"); - assert(wait_for_stack_scanning(), "should only happen between sweeper cycles"); - - _seen = 0; - _current = CompiledMethodIterator(CompiledMethodIterator::all_blobs); - // Initialize to first nmethod - _current.next(); - _traversals += 1; - _total_time_this_sweep = Tickspan(); - - if (PrintMethodFlushing) { - tty->print_cr("### Sweep: stack traversal " INT64_FORMAT, _traversals); - } - return &mark_activation_closure; -} - -/** - * This function triggers a VM operation that does stack scanning of active - * methods. Stack scanning is mandatory for the sweeper to make progress. - */ -void NMethodSweeper::do_stack_scanning() { - assert(!CodeCache_lock->owned_by_self(), "just checking"); - if (Continuations::enabled()) { - // There are continuation stacks in the heap that need to be scanned. 
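// Continuation (virtual thread) stacks live in the Java heap as StackChunk
// objects, so a thread-stack handshake alone cannot observe their frames;
// the collect() below forces a heap traversal that visits them as well.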
- Universe::heap()->collect(GCCause::_codecache_GC_threshold); - } - if (wait_for_stack_scanning()) { - CodeBlobClosure* code_cl; - { - MutexLocker ccl(CodeCache_lock, Mutex::_no_safepoint_check_flag); - code_cl = prepare_mark_active_nmethods(); - } - if (code_cl != NULL) { - NMethodMarkingClosure nm_cl(code_cl); - Handshake::execute(&nm_cl); - } - } -} - -void NMethodSweeper::sweeper_loop() { - bool timeout; - while (true) { - { - ThreadBlockInVM tbivm(JavaThread::current()); - MonitorLocker waiter(CodeSweeper_lock, Mutex::_no_safepoint_check_flag); - const int64_t wait_time = 60*60*24 * 1000; - timeout = waiter.wait(wait_time); - } - if (!timeout && (_should_sweep || _force_sweep)) { - sweep(); - } - } -} - -/** - * Wakes up the sweeper thread to sweep if code cache space runs low - */ -void NMethodSweeper::report_allocation() { - if (should_start_aggressive_sweep()) { - MonitorLocker waiter(CodeSweeper_lock, Mutex::_no_safepoint_check_flag); - _should_sweep = true; - CodeSweeper_lock->notify(); - } -} - -bool NMethodSweeper::should_start_aggressive_sweep() { - // Makes sure that we do not invoke the sweeper too often during startup. - double start_threshold = 100.0 / (double)StartAggressiveSweepingAt; - double aggressive_sweep_threshold = MAX2(start_threshold, 1.1); - return (CodeCache::reverse_free_ratio() >= aggressive_sweep_threshold); -} - -/** - * Wakes up the sweeper thread and forces a sweep. Blocks until it finished. - */ -void NMethodSweeper::force_sweep() { - ThreadBlockInVM tbivm(JavaThread::current()); - MonitorLocker waiter(CodeSweeper_lock, Mutex::_no_safepoint_check_flag); - // Request forced sweep - _force_sweep = true; - while (_force_sweep) { - // Notify sweeper that we want to force a sweep and wait for completion. - // In case a sweep currently takes place we timeout and try again because - // we want to enforce a full sweep. - CodeSweeper_lock->notify(); - waiter.wait(1000); - } -} - -/** - * Handle a safepoint request - */ -void NMethodSweeper::handle_safepoint_request() { - JavaThread* thread = JavaThread::current(); - if (SafepointMechanism::local_poll_armed(thread)) { - if (PrintMethodFlushing && Verbose) { - tty->print_cr("### Sweep at %d out of %d, yielding to safepoint", _seen, CodeCache::nmethod_count()); - } - MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - - ThreadBlockInVM tbivm(thread); - } -} - -void NMethodSweeper::sweep() { - assert(_should_sweep || _force_sweep, "must have been set"); - assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode"); - Atomic::store(&_bytes_changed, static_cast(0)); // reset regardless of sleep reason - if (_should_sweep) { - MutexLocker mu(CodeSweeper_lock, Mutex::_no_safepoint_check_flag); - _should_sweep = false; - } - - do_stack_scanning(); - - init_sweeper_log(); - sweep_code_cache(); - - // We are done with sweeping the code cache once. 
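// Illustrative arithmetic for should_start_aggressive_sweep() above
// (hypothetical values): with StartAggressiveSweepingAt == 10,
// start_threshold = 100.0 / 10 = 10.0, so sweeping begins once
// reverse_free_ratio() >= 10, i.e. when less than about a tenth of the
// code cache remains free; the MAX2(start_threshold, 1.1) clamp keeps the
// trigger above 1.1 even for StartAggressiveSweepingAt values near 100.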
- _total_nof_code_cache_sweeps++; - - if (_force_sweep) { - // Notify requester that forced sweep finished - MutexLocker mu(CodeSweeper_lock, Mutex::_no_safepoint_check_flag); - _force_sweep = false; - CodeSweeper_lock->notify(); - } -} - -static void post_sweep_event(EventSweepCodeCache* event, - const Ticks& start, - const Ticks& end, - s4 traversals, - int swept, - int flushed, - int zombified) { - assert(event != NULL, "invariant"); - assert(event->should_commit(), "invariant"); - event->set_starttime(start); - event->set_endtime(end); - event->set_sweepId(traversals); - event->set_sweptCount(swept); - event->set_flushedCount(flushed); - event->set_zombifiedCount(zombified); - event->commit(); -} - -void NMethodSweeper::sweep_code_cache() { - ResourceMark rm; - Ticks sweep_start_counter = Ticks::now(); - - log_debug(codecache, sweep, start)("CodeCache flushing"); - - int flushed_count = 0; - int zombified_count = 0; - int flushed_c2_count = 0; - - if (PrintMethodFlushing && Verbose) { - tty->print_cr("### Sweep at %d out of %d", _seen, CodeCache::nmethod_count()); - } - - int swept_count = 0; - assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here"); - assert(!CodeCache_lock->owned_by_self(), "just checking"); - - int freed_memory = 0; - { - MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - - while (!_current.end()) { - CodeCache::Sweep::begin(); - swept_count++; - // Since we will give up the CodeCache_lock, always skip ahead - // to the next nmethod. Other blobs can be deleted by other - // threads but nmethods are only reclaimed by the sweeper. - CompiledMethod* nm = _current.method(); - _current.next(); - - // Now ready to process nmethod and give up CodeCache_lock - { - MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - // Save information before potentially flushing the nmethod - // Only flushing nmethods so size only matters for them. - int size = nm->is_nmethod() ? ((nmethod*)nm)->total_size() : 0; - bool is_c2_method = nm->is_compiled_by_c2(); - bool is_osr = nm->is_osr_method(); - int compile_id = nm->compile_id(); - intptr_t address = p2i(nm); - const char* state_before = nm->state(); - const char* state_after = ""; - - MethodStateChange type = process_compiled_method(nm); - switch (type) { - case Flushed: - state_after = "flushed"; - freed_memory += size; - ++flushed_count; - if (is_c2_method) { - ++flushed_c2_count; - } - break; - case MadeZombie: - state_after = "made zombie"; - ++zombified_count; - break; - case None: - break; - default: - ShouldNotReachHere(); - } - if (PrintMethodFlushing && Verbose && type != None) { - tty->print_cr("### %s nmethod %3d/" PTR_FORMAT " (%s) %s", is_osr ? 
"osr" : "", compile_id, address, state_before, state_after); - } - } - - _seen++; - CodeCache::Sweep::end(); - handle_safepoint_request(); - } - } - - assert(_current.end(), "must have scanned the whole cache"); - - const Ticks sweep_end_counter = Ticks::now(); - const Tickspan sweep_time = sweep_end_counter - sweep_start_counter; - { - MutexLocker mu(NMethodSweeperStats_lock, Mutex::_no_safepoint_check_flag); - _total_time_sweeping += sweep_time; - _total_time_this_sweep += sweep_time; - _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time); - _total_flushed_size += freed_memory; - _total_nof_methods_reclaimed += flushed_count; - _total_nof_c2_methods_reclaimed += flushed_c2_count; - _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep); - } - -#ifdef ASSERT - if(PrintMethodFlushing) { - tty->print_cr("### sweeper: sweep time(" JLONG_FORMAT "): ", sweep_time.value()); - } -#endif - - Log(codecache, sweep) log; - if (log.is_debug()) { - LogStream ls(log.debug()); - CodeCache::print_summary(&ls, false); - } - log_sweep("finished"); - - // Sweeper is the only case where memory is released, check here if it - // is time to restart the compiler. Only checking if there is a certain - // amount of free memory in the code cache might lead to re-enabling - // compilation although no memory has been released. For example, there are - // cases when compilation was disabled although there is 4MB (or more) free - // memory in the code cache. The reason is code cache fragmentation. Therefore, - // it only makes sense to re-enable compilation if we have actually freed memory. - // Note that typically several kB are released for sweeping 16MB of the code - // cache. As a result, 'freed_memory' > 0 to restart the compiler. - if (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0)) { - CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation); - log.debug("restart compiler"); - log_sweep("restart_compiler"); - EventJitRestart event; - event.set_freedMemory(freed_memory); - event.set_codeCacheMaxCapacity(CodeCache::max_capacity()); - event.commit(); - } - - EventSweepCodeCache event(UNTIMED); - if (event.should_commit()) { - post_sweep_event(&event, sweep_start_counter, sweep_end_counter, (s4)_traversals, swept_count, flushed_count, zombified_count); - } -} - - // This function updates the sweeper statistics that keep track of nmethods - // state changes. If there is 'enough' state change, the sweeper is invoked - // as soon as possible. Also, we are guaranteed to invoke the sweeper if - // the code cache gets full. -void NMethodSweeper::report_state_change(nmethod* nm) { - Atomic::add(&_bytes_changed, (size_t)nm->total_size()); - if (Atomic::load(&_bytes_changed) > _sweep_threshold_bytes) { - MutexLocker mu(CodeSweeper_lock, Mutex::_no_safepoint_check_flag); - _should_sweep = true; - CodeSweeper_lock->notify(); // Wake up sweeper. 
- } -} - -class CompiledMethodMarker: public StackObj { - private: - CodeCacheSweeperThread* _thread; - public: - CompiledMethodMarker(CompiledMethod* cm) { - JavaThread* current = JavaThread::current(); - assert (current->is_Code_cache_sweeper_thread(), "Must be"); - _thread = (CodeCacheSweeperThread*)current; - if (!cm->is_zombie() && !cm->is_unloading()) { - // Only expose live nmethods for scanning - _thread->set_scanned_compiled_method(cm); - } - } - ~CompiledMethodMarker() { - _thread->set_scanned_compiled_method(NULL); - } -}; - -NMethodSweeper::MethodStateChange NMethodSweeper::process_compiled_method(CompiledMethod* cm) { - assert(cm != NULL, "sanity"); - assert(!CodeCache_lock->owned_by_self(), "just checking"); - - MethodStateChange result = None; - // Make sure this nmethod doesn't get unloaded during the scan, - // since safepoints may happen during acquired below locks. - CompiledMethodMarker nmm(cm); - SWEEP(cm); - - // Skip methods that are currently referenced by the VM - if (cm->is_locked_by_vm()) { - // But still remember to clean-up inline caches for alive nmethods - if (cm->is_alive()) { - // Clean inline caches that point to zombie/non-entrant/unloaded nmethods - cm->cleanup_inline_caches(false); - SWEEP(cm); - } - return result; - } - - if (cm->is_zombie()) { - // All inline caches that referred to this nmethod were cleaned in the - // previous sweeper cycle. Now flush the nmethod from the code cache. - assert(!cm->is_locked_by_vm(), "must not flush locked Compiled Methods"); - cm->flush(); - assert(result == None, "sanity"); - result = Flushed; - } else if (cm->is_not_entrant()) { - // If there are no current activations of this method on the - // stack we can safely convert it to a zombie method - OrderAccess::loadload(); // _stack_traversal_mark and _state - if (cm->can_convert_to_zombie()) { - // Code cache state change is tracked in make_zombie() - cm->make_zombie(); - SWEEP(cm); - assert(result == None, "sanity"); - result = MadeZombie; - assert(cm->is_zombie(), "nmethod must be zombie"); - } else { - // Still alive, clean up its inline caches - cm->cleanup_inline_caches(false); - SWEEP(cm); - } - } else if (cm->is_unloaded()) { - // Code is unloaded, so there are no activations on the stack. - // Convert the nmethod to zombie. - // Code cache state change is tracked in make_zombie() - cm->make_zombie(); - SWEEP(cm); - assert(result == None, "sanity"); - result = MadeZombie; - } else { - if (cm->is_nmethod()) { - possibly_flush((nmethod*)cm); - } - // Clean inline caches that point to zombie/non-entrant/unloaded nmethods - cm->cleanup_inline_caches(false); - SWEEP(cm); - } - return result; -} - - -void NMethodSweeper::possibly_flush(nmethod* nm) { - if (UseCodeCacheFlushing) { - if (!nm->is_locked_by_vm() && !nm->is_native_method() && !nm->is_not_installed() && !nm->is_unloading()) { - bool make_not_entrant = false; - - // Do not make native methods not-entrant - nm->dec_hotness_counter(); - // Get the initial value of the hotness counter. This value depends on the - // ReservedCodeCacheSize - int reset_val = hotness_counter_reset_val(); - int time_since_reset = reset_val - nm->hotness_counter(); - double threshold = -reset_val + (CodeCache::reverse_free_ratio() * NmethodSweepActivity); - // The less free space in the code cache we have - the bigger reverse_free_ratio() is. - // I.e., 'threshold' increases with lower available space in the code cache and a higher - // NmethodSweepActivity. 
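// A worked example with hypothetical values: ReservedCodeCacheSize of 240 MB
// gives reset_val = (240M / M) * 2 = 480; with reverse_free_ratio() == 4 and
// NmethodSweepActivity == 10, threshold = -480 + 4 * 10 = -440, so only
// nmethods whose hotness counter has decayed below -440 become candidates.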
If the current hotness counter - which decreases from its initial - // value until it is reset by stack walking - is smaller than the computed threshold, the - // corresponding nmethod is considered for removal. - if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > MinPassesBeforeFlush)) { - // A method is marked as not-entrant if the method is - // 1) 'old enough': nm->hotness_counter() < threshold - // 2) The method was in_use for a minimum amount of time: (time_since_reset > MinPassesBeforeFlush) - // The second condition is necessary if we are dealing with very small code cache - // sizes (e.g., <10m) and the code cache size is too small to hold all hot methods. - // The second condition ensures that methods are not immediately made not-entrant - // after compilation. - make_not_entrant = true; - } - - // The stack-scanning low-cost detection may not see the method was used (which can happen for - // flat profiles). Check the age counter for possible data. - if (UseCodeAging && make_not_entrant && (nm->is_compiled_by_c2() || nm->is_compiled_by_c1())) { - MethodCounters* mc = nm->method()->get_method_counters(Thread::current()); - if (mc != NULL) { - // Snapshot the value as it's changed concurrently - int age = mc->nmethod_age(); - if (MethodCounters::is_nmethod_hot(age)) { - // The method has gone through flushing, and it became relatively hot that it deopted - // before we could take a look at it. Give it more time to appear in the stack traces, - // proportional to the number of deopts. - MethodData* md = nm->method()->method_data(); - if (md != NULL && time_since_reset > (int)(MinPassesBeforeFlush * (md->tenure_traps() + 1))) { - // It's been long enough, we still haven't seen it on stack. - // Try to flush it, but enable counters the next time. - mc->reset_nmethod_age(); - } else { - make_not_entrant = false; - } - } else if (MethodCounters::is_nmethod_warm(age)) { - // Method has counters enabled, and the method was used within - // previous MinPassesBeforeFlush sweeps. Reset the counter. Stay in the existing - // compiled state. - mc->reset_nmethod_age(); - // delay the next check - nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val()); - make_not_entrant = false; - } else if (MethodCounters::is_nmethod_age_unset(age)) { - // No counters were used before. Set the counters to the detection - // limit value. If the method is going to be used again it will be compiled - // with counters that we're going to use for analysis the next time. - mc->reset_nmethod_age(); - } else { - // Method was totally idle for 10 sweeps - // The counter already has the initial value, flush it and may be recompile - // later with counters - } - } - } - - if (make_not_entrant) { - nm->make_not_entrant(); - - // Code cache state change is tracked in make_not_entrant() - if (PrintMethodFlushing && Verbose) { - tty->print_cr("### Nmethod %d/" PTR_FORMAT "made not-entrant: hotness counter %d/%d threshold %f", - nm->compile_id(), p2i(nm), nm->hotness_counter(), reset_val, threshold); - } - } - } - } -} - -// Print out some state information about the current sweep and the -// state of the code cache if it's requested. -void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) { - if (PrintMethodFlushing) { - ResourceMark rm; - stringStream s; - // Dump code cache state into a buffer before locking the tty, - // because log_state() will use locks causing lock conflicts. 
- CodeCache::log_state(&s); - - ttyLocker ttyl; - tty->print("### sweeper: %s ", msg); - if (format != NULL) { - va_list ap; - va_start(ap, format); - tty->vprint(format, ap); - va_end(ap); - } - tty->print_cr("%s", s.as_string()); - } - - if (LogCompilation && (xtty != NULL)) { - ResourceMark rm; - stringStream s; - // Dump code cache state into a buffer before locking the tty, - // because log_state() will use locks causing lock conflicts. - CodeCache::log_state(&s); - - ttyLocker ttyl; - xtty->begin_elem("sweeper state='%s' traversals='" INT64_FORMAT "' ", msg, traversal_count()); - if (format != NULL) { - va_list ap; - va_start(ap, format); - xtty->vprint(format, ap); - va_end(ap); - } - xtty->print("%s", s.as_string()); - xtty->stamp(); - xtty->end_elem(); - } -} - -void NMethodSweeper::print(outputStream* out) { - ttyLocker ttyl; - out = (out == NULL) ? tty : out; - out->print_cr("Code cache sweeper statistics:"); - out->print_cr(" Total sweep time: %1.0lf ms", (double)_total_time_sweeping.value()/1000000); - out->print_cr(" Total number of full sweeps: " INT64_FORMAT, _total_nof_code_cache_sweeps); - out->print_cr(" Total number of flushed methods: " INT64_FORMAT " (thereof " INT64_FORMAT " C2 methods)", - _total_nof_methods_reclaimed, - _total_nof_c2_methods_reclaimed); - out->print_cr(" Total size of flushed methods: " SIZE_FORMAT " kB", _total_flushed_size/K); -} diff --git a/src/hotspot/share/runtime/sweeper.hpp b/src/hotspot/share/runtime/sweeper.hpp deleted file mode 100644 index 06daf37ee3a7e..0000000000000 --- a/src/hotspot/share/runtime/sweeper.hpp +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef SHARE_RUNTIME_SWEEPER_HPP -#define SHARE_RUNTIME_SWEEPER_HPP - -class WhiteBox; - -#include "code/codeCache.hpp" -#include "utilities/ticks.hpp" - -class CodeBlobClosure; - -// An NmethodSweeper is an incremental cleaner for: -// - cleanup inline caches -// - reclamation of nmethods -// Removing nmethods from the code cache includes two operations -// 1) mark active nmethods -// Is done in 'do_stack_scanning()'. This function invokes a thread-local handshake -// that marks all nmethods that are active on a thread's stack, and resets their -// hotness counters. This allows the sweeper to assume that a decayed hotness counter -// of an nmethod implies that it is seemingly not used actively. -// 2) sweep nmethods -// Is done in sweep_code_cache(). This function is the only place in the -// sweeper where memory is reclaimed. 
Note that sweep_code_cache() is not -// called at a safepoint. However, sweep_code_cache() stops executing if -// another thread requests a safepoint. Consequently, 'mark_active_nmethods()' -// and sweep_code_cache() cannot execute at the same time. -// To reclaim memory, nmethods are first marked as 'not-entrant'. Methods can -// be made not-entrant by (i) the sweeper, (ii) deoptimization, (iii) dependency -// invalidation, and (iv) being replaced by a different method version (tiered -// compilation). Not-entrant nmethods cannot be called by Java threads, but they -// can still be active on the stack. To ensure that active nmethods are not reclaimed, -// we have to wait until the next marking phase has completed. If a not-entrant -// nmethod was NOT marked as active, it can be converted to 'zombie' state. To safely -// remove the nmethod, all inline caches (IC) that point to the nmethod must be -// cleared. After that, the nmethod can be evicted from the code cache. Each nmethod's -// state change happens during separate sweeps. It may take at least 3 sweeps before an -// nmethod's space is freed. - -class NMethodSweeper : public AllStatic { - private: - enum MethodStateChange { - None, - MadeZombie, - Flushed - }; - static int64_t _traversals; // Stack scan count, also sweep ID. - static int64_t _total_nof_code_cache_sweeps; // Total number of full sweeps of the code cache - static CompiledMethodIterator _current; // Current compiled method - static int _seen; // Nof. nmethod we have currently processed in current pass of CodeCache - static size_t _sweep_threshold_bytes; // The threshold for when to invoke sweeps - - static volatile bool _should_sweep; // Indicates if a normal sweep will be done - static volatile bool _force_sweep; // Indicates if a forced sweep will be done - static volatile size_t _bytes_changed; // Counts the total nmethod size if the nmethod changed from: - // 1) alive -> not_entrant - // 2) not_entrant -> zombie - // Stat counters - static int64_t _total_nof_methods_reclaimed; // Accumulated nof methods flushed - static int64_t _total_nof_c2_methods_reclaimed; // Accumulated nof C2-compiled methods flushed - static size_t _total_flushed_size; // Total size of flushed methods - static int _hotness_counter_reset_val; - - static Tickspan _total_time_sweeping; // Accumulated time sweeping - static Tickspan _total_time_this_sweep; // Total time this sweep - static Tickspan _peak_sweep_time; // Peak time for a full sweep - static Tickspan _peak_sweep_fraction_time; // Peak time sweeping one fraction - - static MethodStateChange process_compiled_method(CompiledMethod *nm); - - static void init_sweeper_log() NOT_DEBUG_RETURN; - static bool wait_for_stack_scanning(); - static void sweep_code_cache(); - static void handle_safepoint_request(); - static void do_stack_scanning(); - static void sweep(); - public: - static int64_t traversal_count() { return _traversals; } - static size_t sweep_threshold_bytes() { return _sweep_threshold_bytes; } - static void set_sweep_threshold_bytes(size_t threshold) { _sweep_threshold_bytes = threshold; } - static int64_t total_nof_methods_reclaimed() { return _total_nof_methods_reclaimed; } - static const Tickspan total_time_sweeping() { return _total_time_sweeping; } - static const Tickspan peak_sweep_time() { return _peak_sweep_time; } - static const Tickspan peak_sweep_fraction_time() { return _peak_sweep_fraction_time; } - static void log_sweep(const char* msg, const char* format = NULL, ...) 
ATTRIBUTE_PRINTF(2, 3); - -#ifdef ASSERT - // Keep track of sweeper activity in the ring buffer - static void record_sweep(CompiledMethod* nm, int line); -#endif - - static CodeBlobClosure* prepare_mark_active_nmethods(); - static void sweeper_loop(); - static bool should_start_aggressive_sweep(); - static void force_sweep(); - static int hotness_counter_reset_val(); - static void report_state_change(nmethod* nm); - static void report_allocation(); // Possibly start the sweeper thread. - static void possibly_flush(nmethod* nm); - static void print(outputStream* out); // Printing/debugging - static void print() { print(tty); } -}; - -#endif // SHARE_RUNTIME_SWEEPER_HPP diff --git a/src/hotspot/share/runtime/thread.hpp b/src/hotspot/share/runtime/thread.hpp index d5de6bce9837e..6edb46ad2b77e 100644 --- a/src/hotspot/share/runtime/thread.hpp +++ b/src/hotspot/share/runtime/thread.hpp @@ -323,7 +323,6 @@ class Thread: public ThreadShadow { virtual bool is_VM_thread() const { return false; } virtual bool is_Java_thread() const { return false; } virtual bool is_Compiler_thread() const { return false; } - virtual bool is_Code_cache_sweeper_thread() const { return false; } virtual bool is_service_thread() const { return false; } virtual bool is_monitor_deflation_thread() const { return false; } virtual bool is_hidden_from_external_view() const { return false; } diff --git a/src/hotspot/share/runtime/threads.cpp b/src/hotspot/share/runtime/threads.cpp index c7993b45a0b6f..aeb027c06ac9f 100644 --- a/src/hotspot/share/runtime/threads.cpp +++ b/src/hotspot/share/runtime/threads.cpp @@ -373,7 +373,7 @@ void Threads::initialize_java_lang_classes(JavaThread* main_thread, TRAPS) { // Get the Java runtime name, version, and vendor info after java.lang.System is initialized. // Some values are actually configure-time constants but some can be set via the jlink tool and // so must be read dynamically. We treat them all the same. 
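// The call below illustrates the new convention: find_instance_klass now
// takes the querying thread as an explicit first argument, so a caller that
// already has THREAD in scope simply forwards it. A NULL result means the
// class has not been loaded; the lookup itself never triggers loading.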
- InstanceKlass* ik = SystemDictionary::find_instance_klass(vmSymbols::java_lang_VersionProps(), + InstanceKlass* ik = SystemDictionary::find_instance_klass(THREAD, vmSymbols::java_lang_VersionProps(), Handle(), Handle()); { ResourceMark rm(main_thread); diff --git a/src/hotspot/share/runtime/vmOperation.hpp b/src/hotspot/share/runtime/vmOperation.hpp index cf30cb18915b8..774263ccded16 100644 --- a/src/hotspot/share/runtime/vmOperation.hpp +++ b/src/hotspot/share/runtime/vmOperation.hpp @@ -96,7 +96,6 @@ template(ClassLoaderStatsOperation) \ template(ClassLoaderHierarchyOperation) \ template(DumpHashtable) \ - template(DumpTouchedMethods) \ template(CleanClassLoaderDataMetaspaces) \ template(PrintCompileQueue) \ template(PrintClassHierarchy) \ diff --git a/src/hotspot/share/runtime/vmOperations.cpp b/src/hotspot/share/runtime/vmOperations.cpp index 0ddd0c919cefb..3e1599b56627b 100644 --- a/src/hotspot/share/runtime/vmOperations.cpp +++ b/src/hotspot/share/runtime/vmOperations.cpp @@ -91,7 +91,7 @@ void VM_Operation::print_on_error(outputStream* st) const { void VM_ClearICs::doit() { if (_preserve_static_stubs) { - CodeCache::cleanup_inline_caches(); + CodeCache::cleanup_inline_caches_whitebox(); } else { CodeCache::clear_inline_caches(); } diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp index ec4faff242228..50c4612e852f9 100644 --- a/src/hotspot/share/runtime/vmStructs.cpp +++ b/src/hotspot/share/runtime/vmStructs.cpp @@ -287,7 +287,6 @@ nonstatic_field(DataLayout, _header._struct._bci, u2) \ nonstatic_field(DataLayout, _header._struct._traps, u4) \ nonstatic_field(DataLayout, _cells[0], intptr_t) \ - nonstatic_field(MethodCounters, _nmethod_age, int) \ nonstatic_field(MethodCounters, _invoke_mask, int) \ nonstatic_field(MethodCounters, _backedge_mask, int) \ COMPILER2_OR_JVMCI_PRESENT(nonstatic_field(MethodCounters, _interpreter_throwout_count, u2)) \ @@ -661,8 +660,6 @@ nonstatic_field(nmethod, _entry_point, address) \ nonstatic_field(nmethod, _verified_entry_point, address) \ nonstatic_field(nmethod, _osr_entry_point, address) \ - volatile_nonstatic_field(nmethod, _lock_count, jint) \ - volatile_nonstatic_field(nmethod, _stack_traversal_mark, int64_t) \ nonstatic_field(nmethod, _compile_id, int) \ nonstatic_field(nmethod, _comp_level, CompLevel) \ \ @@ -1317,7 +1314,6 @@ declare_type(ServiceThread, JavaThread) \ declare_type(NotificationThread, JavaThread) \ declare_type(CompilerThread, JavaThread) \ - declare_type(CodeCacheSweeperThread, JavaThread) \ declare_toplevel_type(OSThread) \ declare_toplevel_type(JavaFrameAnchor) \ \ diff --git a/src/hotspot/share/services/diagnosticCommand.cpp b/src/hotspot/share/services/diagnosticCommand.cpp index 02c843bca347a..8aeb47854247c 100644 --- a/src/hotspot/share/services/diagnosticCommand.cpp +++ b/src/hotspot/share/services/diagnosticCommand.cpp @@ -126,7 +126,6 @@ void DCmdRegistrant::register_dcmds(){ DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); #endif // LINUX - DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); @@ -933,30 +932,6 @@ void ClassHierarchyDCmd::execute(DCmdSource source, TRAPS) { } #endif -class VM_DumpTouchedMethods : public VM_Operation { -private: - outputStream* _out; 
-public: - VM_DumpTouchedMethods(outputStream* out) { - _out = out; - } - - virtual VMOp_Type type() const { return VMOp_DumpTouchedMethods; } - - virtual void doit() { - Method::print_touched_methods(_out); - } -}; - -void TouchedMethodsDCmd::execute(DCmdSource source, TRAPS) { - if (!LogTouchedMethods) { - output()->print_cr("VM.print_touched_methods command requires -XX:+LogTouchedMethods"); - return; - } - VM_DumpTouchedMethods dumper(output()); - VMThread::execute(&dumper); -} - ClassesDCmd::ClassesDCmd(outputStream* output, bool heap) : DCmdWithParser(output, heap), _verbose("-verbose", diff --git a/src/hotspot/share/services/diagnosticCommand.hpp b/src/hotspot/share/services/diagnosticCommand.hpp index b15987ed18b43..d9f017340bc4b 100644 --- a/src/hotspot/share/services/diagnosticCommand.hpp +++ b/src/hotspot/share/services/diagnosticCommand.hpp @@ -386,21 +386,6 @@ class ClassHierarchyDCmd : public DCmdWithParser { virtual void execute(DCmdSource source, TRAPS); }; -class TouchedMethodsDCmd : public DCmd { -public: - TouchedMethodsDCmd(outputStream* output, bool heap) : DCmd(output, heap) {} - static const char* name() { - return "VM.print_touched_methods"; - } - static const char* description() { - return "Print all methods that have ever been touched during the lifetime of this JVM."; - } - static const char* impact() { - return "Medium: Depends on Java content."; - } - virtual void execute(DCmdSource source, TRAPS); -}; - #if INCLUDE_CDS class DumpSharedArchiveDCmd: public DCmdWithParser { protected: diff --git a/src/hotspot/share/services/finalizerService.cpp b/src/hotspot/share/services/finalizerService.cpp index b3b5eb7450b9d..202a1af08011a 100644 --- a/src/hotspot/share/services/finalizerService.cpp +++ b/src/hotspot/share/services/finalizerService.cpp @@ -166,12 +166,6 @@ static const size_t DEFAULT_TABLE_SIZE = 2048; static const size_t MAX_SIZE = 24; static volatile bool _has_work = false; -static size_t ceil_log2(size_t value) { - size_t ret; - for (ret = 1; ((size_t)1 << ret) < value; ++ret); - return ret; -} - class FinalizerEntryLookupResult { private: FinalizerEntry* _result; diff --git a/src/hotspot/share/services/heapDumper.cpp b/src/hotspot/share/services/heapDumper.cpp index 9dd26717d5141..9de18f5b7bc2b 100644 --- a/src/hotspot/share/services/heapDumper.cpp +++ b/src/hotspot/share/services/heapDumper.cpp @@ -410,6 +410,9 @@ class AbstractDumpWriter : public StackObj { // Returns true if we have enough room in the buffer for 'len' bytes. bool can_write_fast(size_t len); + + void write_address(address a); + public: AbstractDumpWriter() : _buffer(NULL), @@ -429,6 +432,7 @@ class AbstractDumpWriter : public StackObj { void write_u4(u4 x); void write_u8(u8 x); void write_objectID(oop o); + void write_rootID(oop* p); void write_symbolID(Symbol* o); void write_classID(Klass* k); void write_id(u4 x); @@ -511,8 +515,7 @@ void AbstractDumpWriter::write_u8(u8 x) { WRITE_KNOWN_TYPE(&v, 8); } -void AbstractDumpWriter::write_objectID(oop o) { - address a = cast_from_oop
<address>(o); +void AbstractDumpWriter::write_address(address a) { #ifdef _LP64 write_u8((u8)a); #else @@ -520,13 +523,16 @@ #endif +void AbstractDumpWriter::write_objectID(oop o) { + write_address(cast_from_oop<address>
(o)); +} + +void AbstractDumpWriter::write_rootID(oop* p) { + write_address((address)p); +} + void AbstractDumpWriter::write_symbolID(Symbol* s) { - address a = (address)((uintptr_t)s); -#ifdef _LP64 - write_u8((u8)a); -#else - write_u4((u4)a); -#endif + write_address((address)((uintptr_t)s)); } void AbstractDumpWriter::write_id(u4 x) { @@ -1581,7 +1587,7 @@ void JNIGlobalsDumper::do_oop(oop* obj_p) { u4 size = 1 + 2 * sizeof(address); writer()->start_sub_record(HPROF_GC_ROOT_JNI_GLOBAL, size); writer()->write_objectID(o); - writer()->write_objectID((oopDesc*)obj_p); // global ref ID + writer()->write_rootID(obj_p); // global ref ID writer()->end_sub_record(); } }; diff --git a/src/hotspot/share/services/threadIdTable.cpp b/src/hotspot/share/services/threadIdTable.cpp index 0e10995430793..c1f8633463f85 100644 --- a/src/hotspot/share/services/threadIdTable.cpp +++ b/src/hotspot/share/services/threadIdTable.cpp @@ -80,12 +80,6 @@ class ThreadIdTableConfig : public AllStatic { } }; -static size_t ceil_log2(size_t val) { - size_t ret; - for (ret = 1; ((size_t)1 << ret) < val; ++ret); - return ret; -} - // Lazily creates the table and populates it with the given // thread list void ThreadIdTable::lazy_initialize(const ThreadsList *threads) { diff --git a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp index b4cbcf0a1d751..f004be604f649 100644 --- a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp +++ b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1229,7 +1229,11 @@ inline TableStatistics ConcurrentHashTable:: summary.add((double)count); } - return TableStatistics(*_stats_rate, summary, literal_bytes, sizeof(Bucket), sizeof(Node)); + if (_stats_rate == nullptr) { + return TableStatistics(summary, literal_bytes, sizeof(Bucket), sizeof(Node)); + } else { + return TableStatistics(*_stats_rate, summary, literal_bytes, sizeof(Bucket), sizeof(Node)); + } } template diff --git a/src/hotspot/share/utilities/globalDefinitions.hpp b/src/hotspot/share/utilities/globalDefinitions.hpp index c6da9e07fca79..f9fa4d081f7a5 100644 --- a/src/hotspot/share/utilities/globalDefinitions.hpp +++ b/src/hotspot/share/utilities/globalDefinitions.hpp @@ -1133,7 +1133,6 @@ inline intx byte_size(void* from, void* to) { return (address)to - (address)from; } - // Pack and extract shorts to/from ints: inline int extract_low_short_from_int(jint x) { diff --git a/src/hotspot/share/utilities/hashtable.cpp b/src/hotspot/share/utilities/hashtable.cpp index 9a2f2a2a4d290..51b8927655fc5 100644 --- a/src/hotspot/share/utilities/hashtable.cpp +++ b/src/hotspot/share/utilities/hashtable.cpp @@ -263,5 +263,4 @@ template class Hashtable; template class Hashtable; template class Hashtable; -template void BasicHashtable::verify_table(char const*); template void BasicHashtable::verify_table(char const*); diff --git a/src/hotspot/share/utilities/moveBits.hpp b/src/hotspot/share/utilities/moveBits.hpp new file mode 100644 index 0000000000000..35d30c0ddf901 --- /dev/null +++ b/src/hotspot/share/utilities/moveBits.hpp @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_UTILITIES_MOVEBITS_HPP
+#define SHARE_UTILITIES_MOVEBITS_HPP
+
+#include "metaprogramming/conditional.hpp"
+#include "metaprogramming/enableIf.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include <type_traits>
+
+template<typename T>
+class ReverseBitsImpl {
+  static const size_t NB = sizeof(T) * BitsPerByte;
+
+  static_assert((NB == 8) || (NB == 16) || (NB == 32) || (NB == 64),
+                "unsupported size");
+
+  // The unsigned integral type for calculations.
+  using I = typename Conditional<NB <= 32, uint32_t, uint64_t>::type;
+
+  static const I rep_5555 = static_cast<I>(UCONST64(0x5555555555555555));
+  static const I rep_3333 = static_cast<I>(UCONST64(0x3333333333333333));
+  static const I rep_0F0F = static_cast<I>(UCONST64(0x0F0F0F0F0F0F0F0F));
+  static const I rep_00FF = static_cast<I>(UCONST64(0x00FF00FF00FF00FF));
+  static const I rep_FFFF = static_cast<I>(UCONST64(0x0000FFFF0000FFFF));
+
+public:
+
+  static constexpr T reverse_bits_in_bytes(T v) {
+    // Based on Hacker's Delight Section 7-1
+    auto x = static_cast<I>(v);
+    x = ((x & rep_5555) << 1) | ((x >> 1) & rep_5555);
+    x = ((x & rep_3333) << 2) | ((x >> 2) & rep_3333);
+    x = ((x & rep_0F0F) << 4) | ((x >> 4) & rep_0F0F);
+    return static_cast<T>(x);
+  }
+
+  static constexpr T reverse_bytes(T v) {
+    // Based on Hacker's Delight Section 7-1
+    // NB: Compilers are good at recognizing byte-swap code and transforming
+    // it into platform-specific instructions like x86 bswap.
+    auto x = static_cast<I>(v);
+    switch (NB) {
+    case 64:
+      // The use of NB/2 rather than 32 avoids a warning in dead code when
+      // I is uint32_t, because shifting a 32bit type by 32 is UB.
+      x = (x << (NB/2)) | (x >> (NB/2));
+    case 32: // fallthrough
+      x = ((x & rep_FFFF) << 16) | ((x >> 16) & rep_FFFF);
+    case 16: // fallthrough
+      x = ((x & rep_00FF) << 8) | ((x >> 8) & rep_00FF);
+    default: // fallthrough
+      return static_cast<T>(x);
+    }
+  }
+};
+
+// Performs byte reversal of an integral type up to 64 bits.
+template <typename T, ENABLE_IF(std::is_integral<T>::value)>
+constexpr T reverse_bytes(T x) {
+  return ReverseBitsImpl<T>::reverse_bytes(x);
+}
+
+// Performs bytewise bit reversal of each byte of an integral
+// type up to 64 bits.
+template <typename T, ENABLE_IF(std::is_integral<T>::value)>
+constexpr T reverse_bits_in_bytes(T x) {
+  return ReverseBitsImpl<T>::reverse_bits_in_bytes(x);
+}
+
+// Performs full bit reversal of an integral type up to 64 bits.
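+// Usage sketch with hand-checked values: for a uint8_t, reverse_bytes is the
+// identity, so reverse_bits(0xB1) == reverse_bits_in_bytes(0xB1) == 0x8D
+// (0b10110001 -> 0b10001101); for a uint16_t, reverse_bytes(0x1234) == 0x3412.
+// Likewise for the powerOfTwo.hpp helpers added further below:
+// ceil_log2(8) == 3, ceil_log2(9) == 4, ceil_log2(1) == 1 (never 0), and
+// submultiple_power_of_2(24) == 8 (24 = 0b11000; its lowest set bit is 8).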
+template <typename T, ENABLE_IF(std::is_integral<T>::value)>
+constexpr T reverse_bits(T x) {
+  return reverse_bytes(reverse_bits_in_bytes(x));
+}
+
+#endif // SHARE_UTILITIES_MOVEBITS_HPP
diff --git a/src/hotspot/share/utilities/powerOfTwo.hpp b/src/hotspot/share/utilities/powerOfTwo.hpp
index a98b81e803709..29abc972eaa82 100644
--- a/src/hotspot/share/utilities/powerOfTwo.hpp
+++ b/src/hotspot/share/utilities/powerOfTwo.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -103,7 +103,8 @@ inline T round_down_power_of_2(T value) {
template<typename T, ENABLE_IF(std::is_integral<T>::value)>
inline T round_up_power_of_2(T value) {
  assert(value > 0, "Invalid value");
-  assert(value <= max_power_of_2<T>(), "Overflow");
+  assert(value <= max_power_of_2<T>(), "Overflowing maximum allowed power of two with " UINT64_FORMAT_X,
+         static_cast<uint64_t>(value));
  if (is_power_of_2(value)) {
    return value;
  }
@@ -119,4 +120,22 @@ inline T next_power_of_2(T value) {
  return round_up_power_of_2(value + 1);
}
+// Smallest exponent ret (at least 1) such that ((T)1 << ret) >= value.
+template <typename T, ENABLE_IF(std::is_integral<T>::value)>
+inline T ceil_log2(T value) {
+  T ret;
+  for (ret = 1; ((T)1 << ret) < value; ++ret);
+  return ret;
+}
+
+// Return the largest power of two that is a submultiple of the given value.
+// This is the same as the numeric value of the least-significant set bit.
+// For unsigned values, it replaces the old trick of (value & -value).
+// precondition: value > 0.
+template<typename T, ENABLE_IF(std::is_integral<T>::value)>
+inline T submultiple_power_of_2(T value) {
+  assert(value > 0, "Invalid value");
+  return value & -value;
+}
+
 #endif // SHARE_UTILITIES_POWEROFTWO_HPP
diff --git a/src/java.base/linux/classes/jdk/internal/platform/CgroupMetrics.java b/src/java.base/linux/classes/jdk/internal/platform/CgroupMetrics.java
index b60d2152c7df7..8f0ac0102b007 100644
--- a/src/java.base/linux/classes/jdk/internal/platform/CgroupMetrics.java
+++ b/src/java.base/linux/classes/jdk/internal/platform/CgroupMetrics.java
@@ -121,7 +121,13 @@ public long getMemoryFailCount() {
@Override
public long getMemoryLimit() {
-    return subsystem.getMemoryLimit();
+    long subsMem = subsystem.getMemoryLimit();
+    // Catch the cgroup memory limit exceeding host physical memory.
+    // Treat this as unlimited.
+    if (subsMem >= getTotalMemorySize0()) {
+        return CgroupSubsystem.LONG_RETVAL_UNLIMITED;
+    }
+    return subsMem;
}
@Override
@@ -178,5 +184,6 @@ public static Metrics getInstance() {
}
private static native boolean isUseContainerSupport();
+private static native long getTotalMemorySize0();
}
diff --git a/src/java.base/linux/native/libjava/CgroupMetrics.c b/src/java.base/linux/native/libjava/CgroupMetrics.c
index 8c9a9dd7a7ea2..4eaac0c6dd483 100644
--- a/src/java.base/linux/native/libjava/CgroupMetrics.c
+++ b/src/java.base/linux/native/libjava/CgroupMetrics.c
@@ -22,6 +22,7 @@
* or visit www.oracle.com if you need additional information or have any
* questions.
diff --git a/src/java.base/linux/classes/jdk/internal/platform/CgroupMetrics.java b/src/java.base/linux/classes/jdk/internal/platform/CgroupMetrics.java
index b60d2152c7df7..8f0ac0102b007 100644
--- a/src/java.base/linux/classes/jdk/internal/platform/CgroupMetrics.java
+++ b/src/java.base/linux/classes/jdk/internal/platform/CgroupMetrics.java
@@ -121,7 +121,13 @@ public long getMemoryFailCount() {
 
     @Override
     public long getMemoryLimit() {
-        return subsystem.getMemoryLimit();
+        long subsMem = subsystem.getMemoryLimit();
+        // Catch the cgroup memory limit exceeding host physical memory.
+        // Treat this as unlimited.
+        if (subsMem >= getTotalMemorySize0()) {
+            return CgroupSubsystem.LONG_RETVAL_UNLIMITED;
+        }
+        return subsMem;
     }
 
     @Override
@@ -178,5 +184,6 @@ public static Metrics getInstance() {
     }
 
     private static native boolean isUseContainerSupport();
+    private static native long getTotalMemorySize0();
 
 }
diff --git a/src/java.base/linux/native/libjava/CgroupMetrics.c b/src/java.base/linux/native/libjava/CgroupMetrics.c
index 8c9a9dd7a7ea2..4eaac0c6dd483 100644
--- a/src/java.base/linux/native/libjava/CgroupMetrics.c
+++ b/src/java.base/linux/native/libjava/CgroupMetrics.c
@@ -22,6 +22,7 @@
  * or visit www.oracle.com if you need additional information or have any
  * questions.
  */
+#include <unistd.h>
 
 #include "jni.h"
 #include "jvm.h"
@@ -33,3 +34,10 @@ Java_jdk_internal_platform_CgroupMetrics_isUseContainerSupport(JNIEnv *env, jcla
 {
     return JVM_IsUseContainerSupport();
 }
+
+JNIEXPORT jlong JNICALL
+Java_jdk_internal_platform_CgroupMetrics_getTotalMemorySize0
+  (JNIEnv *env, jclass ignored)
+{
+    return sysconf(_SC_PHYS_PAGES) * sysconf(_SC_PAGESIZE);
+}
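The CgroupMetrics change above is a one-way clamp: a reported cgroup limit at or above host physical memory is treated as no limit at all. A sketch of the rule in isolation (illustration only; the class and method names here are hypothetical stand-ins for the patch's getMemoryLimit/getTotalMemorySize0 pair, and -1 plays the role of CgroupSubsystem.LONG_RETVAL_UNLIMITED):

    // Illustration only: the clamping rule introduced above, in isolation.
    public final class MemoryLimitClampDemo {
        static final long UNLIMITED = -1L; // stand-in for LONG_RETVAL_UNLIMITED

        // A cgroup limit at or above physical memory is not a real limit.
        static long effectiveLimit(long cgroupLimit, long hostPhysicalMemory) {
            return (cgroupLimit >= hostPhysicalMemory) ? UNLIMITED : cgroupLimit;
        }

        public static void main(String[] args) {
            long hostMem = 16L << 30;                                // 16 GiB host
            System.out.println(effectiveLimit(8L << 30, hostMem));   // 8 GiB kept
            System.out.println(effectiveLimit(32L << 30, hostMem));  // -1, unlimited
        }
    }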
diff --git a/src/java.base/share/classes/java/net/ContentHandler.java b/src/java.base/share/classes/java/net/ContentHandler.java
index 1a497ccbad6b0..356c583f20bd3 100644
--- a/src/java.base/share/classes/java/net/ContentHandler.java
+++ b/src/java.base/share/classes/java/net/ContentHandler.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1995, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1995, 2022, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,7 +47,7 @@
 * If no content handler could be {@linkplain URLConnection#getContent() found},
 * URLConnection will look for a content handler in a user-definable set of places.
 * Users can define a vertical-bar delimited set of class prefixes
- * to search through by defining the {@value java.net.URLConnection#contentPathProp}
+ * to search through by defining the {@systemProperty java.content.handler.pkgs} system
 * property. The class name must be of the form:
 * <blockquote><pre>
 *     {package-prefix}.{major}.{minor}
diff --git a/src/java.base/share/classes/java/text/DateFormatSymbols.java b/src/java.base/share/classes/java/text/DateFormatSymbols.java
index a76c7cb44f4c0..f621b1d115c8d 100644
--- a/src/java.base/share/classes/java/text/DateFormatSymbols.java
+++ b/src/java.base/share/classes/java/text/DateFormatSymbols.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1996, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1996, 2022, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -697,11 +697,6 @@ public boolean equals(Object obj)
 
     // =======================privates===============================
 
-    /**
-     * Useful constant for defining time zone offsets.
-     */
-    static final int millisPerHour = 60*60*1000;
-
     /**
      * Cache to hold DateFormatSymbols instances per Locale.
     */
diff --git a/src/java.base/share/classes/java/text/MergeCollation.java b/src/java.base/share/classes/java/text/MergeCollation.java
index 1b092ed207ac1..41c4bef71cf2f 100644
--- a/src/java.base/share/classes/java/text/MergeCollation.java
+++ b/src/java.base/share/classes/java/text/MergeCollation.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1996, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1996, 2022, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -86,7 +86,7 @@ public String getPattern() {
      * before & and <
      */
    public String getPattern(boolean withWhiteSpace) {
-        StringBuffer result = new StringBuffer();
+        StringBuilder result = new StringBuilder();
        PatternEntry tmp = null;
        ArrayList<PatternEntry> extList = null;
        int i;
@@ -146,7 +146,7 @@ public String emitPattern() {
      * builder.
      */
    public String emitPattern(boolean withWhiteSpace) {
-        StringBuffer result = new StringBuffer();
+        StringBuilder result = new StringBuilder();
        for (int i = 0; i < patterns.size(); ++i) {
            PatternEntry entry = patterns.get(i);
diff --git a/src/java.base/share/classes/java/text/PatternEntry.java b/src/java.base/share/classes/java/text/PatternEntry.java
index 858c3152cb711..53229f294e564 100644
--- a/src/java.base/share/classes/java/text/PatternEntry.java
+++ b/src/java.base/share/classes/java/text/PatternEntry.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1996, 2000, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1996, 2022, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,14 +52,14 @@ class PatternEntry {
    /**
     * Gets the current extension, quoted
     */
-    public void appendQuotedExtension(StringBuffer toAddTo) {
+    private void appendQuotedExtension(StringBuilder toAddTo) {
        appendQuoted(extension,toAddTo);
    }
 
    /**
     * Gets the current chars, quoted
     */
-    public void appendQuotedChars(StringBuffer toAddTo) {
+    private void appendQuotedChars(StringBuilder toAddTo) {
        appendQuoted(chars,toAddTo);
    }
 
@@ -83,7 +83,7 @@ public int hashCode() {
     * For debugging.
*/ public String toString() { - StringBuffer result = new StringBuffer(); + StringBuilder result = new StringBuilder(); addToBuffer(result, true, false, null); return result.toString(); } @@ -111,7 +111,7 @@ final String getChars() { // ===== privates ===== - void addToBuffer(StringBuffer toAddTo, + void addToBuffer(StringBuilder toAddTo, boolean showExtension, boolean showWhiteSpace, PatternEntry lastEntry) @@ -151,7 +151,7 @@ void addToBuffer(StringBuffer toAddTo, } } - static void appendQuoted(String chars, StringBuffer toAddTo) { + private static void appendQuoted(String chars, StringBuilder toAddTo) { boolean inQuote = false; char ch = chars.charAt(0); if (Character.isSpaceChar(ch)) { diff --git a/src/java.base/share/classes/javax/crypto/Cipher.java b/src/java.base/share/classes/javax/crypto/Cipher.java index 5d2f37d631276..2eb2ffe14a4aa 100644 --- a/src/java.base/share/classes/javax/crypto/Cipher.java +++ b/src/java.base/share/classes/javax/crypto/Cipher.java @@ -1239,6 +1239,8 @@ private static void checkOpmode(int opmode) { * @throws UnsupportedOperationException if {@code opmode} is * {@code WRAP_MODE} or {@code UNWRAP_MODE} but the mode is not implemented * by the underlying {@code CipherSpi} + * @throws InvalidParameterException if {@code opmode} is not one of the + * recognized values */ public final void init(int opmode, Key key) throws InvalidKeyException { init(opmode, key, JCAUtil.getDefSecureRandom()); @@ -1294,6 +1296,8 @@ public final void init(int opmode, Key key) throws InvalidKeyException { * @throws UnsupportedOperationException if {@code opmode} is * {@code WRAP_MODE} or {@code UNWRAP_MODE} but the mode is not implemented * by the underlying {@code CipherSpi} + * @throws InvalidParameterException if {@code opmode} is not one of the + * recognized values */ public final void init(int opmode, Key key, SecureRandom random) throws InvalidKeyException @@ -1379,6 +1383,9 @@ public final void init(int opmode, Key key, SecureRandom random) * @throws UnsupportedOperationException if {@code opmode} is * {@code WRAP_MODE} or {@code UNWRAP_MODE} but the mode is not implemented * by the underlying {@code CipherSpi} + * @throws InvalidParameterException if {@code opmode} is not one of the + * recognized values + * */ public final void init(int opmode, Key key, AlgorithmParameterSpec params) throws InvalidKeyException, InvalidAlgorithmParameterException @@ -1441,6 +1448,9 @@ public final void init(int opmode, Key key, AlgorithmParameterSpec params) * @throws UnsupportedOperationException if {@code opmode} is * {@code WRAP_MODE} or {@code UNWRAP_MODE} but the mode is not implemented * by the underlying {@code CipherSpi} + * @throws InvalidParameterException if {@code opmode} is not one of the + * recognized values + * */ public final void init(int opmode, Key key, AlgorithmParameterSpec params, SecureRandom random) @@ -1522,6 +1532,8 @@ public final void init(int opmode, Key key, AlgorithmParameterSpec params, * @throws UnsupportedOperationException if {@code opmode} is * {@code WRAP_MODE} or {@code UNWRAP_MODE} but the mode is not implemented * by the underlying {@code CipherSpi} + * @throws InvalidParameterException if {@code opmode} is not one of the + * recognized values */ public final void init(int opmode, Key key, AlgorithmParameters params) throws InvalidKeyException, InvalidAlgorithmParameterException @@ -1584,6 +1596,8 @@ public final void init(int opmode, Key key, AlgorithmParameters params) * @throws UnsupportedOperationException if {@code opmode} is * {@code WRAP_MODE} 
or {@code UNWRAP_MODE} but the mode is not implemented * by the underlying {@code CipherSpi} + * @throws InvalidParameterException if {@code opmode} is not one of the + * recognized values */ public final void init(int opmode, Key key, AlgorithmParameters params, SecureRandom random) @@ -1671,6 +1685,8 @@ public final void init(int opmode, Key key, AlgorithmParameters params, * @throws UnsupportedOperationException if {@code opmode} is * {@code WRAP_MODE} or {@code UNWRAP_MODE} but the mode is not implemented * by the underlying {@code CipherSpi} + * @throws InvalidParameterException if {@code opmode} is not one of the + * recognized values */ public final void init(int opmode, Certificate certificate) throws InvalidKeyException @@ -1740,6 +1756,8 @@ public final void init(int opmode, Certificate certificate) * @throws UnsupportedOperationException if {@code opmode} is * {@code WRAP_MODE} or {@code UNWRAP_MODE} but the mode is not implemented * by the underlying {@code CipherSpi} + * @throws InvalidParameterException if {@code opmode} is not one of the + * recognized values */ public final void init(int opmode, Certificate certificate, SecureRandom random) diff --git a/src/java.base/share/classes/javax/crypto/Mac.java b/src/java.base/share/classes/javax/crypto/Mac.java index 8d4217ab0c3a5..a8ab43177c11d 100644 --- a/src/java.base/share/classes/javax/crypto/Mac.java +++ b/src/java.base/share/classes/javax/crypto/Mac.java @@ -423,7 +423,7 @@ private String getProviderName() { * * @param key the key. * - * @exception InvalidKeyException if the given key is inappropriate for + * @throws InvalidKeyException if the given key is inappropriate for * initializing this MAC. */ public final void init(Key key) throws InvalidKeyException { @@ -451,9 +451,9 @@ public final void init(Key key) throws InvalidKeyException { * @param key the key. * @param params the algorithm parameters. * - * @exception InvalidKeyException if the given key is inappropriate for + * @throws InvalidKeyException if the given key is inappropriate for * initializing this MAC. - * @exception InvalidAlgorithmParameterException if the given algorithm + * @throws InvalidAlgorithmParameterException if the given algorithm * parameters are inappropriate for this MAC. */ public final void init(Key key, AlgorithmParameterSpec params) @@ -476,7 +476,7 @@ public final void init(Key key, AlgorithmParameterSpec params) * * @param input the input byte to be processed. * - * @exception IllegalStateException if this {@code Mac} has not been + * @throws IllegalStateException if this {@code Mac} has not been * initialized. */ public final void update(byte input) throws IllegalStateException { @@ -492,7 +492,7 @@ public final void update(byte input) throws IllegalStateException { * * @param input the array of bytes to be processed. * - * @exception IllegalStateException if this {@code Mac} has not been + * @throws IllegalStateException if this {@code Mac} has not been * initialized. */ public final void update(byte[] input) throws IllegalStateException { @@ -513,7 +513,7 @@ public final void update(byte[] input) throws IllegalStateException { * @param offset the offset in {@code input} where the input starts. * @param len the number of bytes to process. * - * @exception IllegalStateException if this {@code Mac} has not been + * @throws IllegalStateException if this {@code Mac} has not been * initialized. 
*/ public final void update(byte[] input, int offset, int len) @@ -538,8 +538,9 @@ public final void update(byte[] input, int offset, int len) * * @param input the ByteBuffer * - * @exception IllegalStateException if this {@code Mac} has not been + * @throws IllegalStateException if this {@code Mac} has not been * initialized. + * @throws IllegalArgumentException if {@code input} is null * @since 1.5 */ public final void update(ByteBuffer input) { @@ -569,7 +570,7 @@ public final void update(ByteBuffer input) { * * @return the MAC result. * - * @exception IllegalStateException if this {@code Mac} has not been + * @throws IllegalStateException if this {@code Mac} has not been * initialized. */ public final byte[] doFinal() throws IllegalStateException { @@ -603,9 +604,9 @@ public final byte[] doFinal() throws IllegalStateException { * @param outOffset the offset in {@code output} where the MAC is * stored * - * @exception ShortBufferException if the given output buffer is too small + * @throws ShortBufferException if the given output buffer is too small * to hold the result - * @exception IllegalStateException if this {@code Mac} has not been + * @throws IllegalStateException if this {@code Mac} has not been * initialized. */ public final void doFinal(byte[] output, int outOffset) @@ -641,7 +642,7 @@ public final void doFinal(byte[] output, int outOffset) * @param input data in bytes * @return the MAC result. * - * @exception IllegalStateException if this {@code Mac} has not been + * @throws IllegalStateException if this {@code Mac} has not been * initialized. */ public final byte[] doFinal(byte[] input) throws IllegalStateException @@ -678,7 +679,7 @@ public final void reset() { * * @return a clone if the provider implementation is cloneable. * - * @exception CloneNotSupportedException if this is called on a + * @throws CloneNotSupportedException if this is called on a * delegate that does not support {@code Cloneable}. */ public final Object clone() throws CloneNotSupportedException { diff --git a/src/java.base/share/classes/javax/crypto/MacSpi.java b/src/java.base/share/classes/javax/crypto/MacSpi.java index d8fdeda6ee2f2..dbed0fa9877a3 100644 --- a/src/java.base/share/classes/javax/crypto/MacSpi.java +++ b/src/java.base/share/classes/javax/crypto/MacSpi.java @@ -65,9 +65,9 @@ public MacSpi() {} * @param key the (secret) key. * @param params the algorithm parameters. * - * @exception InvalidKeyException if the given key is inappropriate for + * @throws InvalidKeyException if the given key is inappropriate for * initializing this MAC. - * @exception InvalidAlgorithmParameterException if the given algorithm + * @throws InvalidAlgorithmParameterException if the given algorithm * parameters are inappropriate for this MAC. */ protected abstract void engineInit(Key key, @@ -101,6 +101,9 @@ protected abstract void engineInit(Key key, * process ByteBuffers more efficiently than byte arrays. * * @param input the ByteBuffer + * + * @throws NullPointerException if {@code input} is null + * * @since 1.5 */ protected void engineUpdate(ByteBuffer input) { @@ -145,7 +148,7 @@ protected void engineUpdate(ByteBuffer input) { * * @return a clone if the implementation is cloneable. * - * @exception CloneNotSupportedException if this is called + * @throws CloneNotSupportedException if this is called * on an implementation that does not support {@code Cloneable}. 
 */
    public Object clone() throws CloneNotSupportedException {
diff --git a/src/java.base/share/classes/jdk/internal/module/ModuleReferenceImpl.java b/src/java.base/share/classes/jdk/internal/module/ModuleReferenceImpl.java
index 209131c95c381..e460b0fc47eb6 100644
--- a/src/java.base/share/classes/jdk/internal/module/ModuleReferenceImpl.java
+++ b/src/java.base/share/classes/jdk/internal/module/ModuleReferenceImpl.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -62,8 +62,11 @@ public class ModuleReferenceImpl extends ModuleReference {
     // ModuleResolution flags
     private final ModuleResolution moduleResolution;
 
-    // cached hash of this module to avoid needing to compute it many times
-    private byte[] cachedHash;
+    // Single-slot cache of this module's hash to avoid needing to compute
+    // it many times. For correctness under concurrent updates, we need to
+    // wrap the fields updated at the same time with a record.
+    private record CachedHash(byte[] hash, String algorithm) {}
+    private CachedHash cachedHash;
 
     /**
      * Constructs a new instance of this class.
@@ -139,13 +142,17 @@ public ModuleResolution moduleResolution() {
      * @throws java.io.UncheckedIOException if an I/O error occurs
      */
     public byte[] computeHash(String algorithm) {
-        byte[] result = cachedHash;
-        if (result != null)
-            return result;
-        if (hasher == null)
+        CachedHash ch = cachedHash;
+        if (ch != null && ch.algorithm().equals(algorithm)) {
+            return ch.hash();
+        }
+
+        if (hasher == null) {
             return null;
-        cachedHash = result = hasher.generate(algorithm);
-        return result;
+        }
+        byte[] hash = hasher.generate(algorithm);
+        cachedHash = new CachedHash(hash, algorithm);
+        return hash;
     }
 
     @Override
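The CachedHash record above pairs the hash with the algorithm that produced it, so a single racy read can never observe one thread's hash alongside another thread's algorithm. A reduced sketch of the pattern (illustration only; the names here are hypothetical, not the patch's):

    // Illustration only: the hazard a single-slot record cache removes.
    // With two separate fields, a reader can see a torn pair; publishing
    // both values through one immutable holder cannot tear.
    public final class SingleSlotCacheDemo {
        private record Entry(String key, long value) {}
        private volatile Entry cache;   // one reference, read/written atomically

        long compute(String key) {
            Entry e = cache;                       // single racy read is safe
            if (e != null && e.key().equals(key)) {
                return e.value();
            }
            long v = key.hashCode();               // stand-in for the real work
            cache = new Entry(key, v);             // publish key+value together
            return v;
        }

        public static void main(String[] args) {
            SingleSlotCacheDemo d = new SingleSlotCacheDemo();
            System.out.println(d.compute("SHA-256"));
            System.out.println(d.compute("SHA-256")); // served from the cache
        }
    }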
diff --git a/src/java.base/share/classes/sun/nio/ch/FileChannelImpl.java b/src/java.base/share/classes/sun/nio/ch/FileChannelImpl.java
index 17bd3333db192..697bb5d640662 100644
--- a/src/java.base/share/classes/sun/nio/ch/FileChannelImpl.java
+++ b/src/java.base/share/classes/sun/nio/ch/FileChannelImpl.java
@@ -570,10 +570,11 @@ private long transferToDirectlyInternal(long position, int icount,
             ti = threads.add();
             if (!isOpen())
                 return -1;
+            boolean append = fdAccess.getAppend(targetFD);
             do {
                 long comp = Blocker.begin();
                 try {
-                    n = transferTo0(fd, position, icount, targetFD);
+                    n = transferTo0(fd, position, icount, targetFD, append);
                 } finally {
                     Blocker.end(comp);
                 }
@@ -801,7 +802,8 @@ private long transferFromDirectlyInternal(FileDescriptor srcFD,
             do {
                 long comp = Blocker.begin();
                 try {
-                    n = transferFrom0(srcFD, fd, position, count);
+                    boolean append = fdAccess.getAppend(fd);
+                    n = transferFrom0(srcFD, fd, position, count, append);
                 } finally {
                     Blocker.end(comp);
                 }
@@ -1573,11 +1575,13 @@ private native long map0(FileDescriptor fd, int prot, long position,
     // Transfers from src to dst, or returns IOStatus.UNSUPPORTED (-4) or
     // IOStatus.UNSUPPORTED_CASE (-6) if the kernel does not support it
     private static native long transferTo0(FileDescriptor src, long position,
-                                           long count, FileDescriptor dst);
+                                           long count, FileDescriptor dst,
+                                           boolean append);
 
     private static native long transferFrom0(FileDescriptor src,
                                              FileDescriptor dst,
-                                             long position, long count);
+                                             long position, long count,
+                                             boolean append);
 
     // Retrieves the maximum size of a transfer
     private static native int maxDirectTransferSize0();
diff --git a/src/java.base/unix/native/libnio/ch/FileChannelImpl.c b/src/java.base/unix/native/libnio/ch/FileChannelImpl.c
index 0511183ced6d4..c14e0611f5d0b 100644
--- a/src/java.base/unix/native/libnio/ch/FileChannelImpl.c
+++ b/src/java.base/unix/native/libnio/ch/FileChannelImpl.c
@@ -169,14 +169,41 @@ JNIEXPORT jlong JNICALL
 Java_sun_nio_ch_FileChannelImpl_transferTo0(JNIEnv *env, jobject this,
                                             jobject srcFDO, jlong position,
                                             jlong count,
-                                            jobject dstFDO)
+                                            jobject dstFDO, jboolean append)
 {
     jint srcFD = fdval(env, srcFDO);
     jint dstFD = fdval(env, dstFDO);
 
 #if defined(__linux__)
+    // copy_file_range fails with EBADF when appending, and sendfile
+    // fails with EINVAL
+    if (append == JNI_TRUE)
+        return IOS_UNSUPPORTED_CASE;
+
     off64_t offset = (off64_t)position;
-    jlong n = sendfile64(dstFD, srcFD, &offset, (size_t)count);
+    jlong n;
+    if (my_copy_file_range_func != NULL) {
+        size_t len = (size_t)count;
+        n = my_copy_file_range_func(srcFD, &offset, dstFD, NULL, len, 0);
+        if (n < 0) {
+            switch (errno) {
+                case EINTR:
+                    return IOS_INTERRUPTED;
+                case EINVAL:
+                case ENOSYS:
+                case EXDEV:
+                    // ignore and try sendfile()
+                    break;
+                default:
+                    JNU_ThrowIOExceptionWithLastError(env, "Copy failed");
+                    return IOS_THROWN;
+            }
+        }
+        if (n >= 0)
+            return n;
+    }
+
+    n = sendfile64(dstFD, srcFD, &offset, (size_t)count);
     if (n < 0) {
         if (errno == EAGAIN)
             return IOS_UNAVAILABLE;
@@ -262,20 +289,27 @@ Java_sun_nio_ch_FileChannelImpl_transferTo0(JNIEnv *env, jobject this,
 JNIEXPORT jlong JNICALL
 Java_sun_nio_ch_FileChannelImpl_transferFrom0(JNIEnv *env, jobject this,
                                               jobject srcFDO, jobject dstFDO,
-                                              jlong position, jlong count)
+                                              jlong position, jlong count,
+                                              jboolean append)
 {
 #if defined(__linux__)
     if (my_copy_file_range_func == NULL)
         return IOS_UNSUPPORTED;
+    // copy_file_range fails with EBADF when appending
+    if (append == JNI_TRUE)
+        return IOS_UNSUPPORTED_CASE;
 
     jint srcFD = fdval(env, srcFDO);
     jint dstFD = fdval(env, dstFDO);
 
     off64_t offset = (off64_t)position;
-    jlong n = my_copy_file_range_func(srcFD, NULL, dstFD, &offset, count, 0);
+    size_t len = (size_t)count;
+    jlong n = my_copy_file_range_func(srcFD, NULL, dstFD, &offset, len, 0);
     if (n < 0) {
         if (errno == EAGAIN)
             return IOS_UNAVAILABLE;
+        if (errno == ENOSYS)
+            return IOS_UNSUPPORTED_CASE;
         if ((errno == EBADF || errno == EINVAL || errno == EXDEV) &&
             ((ssize_t)count >= 0))
             return IOS_UNSUPPORTED_CASE;
diff --git a/src/java.base/windows/native/libnio/ch/FileChannelImpl.c b/src/java.base/windows/native/libnio/ch/FileChannelImpl.c
index 554bc864b8bf9..ce257658d7398 100644
--- a/src/java.base/windows/native/libnio/ch/FileChannelImpl.c
+++ b/src/java.base/windows/native/libnio/ch/FileChannelImpl.c
@@ -147,7 +147,7 @@ JNIEXPORT jlong JNICALL
 Java_sun_nio_ch_FileChannelImpl_transferTo0(JNIEnv *env, jobject this,
                                             jobject srcFD, jlong position,
                                             jlong count,
-                                            jobject dstFD)
+                                            jobject dstFD, jboolean append)
 {
     const int PACKET_SIZE = 524288;
 
@@ -191,7 +191,8 @@ Java_sun_nio_ch_FileChannelImpl_transferTo0(JNIEnv *env, jobject this,
 JNIEXPORT jlong JNICALL
 Java_sun_nio_ch_FileChannelImpl_transferFrom0(JNIEnv *env, jobject this,
                                               jobject srcFDO, jobject dstFDO,
-                                              jlong position, jlong count)
+                                              jlong position, jlong count,
+                                              jboolean append)
 {
     return IOS_UNSUPPORTED;
 }
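At the API level the new append flag is invisible: a transfer into a channel opened for APPEND now takes the fallback path inside the JDK instead of failing in copy_file_range()/sendfile(). A usage-level sketch of the affected call (illustration only; the file names are hypothetical):

    // Illustration only: FileChannel.transferTo into an APPEND-mode channel,
    // the case the append parameter above routes around.
    import java.io.IOException;
    import java.nio.channels.FileChannel;
    import java.nio.file.Path;
    import java.nio.file.StandardOpenOption;

    public final class TransferDemo {
        public static void main(String[] args) throws IOException {
            Path src = Path.of("src.bin");   // hypothetical input file
            Path dst = Path.of("dst.bin");   // hypothetical output file
            try (FileChannel in = FileChannel.open(src, StandardOpenOption.READ);
                 FileChannel out = FileChannel.open(dst,
                         StandardOpenOption.CREATE, StandardOpenOption.WRITE,
                         StandardOpenOption.APPEND)) {
                long pos = 0, size = in.size();
                while (pos < size) {
                    pos += in.transferTo(pos, size - pos, out);
                }
            }
        }
    }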
diff --git a/src/java.desktop/macosx/classes/com/apple/laf/AquaMenuPainter.java b/src/java.desktop/macosx/classes/com/apple/laf/AquaMenuPainter.java
--- a/src/java.desktop/macosx/classes/com/apple/laf/AquaMenuPainter.java
+++ b/src/java.desktop/macosx/classes/com/apple/laf/AquaMenuPainter.java
@@ -494,13 +494,15 @@ private String layoutMenuItem(final JMenuItem menuItem, final FontMetrics fm, fi
         if (!isTopLevelMenu) {
             // if ( GetSysDirection() < 0 ) hierRect.right = hierRect.left + w + 4;
             // else hierRect.left = hierRect.right - w - 4;
-            arrowIconR.x = (viewR.width - arrowIconR.width) + 1;
-            arrowIconR.y = viewR.y + (labelR.height / 2) - (arrowIconR.height / 2) + 1;
-            checkIconR.y = viewR.y + (labelR.height / 2) - (checkIconR.height / 2);
+            arrowIconR.x = Math.abs((viewR.width - arrowIconR.width) + 1);
+            arrowIconR.y = Math.abs(viewR.y + (labelR.height / 2) - (arrowIconR.height / 2) + 1);
+
+            checkIconR.y = Math.abs(viewR.y + (labelR.height / 2) - (checkIconR.height / 2));
             checkIconR.x = 5;
 
             textR.width += 8;
+
         }
 
         /*System.out.println("Layout: " +horizontalAlignment+ " v=" +viewR+" c="+checkIconR+" i="+
diff --git a/src/java.desktop/macosx/native/libawt_lwawt/java2d/metal/MTLLayer.h b/src/java.desktop/macosx/native/libawt_lwawt/java2d/metal/MTLLayer.h
index dceac2ad433da..83e868b37b711 100644
--- a/src/java.desktop/macosx/native/libawt_lwawt/java2d/metal/MTLLayer.h
+++ b/src/java.desktop/macosx/native/libawt_lwawt/java2d/metal/MTLLayer.h
@@ -55,6 +55,7 @@
 @property (readwrite, assign) int topInset;
 @property (readwrite, assign) int leftInset;
 @property (readwrite, assign) CVDisplayLinkRef displayLink;
+@property (readwrite, atomic) int displayLinkCount;
 
 - (id) initWithJavaLayer:(jobject)layer;
 
diff --git a/src/java.desktop/macosx/native/libawt_lwawt/java2d/metal/MTLLayer.m b/src/java.desktop/macosx/native/libawt_lwawt/java2d/metal/MTLLayer.m
index 1d30f7eb51df1..8967cca2e35c8 100644
--- a/src/java.desktop/macosx/native/libawt_lwawt/java2d/metal/MTLLayer.m
+++ b/src/java.desktop/macosx/native/libawt_lwawt/java2d/metal/MTLLayer.m
@@ -29,6 +29,7 @@
 #import "LWCToolkit.h"
 #import "MTLSurfaceData.h"
 #import "JNIUtilities.h"
+#define KEEP_ALIVE_INC 4
 
 @implementation MTLLayer
 
@@ -42,6 +43,7 @@ @implementation MTLLayer
 @synthesize leftInset;
 @synthesize nextDrawableCount;
 @synthesize displayLink;
+@synthesize displayLinkCount;
 
 - (id) initWithJavaLayer:(jobject)layer
 {
@@ -74,12 +76,15 @@ - (id) initWithJavaLayer:(jobject)layer
     self.opaque = YES;
     CVDisplayLinkCreateWithActiveCGDisplays(&displayLink);
     CVDisplayLinkSetOutputCallback(displayLink, &displayLinkCallback, (__bridge void*)self);
+    self.displayLinkCount = 0;
     return self;
 }
 
 - (void) blitTexture {
     if (self.ctx == NULL || self.javaLayer == NULL || self.buffer == nil || self.ctx.device == nil) {
-        J2dTraceLn4(J2D_TRACE_VERBOSE, "MTLLayer.blitTexture: uninitialized (mtlc=%p, javaLayer=%p, buffer=%p, devide=%p)", self.ctx, self.javaLayer, self.buffer, ctx.device);
+        J2dTraceLn4(J2D_TRACE_VERBOSE,
+                    "MTLLayer.blitTexture: uninitialized (mtlc=%p, javaLayer=%p, buffer=%p, devide=%p)", self.ctx,
+                    self.javaLayer, self.buffer, ctx.device);
         [self stopDisplayLink];
         return;
     }
@@ -100,9 +105,9 @@ - (void) blitTexture {
     NSUInteger src_h = self.buffer.height - src_y;
 
     if (src_h <= 0 || src_w <= 0) {
-       J2dTraceLn(J2D_TRACE_VERBOSE, "MTLLayer.blitTexture: Invalid src width or height.");
-       [self stopDisplayLink];
-       return;
+        J2dTraceLn(J2D_TRACE_VERBOSE, "MTLLayer.blitTexture: Invalid src width or height.");
+        [self stopDisplayLink];
+        return;
     }
 
     id<MTLCommandBuffer> commandBuf = [self.ctx createBlitCommandBuffer];
@@ -134,7 +139,11 @@ - (void) blitTexture {
     }];
 
     [commandBuf commit];
-    [self stopDisplayLink];
+
+    if (--self.displayLinkCount <= 0) {
+        self.displayLinkCount = 0;
+        [self stopDisplayLink];
+    }
     }
 }
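The displayLinkCount bookkeeping above is a keep-alive scheme: every redraw credits the display link with KEEP_ALIVE_INC callbacks, and blitTexture stops the link only once the credit is spent, so bursts of drawing do not start and stop it on every frame. Reduced to its core (illustration only, in Java rather than Objective-C):

    // Illustration only: the keep-alive counter from the MTLLayer change.
    // requestFrame() models startDisplayLink(); onTick() models one
    // displayLinkCallback -> blitTexture pass.
    public final class KeepAliveLoopDemo {
        static final int KEEP_ALIVE_INC = 4;  // same constant as the patch
        private int credit = 0;
        private boolean running = false;

        synchronized void requestFrame() {
            running = true;                   // CVDisplayLinkStart()
            credit += KEEP_ALIVE_INC;         // keep the link alive a while
        }

        synchronized void onTick() {
            if (--credit <= 0) {              // mirrors blitTexture's check
                credit = 0;
                running = false;              // CVDisplayLinkStop()
            }
        }

        public static void main(String[] args) {
            KeepAliveLoopDemo link = new KeepAliveLoopDemo();
            link.requestFrame();
            int ticks = 0;
            while (link.running) { link.onTick(); ticks++; }
            System.out.println("stopped after " + ticks + " idle ticks"); // 4
        }
    }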
@@ -177,13 +186,18 @@ - (void) redraw {
 }
 
 - (void) startDisplayLink {
-    if (!CVDisplayLinkIsRunning(self.displayLink))
+    if (!CVDisplayLinkIsRunning(self.displayLink)) {
         CVDisplayLinkStart(self.displayLink);
+        J2dTraceLn(J2D_TRACE_VERBOSE, "MTLLayer_startDisplayLink");
+    }
+    displayLinkCount += KEEP_ALIVE_INC; // Keep alive displaylink counter
 }
 
 - (void) stopDisplayLink {
-    if (CVDisplayLinkIsRunning(self.displayLink))
+    if (CVDisplayLinkIsRunning(self.displayLink)) {
         CVDisplayLinkStop(self.displayLink);
+        J2dTraceLn(J2D_TRACE_VERBOSE, "MTLLayer_stopDisplayLink");
+    }
 }
 
 CVReturn displayLinkCallback(CVDisplayLinkRef displayLink, const CVTimeStamp* now, const CVTimeStamp* outputTime, CVOptionFlags flagsIn, CVOptionFlags* flagsOut, void* displayLinkContext)
diff --git a/src/java.desktop/share/classes/javax/swing/TimerQueue.java b/src/java.desktop/share/classes/javax/swing/TimerQueue.java
index 6d067de2e979d..e1de594084f95 100644
--- a/src/java.desktop/share/classes/javax/swing/TimerQueue.java
+++ b/src/java.desktop/share/classes/javax/swing/TimerQueue.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,15 +23,10 @@
  * questions.
 */
 
-
-
 package javax.swing;
 
-
-
 import java.security.AccessController;
 import java.security.PrivilegedAction;
-import java.util.*;
 import java.util.concurrent.*;
 import java.util.concurrent.locks.*;
 import java.util.concurrent.atomic.AtomicLong;
@@ -49,8 +44,6 @@ class TimerQueue implements Runnable {
     private static final Object sharedInstanceKey =
         new StringBuffer("TimerQueue.sharedInstanceKey");
-    private static final Object expiredTimersKey =
-        new StringBuffer("TimerQueue.expiredTimersKey");
 
     private final DelayQueue<DelayedTimer> queue;
     private volatile boolean running;
@@ -268,7 +261,7 @@ static class DelayedTimer implements Delayed {
 
 
         public final long getDelay(TimeUnit unit) {
-            return  unit.convert(time - now(), TimeUnit.NANOSECONDS);
+            return unit.convert(time - now(), TimeUnit.NANOSECONDS);
         }
 
         final void setTime(long nanos) {
diff --git a/src/java.desktop/share/classes/javax/swing/text/DefaultStyledDocument.java b/src/java.desktop/share/classes/javax/swing/text/DefaultStyledDocument.java
index 5d373177eaf0b..ded8cd945941e 100644
--- a/src/java.desktop/share/classes/javax/swing/text/DefaultStyledDocument.java
+++ b/src/java.desktop/share/classes/javax/swing/text/DefaultStyledDocument.java
@@ -484,6 +484,18 @@ public Style getLogicalStyle(int p) {
      * A write lock is held by this operation while changes
      * are being made, and a DocumentEvent is sent to the listeners
      * after the change has been successfully completed.
+     *
+     * <p>
+     * {@code offset} and {@code length} define the range of the text
+     * over which the attributes are set.
+     * If the length is <= 0, then no action is taken and the method
+     * just returns.
+     * If the offset is <=0 or > the length of the text then no
+     * action is taken, and the method just returns.
+     * Otherwise if {@code offset + length} will exceed the length of
+     * the text then the affected range is truncated.
+     * <p>
+     *
      * <p>
      * This method is thread safe, although most Swing methods
      * are not. Please see
      * <A HREF="https://docs.oracle.com/javase/tutorial/uiswing/concurrency/index.html">Concurrency
      * in Swing</A> for more information.
      *
      * @param offset the offset in the document >= 0
-     * @param length the length >= 0
+     * @param length the length > 0
      * @param s the attributes
      * @param replace true if the previous attributes should be replaced
      *   before setting the new attributes
      */
    public void setCharacterAttributes(int offset, int length, AttributeSet s, boolean replace) {
-        if (length == 0) {
+        if (length <= 0) {
            return;
        }
        try {
diff --git a/src/java.desktop/share/classes/sun/java2d/Disposer.java b/src/java.desktop/share/classes/sun/java2d/Disposer.java
index e57483e66aefb..b64f77fe8a028 100644
--- a/src/java.desktop/share/classes/sun/java2d/Disposer.java
+++ b/src/java.desktop/share/classes/sun/java2d/Disposer.java
@@ -33,8 +33,8 @@
 import java.lang.ref.WeakReference;
 import java.security.AccessController;
 import java.security.PrivilegedAction;
-import java.util.ArrayList;
 import java.util.Hashtable;
+import java.util.concurrent.ConcurrentLinkedDeque;
 
 /**
 * This class is used for registering and disposing the native
 *
@@ -145,7 +145,7 @@ public void run() {
                 Reference obj = queue.remove();
                 obj.clear();
                 DisposerRecord rec = records.remove(obj);
-                rec.dispose();
+                safeDispose(rec);
                 obj = null;
                 rec = null;
                 clearDeferredRecords();
@@ -164,21 +164,23 @@ public void run() {
     public static interface PollDisposable {
     };
 
-    private static ArrayList<DisposerRecord> deferredRecords = null;
+    private static ConcurrentLinkedDeque<DisposerRecord> deferredRecords = new ConcurrentLinkedDeque<>();
 
-    private static void clearDeferredRecords() {
-        if (deferredRecords == null || deferredRecords.isEmpty()) {
-            return;
+    private static void safeDispose(DisposerRecord rec) {
+        try {
+            rec.dispose();
+        } catch (final Exception e) {
+            System.out.println("Exception while disposing deferred rec.");
         }
-        for (int i=0;i<deferredRecords.size();i++) {
-            try {
-                DisposerRecord rec = deferredRecords.get(i);
-                rec.dispose();
-            } catch (Exception e) {
-                System.out.println("Exception while disposing deferred rec.");
-            }
+    }
+
+    private static void clearDeferredRecords() {
+        while (!deferredRecords.isEmpty()) {
+            safeDispose(deferredRecords.pollFirst());
         }
-        deferredRecords.clear();
     }
@@ ... @@
                 } else {
-                    if (deferredRecords == null) {
-                        deferredRecords = new ArrayList<DisposerRecord>(5);
-                    }
-                    deferredRecords.add(rec);
+                    deferredRecords.offerLast(rec);
                 }
             }
         } catch (Exception e) {
diff --git a/src/java.desktop/windows/native/libsplashscreen/splashscreen_sys.c b/src/java.desktop/windows/native/libsplashscreen/splashscreen_sys.c
index 7725d1467e99c..37444a78a3400 100644
--- a/src/java.desktop/windows/native/libsplashscreen/splashscreen_sys.c
+++ b/src/java.desktop/windows/native/libsplashscreen/splashscreen_sys.c
@@ -442,7 +442,7 @@ SplashInitPlatform(Splash * splash)
     splash->isLayered = FALSE;
     hdc = GetDC(NULL);
     paletteMode = (GetDeviceCaps(hdc, RASTERCAPS) & RC_PALETTE) != 0;
-    if (UpdateLayeredWindow && !paletteMode) {
+    if (!paletteMode) {
         splash->isLayered = TRUE;
     }
     splash->byteAlignment = 4;
diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/CodeBlob.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/CodeBlob.java
index d2f8db5efb98c..976af5bac20b8 100644
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/CodeBlob.java
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/CodeBlob.java
@@ -183,12 +183,6 @@ public NMethod asNMethodOrNull() {
   public boolean isFrameCompleteAt(Address a) {
     return codeContains(a) && a.minus(codeBegin()) >= getFrameCompleteOffset();
   }
 
-  // Reclamation support (really only used by the nmethods, but in order to get asserts to work
-  // in the CodeCache they are defined virtual here)
-  public boolean isZombie() { return false; }
-
-  public boolean isLockedByVM() { return false; }
-
   public ImmutableOopMap getOopMapForReturnAddress(Address returnAddress, boolean debugging) {
     Address pc =
returnAddress; if (Assert.ASSERTS_ENABLED) { diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/CodeCache.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/CodeCache.java index 1b95286395119..a87a500dc2f06 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/CodeCache.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/CodeCache.java @@ -92,9 +92,6 @@ public CodeBlob findBlob(Address start) { } // We could potientially look up non_entrant methods // NOTE: this is effectively a "guarantee", and is slightly different from the one in the VM - if (Assert.ASSERTS_ENABLED) { - Assert.that(!(result.isZombie() || result.isLockedByVM()), "unsafe access to zombie method"); - } return result; } diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/NMethod.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/NMethod.java index 02f49ae50f707..fd26185b99fe9 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/NMethod.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/NMethod.java @@ -63,16 +63,6 @@ public class NMethod extends CompiledMethod { // FIXME: add access to flags (how?) - /** NMethod Flushing lock (if non-zero, then the nmethod is not removed) */ - private static JIntField lockCountField; - - /** not_entrant method removal. Each mark_sweep pass will update - this mark to current sweep invocation count if it is seen on the - stack. An not_entrant method can be removed when there is no - more activations, i.e., when the _stack_traversal_mark is less than - current sweep traversal index. */ - private static CIntegerField stackTraversalMarkField; - private static CIntegerField compLevelField; static { @@ -102,8 +92,6 @@ private static void initialize(TypeDataBase db) { entryPointField = type.getAddressField("_entry_point"); verifiedEntryPointField = type.getAddressField("_verified_entry_point"); osrEntryPointField = type.getAddressField("_osr_entry_point"); - lockCountField = type.getJIntField("_lock_count"); - stackTraversalMarkField = type.getCIntegerField("_stack_traversal_mark"); compLevelField = type.getCIntegerField("_comp_level"); pcDescSize = db.lookupType("PcDesc").getSize(); } @@ -215,16 +203,11 @@ public Method getMethodAt(int index) { // * FIXME: * ADD ACCESS TO FLAGS!!!! // ********** // public boolean isInUse(); - // public boolean isAlive(); // public boolean isNotEntrant(); - // public boolean isZombie(); // ******************************** // * MAJOR FIXME: MAJOR HACK HERE * // ******************************** - public boolean isZombie() { return false; } - - // public boolean isUnloaded(); // public boolean isYoung(); // public boolean isOld(); // public int age(); @@ -273,8 +256,6 @@ public boolean isMethodHandleReturn(Address returnPc) { // FIXME: add inline cache support // FIXME: add flush() - public boolean isLockedByVM() { return lockCountField.getValue(addr) > 0; } - // FIXME: add mark_as_seen_on_stack // FIXME: add can_not_entrant_be_converted diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/Debugger.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/Debugger.java index 72d4ce8c20dea..36b73d66a0559 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/Debugger.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/Debugger.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -132,7 +132,4 @@ which this debugger is running (to be able to properly configure public ReadResult readBytesFromProcess(long address, long numBytes) throws DebuggerException; - - public void writeBytesToProcess(long address, long numBytes, byte[] data) - throws UnmappedAddressException, DebuggerException; } diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/DebuggerBase.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/DebuggerBase.java index ca5548ea18711..a013a6c99f5a0 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/DebuggerBase.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/DebuggerBase.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -232,15 +232,6 @@ protected final byte[] readBytes(long address, long numBytes) } } - /** May be called by subclasses directly but may not be overridden */ - protected final void writeBytes(long address, long numBytes, byte[] data) - throws UnmappedAddressException, DebuggerException { - if (cache != null) { - cache.clear(address, numBytes); - } - writeBytesToProcess(address, numBytes, data); - } - public boolean readJBoolean(long address) throws UnmappedAddressException, UnalignedAddressException { checkJavaConfigured(); @@ -385,78 +376,6 @@ public long readCInteger(long address, long numBytes, boolean isUnsigned) } } - public void writeJBoolean(long address, boolean value) - throws UnmappedAddressException, UnalignedAddressException { - checkJavaConfigured(); - utils.checkAlignment(address, jbooleanSize); - byte[] data = utils.jbooleanToData(value); - writeBytes(address, jbooleanSize, data); - } - - public void writeJByte(long address, byte value) - throws UnmappedAddressException, UnalignedAddressException { - checkJavaConfigured(); - utils.checkAlignment(address, jbyteSize); - byte[] data = utils.jbyteToData(value); - writeBytes(address, jbyteSize, data); - } - - public void writeJChar(long address, char value) - throws UnmappedAddressException, UnalignedAddressException { - checkJavaConfigured(); - utils.checkAlignment(address, jcharSize); - byte[] data = utils.jcharToData(value); - writeBytes(address, jcharSize, data); - } - - public void writeJDouble(long address, double value) - throws UnmappedAddressException, UnalignedAddressException { - checkJavaConfigured(); - utils.checkAlignment(address, jdoubleSize); - byte[] data = utils.jdoubleToData(value); - writeBytes(address, jdoubleSize, data); - } - - public void writeJFloat(long address, float value) - throws UnmappedAddressException, UnalignedAddressException { - checkJavaConfigured(); - utils.checkAlignment(address, jfloatSize); - byte[] data = utils.jfloatToData(value); - writeBytes(address, jfloatSize, data); - } - - public void writeJInt(long address, int value) - throws UnmappedAddressException, UnalignedAddressException { - checkJavaConfigured(); - utils.checkAlignment(address, jintSize); - byte[] data = utils.jintToData(value); - writeBytes(address, jintSize, data); - } - - public void writeJLong(long address, 
long value) - throws UnmappedAddressException, UnalignedAddressException { - checkJavaConfigured(); - utils.checkAlignment(address, jlongSize); - byte[] data = utils.jlongToData(value); - writeBytes(address, jlongSize, data); - } - - public void writeJShort(long address, short value) - throws UnmappedAddressException, UnalignedAddressException { - checkJavaConfigured(); - utils.checkAlignment(address, jshortSize); - byte[] data = utils.jshortToData(value); - writeBytes(address, jshortSize, data); - } - - public void writeCInteger(long address, long numBytes, long value) - throws UnmappedAddressException, UnalignedAddressException { - checkConfigured(); - utils.checkAlignment(address, numBytes); - byte[] data = utils.cIntegerToData(numBytes, value); - writeBytes(address, numBytes, data); - } - protected long readAddressValue(long address) throws UnmappedAddressException, UnalignedAddressException { return readCInteger(address, machDesc.getAddressSize(), true); @@ -481,11 +400,6 @@ protected long readCompKlassAddressValue(long address) return value; } - protected void writeAddressValue(long address, long value) - throws UnmappedAddressException, UnalignedAddressException { - writeCInteger(address, machDesc.getAddressSize(), value); - } - /** Can be called by subclasses but can not be overridden */ protected final void checkConfigured() { if (machDesc == null) { diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/bsd/BsdDebuggerLocal.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/bsd/BsdDebuggerLocal.java index 1d1196b409c23..e46a27c144b99 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/bsd/BsdDebuggerLocal.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/bsd/BsdDebuggerLocal.java @@ -204,18 +204,8 @@ public void checkAlignment(long address, long alignment) { }; if (useCache) { - // FIXME: re-test necessity of cache on Bsd, where data - // fetching is faster - // Cache portion of the remote process's address space. - // Fetching data over the socket connection to dbx is slow. - // Might be faster if we were using a binary protocol to talk to - // dbx, but would have to test. For now, this cache works best - // if it covers the entire heap of the remote process. FIXME: at - // least should make this tunable from the outside, i.e., via - // the UI. This is a cache of 4096 4K pages, or 16 MB. The page - // size must be adjusted to be the hardware's page size. - // (FIXME: should pick this up from the debugger.) - initCache(4096, parseCacheNumPagesProperty(4096)); + // This is a cache of 64k of 4K pages, or 256 MB. + initCache(4096, parseCacheNumPagesProperty(1024 * 64)); } isDarwin = getOS().equals("darwin"); @@ -603,12 +593,6 @@ public void doit(BsdDebuggerLocal debugger) { } } - public void writeBytesToProcess(long address, long numBytes, byte[] data) - throws UnmappedAddressException, DebuggerException { - // FIXME - throw new DebuggerException("Unimplemented"); - } - /** this functions used for core file reading and called from native attach0, it returns an array of long integers as [thread_id, stack_start, stack_end, thread_id, stack_start, stack_end, ....] 
for diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/dummy/DummyDebugger.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/dummy/DummyDebugger.java index c9100efd4b167..6a24466e406b8 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/dummy/DummyDebugger.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/dummy/DummyDebugger.java @@ -126,11 +126,6 @@ public ReadResult readBytesFromProcess(long address, long numBytes) throw new DebuggerException("Unimplemented"); } - public void writeBytesToProcess(long a, long b, byte[] buf) - throws DebuggerException { - throw new DebuggerException("Unimplemented"); - } - //---------------------------------------------------------------------- // Package-internal routines // diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java index ebf8960b6906f..b22d0afb37d9f 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java @@ -223,18 +223,8 @@ public void checkAlignment(long address, long alignment) { }; if (useCache) { - // FIXME: re-test necessity of cache on Linux, where data - // fetching is faster - // Cache portion of the remote process's address space. - // Fetching data over the socket connection to dbx is slow. - // Might be faster if we were using a binary protocol to talk to - // dbx, but would have to test. For now, this cache works best - // if it covers the entire heap of the remote process. FIXME: at - // least should make this tunable from the outside, i.e., via - // the UI. This is a cache of 4096 4K pages, or 16 MB. The page - // size must be adjusted to be the hardware's page size. - // (FIXME: should pick this up from the debugger.) - initCache(4096, parseCacheNumPagesProperty(4096)); + // This is a cache of 64k of 4K pages, or 256 MB. + initCache(4096, parseCacheNumPagesProperty(1024 * 64)); } workerThread = new LinuxDebuggerLocalWorkerThread(this); @@ -660,12 +650,6 @@ public void doit(LinuxDebuggerLocal debugger) { } } - public void writeBytesToProcess(long address, long numBytes, byte[] data) - throws UnmappedAddressException, DebuggerException { - // FIXME - throw new DebuggerException("Unimplemented"); - } - static { System.loadLibrary("saproc"); init0(); diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/ProcAddress.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/ProcAddress.java deleted file mode 100644 index ab6028c651e7f..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/ProcAddress.java +++ /dev/null @@ -1,393 +0,0 @@ -/* - * Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.debugger.proc; - -import sun.jvm.hotspot.debugger.*; - -class ProcAddress implements Address { - protected ProcDebugger debugger; - protected long addr; - - ProcAddress(ProcDebugger debugger, long addr) { - this.debugger = debugger; - this.addr = addr; - } - - // - // Basic Java routines - // - - public boolean equals(Object arg) { - if (arg == null) { - return false; - } - - if (!(arg instanceof ProcAddress)) { - return false; - } - - return (addr == ((ProcAddress) arg).addr); - } - - public int hashCode() { - return Long.hashCode(addr); - } - - public String toString() { - return debugger.addressValueToString(addr); - } - - // - // C/C++-related routines - // - - public long getCIntegerAt(long offset, long numBytes, boolean isUnsigned) throws UnalignedAddressException, UnmappedAddressException { - return debugger.readCInteger(addr + offset, numBytes, isUnsigned); - } - - public Address getAddressAt(long offset) throws UnalignedAddressException, UnmappedAddressException { - return debugger.readAddress(addr + offset); - } - - public Address getCompOopAddressAt(long offset) throws UnalignedAddressException, UnmappedAddressException { - return debugger.readCompOopAddress(addr + offset); - } - - public Address getCompKlassAddressAt(long offset) throws UnalignedAddressException, UnmappedAddressException { - return debugger.readCompKlassAddress(addr + offset); - } - - // - // Java-related routines - // - - public boolean getJBooleanAt(long offset) throws UnalignedAddressException, UnmappedAddressException { - return debugger.readJBoolean(addr + offset); - } - - public byte getJByteAt(long offset) throws UnalignedAddressException, UnmappedAddressException { - return debugger.readJByte(addr + offset); - } - - public char getJCharAt(long offset) throws UnalignedAddressException, UnmappedAddressException { - return debugger.readJChar(addr + offset); - } - - public double getJDoubleAt(long offset) throws UnalignedAddressException, UnmappedAddressException { - return debugger.readJDouble(addr + offset); - } - - public float getJFloatAt(long offset) throws UnalignedAddressException, UnmappedAddressException { - return debugger.readJFloat(addr + offset); - } - - public int getJIntAt(long offset) throws UnalignedAddressException, UnmappedAddressException { - return debugger.readJInt(addr + offset); - } - - public long getJLongAt(long offset) throws UnalignedAddressException, UnmappedAddressException { - return debugger.readJLong(addr + offset); - } - - public short getJShortAt(long offset) throws UnalignedAddressException, UnmappedAddressException { - return debugger.readJShort(addr + offset); - } - - public OopHandle getOopHandleAt(long offset) - throws UnalignedAddressException, UnmappedAddressException, NotInHeapException { - return debugger.readOopHandle(addr + offset); - } - public OopHandle getCompOopHandleAt(long offset) - throws UnalignedAddressException, UnmappedAddressException, NotInHeapException { - return 
debugger.readCompOopHandle(addr + offset); - } - - // Mutators -- not implemented for now (FIXME) - public void setCIntegerAt(long offset, long numBytes, long value) { - throw new DebuggerException("Unimplemented"); - } - public void setAddressAt(long offset, Address value) { - throw new DebuggerException("Unimplemented"); - } - public void setJBooleanAt (long offset, boolean value) - throws UnmappedAddressException, UnalignedAddressException { - throw new DebuggerException("Unimplemented"); - } - public void setJByteAt (long offset, byte value) - throws UnmappedAddressException, UnalignedAddressException { - throw new DebuggerException("Unimplemented"); - } - public void setJCharAt (long offset, char value) - throws UnmappedAddressException, UnalignedAddressException { - throw new DebuggerException("Unimplemented"); - } - public void setJDoubleAt (long offset, double value) - throws UnmappedAddressException, UnalignedAddressException { - throw new DebuggerException("Unimplemented"); - } - public void setJFloatAt (long offset, float value) - throws UnmappedAddressException, UnalignedAddressException { - throw new DebuggerException("Unimplemented"); - } - public void setJIntAt (long offset, int value) - throws UnmappedAddressException, UnalignedAddressException { - throw new DebuggerException("Unimplemented"); - } - public void setJLongAt (long offset, long value) - throws UnmappedAddressException, UnalignedAddressException { - throw new DebuggerException("Unimplemented"); - } - public void setJShortAt (long offset, short value) - throws UnmappedAddressException, UnalignedAddressException { - throw new DebuggerException("Unimplemented"); - } - public void setOopHandleAt (long offset, OopHandle value) - throws UnmappedAddressException, UnalignedAddressException { - throw new DebuggerException("Unimplemented"); - } - - // - // Arithmetic operations -- necessary evil. - // - - public Address addOffsetTo (long offset) throws UnsupportedOperationException { - long value = addr + offset; - if (value == 0) { - return null; - } - return new ProcAddress(debugger, value); - } - - public OopHandle addOffsetToAsOopHandle(long offset) throws UnsupportedOperationException { - long value = addr + offset; - if (value == 0) { - return null; - } - return new ProcOopHandle(debugger, value); - } - - /** (FIXME: any signed/unsigned issues? Should this work for - OopHandles?) */ - public long minus(Address arg) { - if (arg == null) { - return addr; - } - return addr - ((ProcAddress) arg).addr; - } - - // Two's complement representation. - // All negative numbers are larger than positive numbers. - // Numbers with the same sign can be compared normally. - // Test harness is below in main(). 
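The comparison methods being deleted here implement unsigned ordering by special-casing the sign bits, exactly as the removed comment describes. For reference, the same ordering is a one-liner with java.lang.Long (illustration only, not part of the patch):

    // Illustration only: the ordering the removed lessThan()/greaterThan()
    // encode is plain unsigned 64-bit comparison.
    public final class UnsignedCompareDemo {
        static boolean lessThan(long a, long b) {
            return Long.compareUnsigned(a, b) < 0;
        }

        public static void main(String[] args) {
            long p1 = 0x7FFFFFFFFFFFFFF0L;   // "really positive" address
            long n1 = p1 + 0x20;             // wraps to a negative long
            // Signed '<' would order n1 before p1; as an address, n1 is
            // higher, which is what the deleted comparators preserve.
            System.out.println(lessThan(p1, n1));   // true
            System.out.println(lessThan(n1, p1));   // false
        }
    }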
- - public boolean lessThan (Address arg) { - if (arg == null) { - return false; - } - ProcAddress dbxArg = (ProcAddress) arg; - if ((addr >= 0) && (dbxArg.addr < 0)) { - return true; - } - if ((addr < 0) && (dbxArg.addr >= 0)) { - return false; - } - return (addr < dbxArg.addr); - } - - public boolean lessThanOrEqual (Address arg) { - if (arg == null) { - return false; - } - ProcAddress dbxArg = (ProcAddress) arg; - if ((addr >= 0) && (dbxArg.addr < 0)) { - return true; - } - if ((addr < 0) && (dbxArg.addr >= 0)) { - return false; - } - return (addr <= dbxArg.addr); - } - - public boolean greaterThan (Address arg) { - if (arg == null) { - return true; - } - ProcAddress dbxArg = (ProcAddress) arg; - if ((addr >= 0) && (dbxArg.addr < 0)) { - return false; - } - if ((addr < 0) && (dbxArg.addr >= 0)) { - return true; - } - return (addr > dbxArg.addr); - } - - public boolean greaterThanOrEqual(Address arg) { - if (arg == null) { - return true; - } - ProcAddress dbxArg = (ProcAddress) arg; - if ((addr >= 0) && (dbxArg.addr < 0)) { - return false; - } - if ((addr < 0) && (dbxArg.addr >= 0)) { - return true; - } - return (addr >= dbxArg.addr); - } - - public Address andWithMask(long mask) throws UnsupportedOperationException { - long value = addr & mask; - if (value == 0) { - return null; - } - return new ProcAddress(debugger, value); - } - - public Address orWithMask(long mask) throws UnsupportedOperationException { - long value = addr | mask; - if (value == 0) { - return null; - } - return new ProcAddress(debugger, value); - } - - public Address xorWithMask(long mask) throws UnsupportedOperationException { - long value = addr ^ mask; - if (value == 0) { - return null; - } - return new ProcAddress(debugger, value); - } - - public long asLongValue() { return addr; } - //-------------------------------------------------------------------------------- - // Internals only below this point - // - - private static void check(boolean arg, String failMessage) { - if (!arg) { - System.err.println(failMessage + ": FAILED"); - System.exit(1); - } - } - - // Test harness - public static void main(String[] args) { - // p/n indicates whether the interior address is really positive - // or negative. In unsigned terms, p1 < p2 < n1 < n2. 
- - ProcAddress p1 = new ProcAddress(null, 0x7FFFFFFFFFFFFFF0L); - ProcAddress p2 = (ProcAddress) p1.addOffsetTo(10); - ProcAddress n1 = (ProcAddress) p2.addOffsetTo(10); - ProcAddress n2 = (ProcAddress) n1.addOffsetTo(10); - - // lessThan positive tests - check(p1.lessThan(p2), "lessThan 1"); - check(p1.lessThan(n1), "lessThan 2"); - check(p1.lessThan(n2), "lessThan 3"); - check(p2.lessThan(n1), "lessThan 4"); - check(p2.lessThan(n2), "lessThan 5"); - check(n1.lessThan(n2), "lessThan 6"); - - // lessThan negative tests - check(!p1.lessThan(p1), "lessThan 7"); - check(!p2.lessThan(p2), "lessThan 8"); - check(!n1.lessThan(n1), "lessThan 9"); - check(!n2.lessThan(n2), "lessThan 10"); - - check(!p2.lessThan(p1), "lessThan 11"); - check(!n1.lessThan(p1), "lessThan 12"); - check(!n2.lessThan(p1), "lessThan 13"); - check(!n1.lessThan(p2), "lessThan 14"); - check(!n2.lessThan(p2), "lessThan 15"); - check(!n2.lessThan(n1), "lessThan 16"); - - // lessThanOrEqual positive tests - check(p1.lessThanOrEqual(p1), "lessThanOrEqual 1"); - check(p2.lessThanOrEqual(p2), "lessThanOrEqual 2"); - check(n1.lessThanOrEqual(n1), "lessThanOrEqual 3"); - check(n2.lessThanOrEqual(n2), "lessThanOrEqual 4"); - - check(p1.lessThanOrEqual(p2), "lessThanOrEqual 5"); - check(p1.lessThanOrEqual(n1), "lessThanOrEqual 6"); - check(p1.lessThanOrEqual(n2), "lessThanOrEqual 7"); - check(p2.lessThanOrEqual(n1), "lessThanOrEqual 8"); - check(p2.lessThanOrEqual(n2), "lessThanOrEqual 9"); - check(n1.lessThanOrEqual(n2), "lessThanOrEqual 10"); - - // lessThanOrEqual negative tests - check(!p2.lessThanOrEqual(p1), "lessThanOrEqual 11"); - check(!n1.lessThanOrEqual(p1), "lessThanOrEqual 12"); - check(!n2.lessThanOrEqual(p1), "lessThanOrEqual 13"); - check(!n1.lessThanOrEqual(p2), "lessThanOrEqual 14"); - check(!n2.lessThanOrEqual(p2), "lessThanOrEqual 15"); - check(!n2.lessThanOrEqual(n1), "lessThanOrEqual 16"); - - // greaterThan positive tests - check(n2.greaterThan(p1), "greaterThan 1"); - check(n2.greaterThan(p2), "greaterThan 2"); - check(n2.greaterThan(n1), "greaterThan 3"); - check(n1.greaterThan(p1), "greaterThan 4"); - check(n1.greaterThan(p2), "greaterThan 5"); - check(p2.greaterThan(p1), "greaterThan 6"); - - // greaterThan negative tests - check(!p1.greaterThan(p1), "greaterThan 7"); - check(!p2.greaterThan(p2), "greaterThan 8"); - check(!n1.greaterThan(n1), "greaterThan 9"); - check(!n2.greaterThan(n2), "greaterThan 10"); - - check(!p1.greaterThan(n2), "greaterThan 11"); - check(!p2.greaterThan(n2), "greaterThan 12"); - check(!n1.greaterThan(n2), "greaterThan 13"); - check(!p1.greaterThan(n1), "greaterThan 14"); - check(!p2.greaterThan(n1), "greaterThan 15"); - check(!p1.greaterThan(p2), "greaterThan 16"); - - // greaterThanOrEqual positive tests - check(p1.greaterThanOrEqual(p1), "greaterThanOrEqual 1"); - check(p2.greaterThanOrEqual(p2), "greaterThanOrEqual 2"); - check(n1.greaterThanOrEqual(n1), "greaterThanOrEqual 3"); - check(n2.greaterThanOrEqual(n2), "greaterThanOrEqual 4"); - - check(n2.greaterThanOrEqual(p1), "greaterThanOrEqual 5"); - check(n2.greaterThanOrEqual(p2), "greaterThanOrEqual 6"); - check(n2.greaterThanOrEqual(n1), "greaterThanOrEqual 7"); - check(n1.greaterThanOrEqual(p1), "greaterThanOrEqual 8"); - check(n1.greaterThanOrEqual(p2), "greaterThanOrEqual 9"); - check(p2.greaterThanOrEqual(p1), "greaterThanOrEqual 10"); - - // greaterThanOrEqual negative tests - check(!p1.greaterThanOrEqual(n2), "greaterThanOrEqual 11"); - check(!p2.greaterThanOrEqual(n2), "greaterThanOrEqual 12"); - 
check(!n1.greaterThanOrEqual(n2), "greaterThanOrEqual 13"); - check(!p1.greaterThanOrEqual(n1), "greaterThanOrEqual 14"); - check(!p2.greaterThanOrEqual(n1), "greaterThanOrEqual 15"); - check(!p1.greaterThanOrEqual(p2), "greaterThanOrEqual 16"); - - System.err.println("ProcAddress: all tests passed successfully."); - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/ProcCDebugger.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/ProcCDebugger.java deleted file mode 100644 index 9a6d2dd1aea24..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/ProcCDebugger.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.debugger.proc; - -import java.io.*; -import java.util.*; -import sun.jvm.hotspot.debugger.*; -import sun.jvm.hotspot.debugger.cdbg.*; -import sun.jvm.hotspot.utilities.*; - -class ProcCDebugger implements CDebugger { - private ProcDebugger dbg; - - ProcCDebugger(ProcDebugger dbg) { - this.dbg = dbg; - } - - public List getThreadList() throws DebuggerException { - return dbg.getThreadList(); - } - - public List getLoadObjectList() throws DebuggerException { - return dbg.getLoadObjectList(); - } - - public LoadObject loadObjectContainingPC(Address pc) throws DebuggerException { - if (pc == null) { - return null; - } - List objs = getLoadObjectList(); - Object[] arr = objs.toArray(); - // load objects are sorted by base address, do binary search - int mid = -1; - int low = 0; - int high = arr.length - 1; - - while (low <= high) { - mid = (low + high) >> 1; - LoadObject midVal = (LoadObject) arr[mid]; - long cmp = pc.minus(midVal.getBase()); - if (cmp < 0) { - high = mid - 1; - } else if (cmp > 0) { - long size = midVal.getSize(); - if (cmp >= size) { - low = mid + 1; - } else { - return (LoadObject) arr[mid]; - } - } else { // match found - return (LoadObject) arr[mid]; - } - } - // no match found. 
- return null; - } - - public CFrame topFrameForThread(ThreadProxy thread) throws DebuggerException { - return dbg.topFrameForThread(thread); - } - - public String getNameOfFile(String fileName) { - return new File(fileName).getName(); - } - - public ProcessControl getProcessControl() throws DebuggerException { - // FIXME: after stabs parser - return null; - } - - // C++ name demangling - public boolean canDemangle() { - return true; - } - - public String demangle(String sym) { - return dbg.demangle(sym); - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/ProcCFrame.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/ProcCFrame.java deleted file mode 100644 index 83d5f677a6e19..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/ProcCFrame.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.debugger.proc; - -import sun.jvm.hotspot.debugger.*; -import sun.jvm.hotspot.debugger.cdbg.*; -import sun.jvm.hotspot.debugger.cdbg.basic.*; - -final class ProcCFrame extends BasicCFrame { - public Address pc() { - return pc; - } - - public Address localVariableBase() { - return fp; - } - - public CFrame sender(ThreadProxy t) { - return sender; - } - - public ClosestSymbol closestSymbolToPC() { - // we don't use base class ELF parsing based - // symbol lookup for pc for performance reasons. - return procDbg.lookup(procDbg.getAddressValue(pc)); - } - - // package/class internals only - - ProcCFrame(ProcDebugger dbg, Address pc, Address fp) { - super(dbg.getCDebugger()); - this.pc = pc; - this.fp = fp; - this.procDbg = dbg; - } - - void setSender(ProcCFrame sender) { - this.sender = sender; - } - - private Address pc; - private Address fp; - private ProcCFrame sender; - private ProcDebugger procDbg; -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/ProcDebugger.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/ProcDebugger.java deleted file mode 100644 index bb7b3046f88f6..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/ProcDebugger.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (c) 2002, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.debugger.proc; - -import java.util.List; -import sun.jvm.hotspot.debugger.*; -import sun.jvm.hotspot.debugger.cdbg.*; - -/** An extension of the JVMDebugger interface with a few additions to - support 32-bit vs. 64-bit debugging as well as features required - by the architecture-specific subpackages. */ - -public interface ProcDebugger extends JVMDebugger { - public MachineDescription getMachineDescription() throws DebuggerException; - public String addressValueToString(long address) throws DebuggerException; - public boolean readJBoolean(long address) throws DebuggerException; - public byte readJByte(long address) throws DebuggerException; - public char readJChar(long address) throws DebuggerException; - public double readJDouble(long address) throws DebuggerException; - public float readJFloat(long address) throws DebuggerException; - public int readJInt(long address) throws DebuggerException; - public long readJLong(long address) throws DebuggerException; - public short readJShort(long address) throws DebuggerException; - public long readCInteger(long address, long numBytes, boolean isUnsigned) - throws DebuggerException; - public ProcAddress readAddress(long address) throws DebuggerException; - public ProcAddress readCompOopAddress(long address) throws DebuggerException; - public ProcAddress readCompKlassAddress(long address) throws DebuggerException; - public ProcOopHandle readOopHandle(long address) throws DebuggerException; - public ProcOopHandle readCompOopHandle(long address) throws DebuggerException; - public long[] getThreadIntegerRegisterSet(int tid) throws DebuggerException; - public long getAddressValue(Address addr) throws DebuggerException; - - // for ProcCDebugger, ProcCFrame and SharedObject - public List getThreadList() throws DebuggerException; - public List getLoadObjectList() throws DebuggerException; - public CFrame topFrameForThread(ThreadProxy thread) throws DebuggerException; - public ClosestSymbol lookup(long address) throws DebuggerException; - public String demangle(String name); -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/ProcOopHandle.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/ProcOopHandle.java deleted file mode 100644 index 485bcf701a132..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/ProcOopHandle.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.debugger.proc; - -import sun.jvm.hotspot.debugger.*; - -class ProcOopHandle extends ProcAddress implements OopHandle { - ProcOopHandle(ProcDebugger debugger, long addr) { - super(debugger, addr); - } - - public Address addOffsetTo (long offset) throws UnsupportedOperationException { - throw new UnsupportedOperationException("addOffsetTo not applicable to OopHandles (interior object pointers not allowed)"); - } - - public Address andWithMask(long mask) throws UnsupportedOperationException { - throw new UnsupportedOperationException("andWithMask not applicable to OopHandles (i.e., anything but C addresses)"); - } - - public Address orWithMask(long mask) throws UnsupportedOperationException { - throw new UnsupportedOperationException("orWithMask not applicable to OopHandles (i.e., anything but C addresses)"); - } - - public Address xorWithMask(long mask) throws UnsupportedOperationException { - throw new UnsupportedOperationException("xorWithMask not applicable to OopHandles (i.e., anything but C addresses)"); - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/ProcThreadFactory.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/ProcThreadFactory.java deleted file mode 100644 index c27631d4bc20c..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/ProcThreadFactory.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -package sun.jvm.hotspot.debugger.proc; - -import sun.jvm.hotspot.debugger.*; - -/** An interface used only internally by the ProcDebugger to be able to - create platform-specific Thread objects */ - -public interface ProcThreadFactory { - public ThreadProxy createThreadWrapper(Address threadIdentifierAddr); - public ThreadProxy createThreadWrapper(long id); -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/SharedObject.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/SharedObject.java deleted file mode 100644 index 480b33a9768c0..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/SharedObject.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.debugger.proc; - -import sun.jvm.hotspot.debugger.*; -import sun.jvm.hotspot.debugger.cdbg.*; -import sun.jvm.hotspot.debugger.posix.*; - -/** An Object can represent either a .so or an a.out file. */ - -class SharedObject extends DSO { - SharedObject(ProcDebugger dbg, String filename, long size, Address relocation) { - super(filename, size, relocation); - this.dbg = dbg; - } - - protected Address newAddress(long address) { - return dbg.newAddress(address); - } - - protected long getAddressValue(Address addr) { - return dbg.getAddressValue(addr); - } - - public ClosestSymbol closestSymbolToPC(Address pcAsAddr) throws DebuggerException { - return dbg.lookup(dbg.getAddressValue(pcAsAddr)); - } - - private ProcDebugger dbg; -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/aarch64/ProcAARCH64Thread.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/aarch64/ProcAARCH64Thread.java deleted file mode 100644 index 04f97bea1939a..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/aarch64/ProcAARCH64Thread.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright (c) 2004, 2022, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2015, Red Hat Inc. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.debugger.proc.aarch64; - -import sun.jvm.hotspot.debugger.*; -import sun.jvm.hotspot.debugger.aarch64.*; -import sun.jvm.hotspot.debugger.proc.*; -import sun.jvm.hotspot.utilities.*; - -public class ProcAARCH64Thread implements ThreadProxy { - private ProcDebugger debugger; - private int id; - - public ProcAARCH64Thread(ProcDebugger debugger, Address addr) { - this.debugger = debugger; - - // FIXME: the size here should be configurable. However, making it - // so would produce a dependency on the "types" package from the - // debugger package, which is not desired. - this.id = (int) addr.getCIntegerAt(0, 4, true); - } - - public ProcAARCH64Thread(ProcDebugger debugger, long id) { - this.debugger = debugger; - this.id = (int) id; - } - - public ThreadContext getContext() throws IllegalThreadStateException { - ProcAARCH64ThreadContext context = new ProcAARCH64ThreadContext(debugger); - long[] regs = debugger.getThreadIntegerRegisterSet(id); - if (Assert.ASSERTS_ENABLED) { - Assert.that(regs.length == AARCH64ThreadContext.NPRGREG, "size mismatch"); - } - for (int i = 0; i < regs.length; i++) { - context.setRegister(i, regs[i]); - } - return context; - } - - public boolean canSetContext() throws DebuggerException { - return false; - } - - public void setContext(ThreadContext context) - throws IllegalThreadStateException, DebuggerException { - throw new DebuggerException("Unimplemented"); - } - - public String toString() { - return "t@" + id; - } - - public boolean equals(Object obj) { - if (!(obj instanceof ProcAARCH64Thread other)) { - return false; - } - - return (other.id == id); - } - - public int hashCode() { - return id; - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/aarch64/ProcAARCH64ThreadContext.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/aarch64/ProcAARCH64ThreadContext.java deleted file mode 100644 index 9d3cbc53d5de3..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/aarch64/ProcAARCH64ThreadContext.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2015, Red Hat Inc. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.debugger.proc.aarch64; - -import sun.jvm.hotspot.debugger.*; -import sun.jvm.hotspot.debugger.aarch64.*; -import sun.jvm.hotspot.debugger.proc.*; - -public class ProcAARCH64ThreadContext extends AARCH64ThreadContext { - private ProcDebugger debugger; - - public ProcAARCH64ThreadContext(ProcDebugger debugger) { - super(); - this.debugger = debugger; - } - - public void setRegisterAsAddress(int index, Address value) { - setRegister(index, debugger.getAddressValue(value)); - } - - public Address getRegisterAsAddress(int index) { - return debugger.newAddress(getRegister(index)); - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/aarch64/ProcAARCH64ThreadFactory.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/aarch64/ProcAARCH64ThreadFactory.java deleted file mode 100644 index 392ed8b0b1635..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/aarch64/ProcAARCH64ThreadFactory.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2015, Red Hat Inc. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -package sun.jvm.hotspot.debugger.proc.aarch64; - -import sun.jvm.hotspot.debugger.*; -import sun.jvm.hotspot.debugger.proc.*; - -public class ProcAARCH64ThreadFactory implements ProcThreadFactory { - private ProcDebugger debugger; - - public ProcAARCH64ThreadFactory(ProcDebugger debugger) { - this.debugger = debugger; - } - - public ThreadProxy createThreadWrapper(Address threadIdentifierAddr) { - return new ProcAARCH64Thread(debugger, threadIdentifierAddr); - } - - public ThreadProxy createThreadWrapper(long id) { - return new ProcAARCH64Thread(debugger, id); - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/amd64/ProcAMD64Thread.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/amd64/ProcAMD64Thread.java deleted file mode 100644 index 0abf453a0d8e2..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/amd64/ProcAMD64Thread.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright (c) 2004, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.debugger.proc.amd64; - -import sun.jvm.hotspot.debugger.*; -import sun.jvm.hotspot.debugger.amd64.*; -import sun.jvm.hotspot.debugger.proc.*; -import sun.jvm.hotspot.utilities.*; - -public class ProcAMD64Thread implements ThreadProxy { - private ProcDebugger debugger; - private int id; - - public ProcAMD64Thread(ProcDebugger debugger, Address addr) { - this.debugger = debugger; - - // FIXME: the size here should be configurable. However, making it - // so would produce a dependency on the "types" package from the - // debugger package, which is not desired. 
- this.id = (int) addr.getCIntegerAt(0, 4, true); - } - - public ProcAMD64Thread(ProcDebugger debugger, long id) { - this.debugger = debugger; - this.id = (int) id; - } - - public ThreadContext getContext() throws IllegalThreadStateException { - ProcAMD64ThreadContext context = new ProcAMD64ThreadContext(debugger); - long[] regs = debugger.getThreadIntegerRegisterSet(id); - if (Assert.ASSERTS_ENABLED) { - Assert.that(regs.length == AMD64ThreadContext.NPRGREG, "size mismatch"); - } - for (int i = 0; i < regs.length; i++) { - context.setRegister(i, regs[i]); - } - return context; - } - - public boolean canSetContext() throws DebuggerException { - return false; - } - - public void setContext(ThreadContext context) - throws IllegalThreadStateException, DebuggerException { - throw new DebuggerException("Unimplemented"); - } - - public String toString() { - return "t@" + id; - } - - public boolean equals(Object obj) { - if (!(obj instanceof ProcAMD64Thread other)) { - return false; - } - - return (other.id == id); - } - - public int hashCode() { - return id; - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/amd64/ProcAMD64ThreadFactory.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/amd64/ProcAMD64ThreadFactory.java deleted file mode 100644 index 26b5647a857a1..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/amd64/ProcAMD64ThreadFactory.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -package sun.jvm.hotspot.debugger.proc.amd64; - -import sun.jvm.hotspot.debugger.*; -import sun.jvm.hotspot.debugger.proc.*; - -public class ProcAMD64ThreadFactory implements ProcThreadFactory { - private ProcDebugger debugger; - - public ProcAMD64ThreadFactory(ProcDebugger debugger) { - this.debugger = debugger; - } - - public ThreadProxy createThreadWrapper(Address threadIdentifierAddr) { - return new ProcAMD64Thread(debugger, threadIdentifierAddr); - } - - public ThreadProxy createThreadWrapper(long id) { - return new ProcAMD64Thread(debugger, id); - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/ppc64/ProcPPC64Thread.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/ppc64/ProcPPC64Thread.java deleted file mode 100644 index 4b9aff204d531..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/ppc64/ProcPPC64Thread.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.debugger.proc.ppc64; - -import sun.jvm.hotspot.debugger.*; -import sun.jvm.hotspot.debugger.ppc64.*; -import sun.jvm.hotspot.debugger.proc.*; -import sun.jvm.hotspot.utilities.*; - -public class ProcPPC64Thread implements ThreadProxy { - private ProcDebugger debugger; - private int id; - - public ProcPPC64Thread(ProcDebugger debugger, Address addr) { - this.debugger = debugger; - - // FIXME: the size here should be configurable. However, making it - // so would produce a dependency on the "types" package from the - // debugger package, which is not desired. 
- this.id = (int) addr.getCIntegerAt(0, 4, true); - } - - public ProcPPC64Thread(ProcDebugger debugger, long id) { - this.debugger = debugger; - this.id = (int) id; - } - - public ThreadContext getContext() throws IllegalThreadStateException { - ProcPPC64ThreadContext context = new ProcPPC64ThreadContext(debugger); - long[] regs = debugger.getThreadIntegerRegisterSet(id); - if (Assert.ASSERTS_ENABLED) { - Assert.that(regs.length <= PPC64ThreadContext.NPRGREG, "size of register set is greater than " + PPC64ThreadContext.NPRGREG); - } - for (int i = 0; i < regs.length; i++) { - context.setRegister(i, regs[i]); - } - return context; - } - - public boolean canSetContext() throws DebuggerException { - return false; - } - - public void setContext(ThreadContext context) - throws IllegalThreadStateException, DebuggerException { - throw new DebuggerException("Unimplemented"); - } - - public String toString() { - return "t@" + id; - } - - public boolean equals(Object obj) { - if (!(obj instanceof ProcPPC64Thread other)) { - return false; - } - - return (other.id == id); - } - - public int hashCode() { - return id; - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/ppc64/ProcPPC64ThreadFactory.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/ppc64/ProcPPC64ThreadFactory.java deleted file mode 100644 index 115b0fd3074a8..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/ppc64/ProcPPC64ThreadFactory.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -package sun.jvm.hotspot.debugger.proc.ppc64; - -import sun.jvm.hotspot.debugger.*; -import sun.jvm.hotspot.debugger.proc.*; - -public class ProcPPC64ThreadFactory implements ProcThreadFactory { - private ProcDebugger debugger; - - public ProcPPC64ThreadFactory(ProcDebugger debugger) { - this.debugger = debugger; - } - - public ThreadProxy createThreadWrapper(Address threadIdentifierAddr) { - return new ProcPPC64Thread(debugger, threadIdentifierAddr); - } - - public ThreadProxy createThreadWrapper(long id) { - return new ProcPPC64Thread(debugger, id); - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/riscv64/ProcRISCV64Thread.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/riscv64/ProcRISCV64Thread.java deleted file mode 100644 index db89bc10ed665..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/riscv64/ProcRISCV64Thread.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright (c) 2004, 2022, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2015, Red Hat Inc. - * Copyright (c) 2021, Huawei Technologies Co., Ltd. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.debugger.proc.riscv64; - -import sun.jvm.hotspot.debugger.*; -import sun.jvm.hotspot.debugger.riscv64.*; -import sun.jvm.hotspot.debugger.proc.*; -import sun.jvm.hotspot.utilities.*; - -public class ProcRISCV64Thread implements ThreadProxy { - private ProcDebugger debugger; - private int id; - - public ProcRISCV64Thread(ProcDebugger debugger, Address addr) { - this.debugger = debugger; - - // FIXME: the size here should be configurable. However, making it - // so would produce a dependency on the "types" package from the - // debugger package, which is not desired. 
- this.id = (int) addr.getCIntegerAt(0, 4, true); - } - - public ProcRISCV64Thread(ProcDebugger debugger, long id) { - this.debugger = debugger; - this.id = (int) id; - } - - public ThreadContext getContext() throws IllegalThreadStateException { - ProcRISCV64ThreadContext context = new ProcRISCV64ThreadContext(debugger); - long[] regs = debugger.getThreadIntegerRegisterSet(id); - if (Assert.ASSERTS_ENABLED) { - Assert.that(regs.length == RISCV64ThreadContext.NPRGREG, "size mismatch"); - } - for (int i = 0; i < regs.length; i++) { - context.setRegister(i, regs[i]); - } - return context; - } - - public boolean canSetContext() throws DebuggerException { - return false; - } - - public void setContext(ThreadContext context) - throws IllegalThreadStateException, DebuggerException { - throw new DebuggerException("Unimplemented"); - } - - public String toString() { - return "t@" + id; - } - - public boolean equals(Object obj) { - if (!(obj instanceof ProcRISCV64Thread other)) { - return false; - } - - return (other.id == id); - } - - public int hashCode() { - return id; - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/riscv64/ProcRISCV64ThreadContext.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/riscv64/ProcRISCV64ThreadContext.java deleted file mode 100644 index f2aa845e665c8..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/riscv64/ProcRISCV64ThreadContext.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2015, Red Hat Inc. - * Copyright (c) 2021, Huawei Technologies Co., Ltd. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -package sun.jvm.hotspot.debugger.proc.riscv64; - -import sun.jvm.hotspot.debugger.*; -import sun.jvm.hotspot.debugger.riscv64.*; -import sun.jvm.hotspot.debugger.proc.*; - -public class ProcRISCV64ThreadContext extends RISCV64ThreadContext { - private ProcDebugger debugger; - - public ProcRISCV64ThreadContext(ProcDebugger debugger) { - super(); - this.debugger = debugger; - } - - public void setRegisterAsAddress(int index, Address value) { - setRegister(index, debugger.getAddressValue(value)); - } - - public Address getRegisterAsAddress(int index) { - return debugger.newAddress(getRegister(index)); - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/riscv64/ProcRISCV64ThreadFactory.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/riscv64/ProcRISCV64ThreadFactory.java deleted file mode 100644 index 19f64b8ce2dc8..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/riscv64/ProcRISCV64ThreadFactory.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2015, Red Hat Inc. - * Copyright (c) 2021, Huawei Technologies Co., Ltd. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.debugger.proc.riscv64; - -import sun.jvm.hotspot.debugger.*; -import sun.jvm.hotspot.debugger.proc.*; - -public class ProcRISCV64ThreadFactory implements ProcThreadFactory { - private ProcDebugger debugger; - - public ProcRISCV64ThreadFactory(ProcDebugger debugger) { - this.debugger = debugger; - } - - public ThreadProxy createThreadWrapper(Address threadIdentifierAddr) { - return new ProcRISCV64Thread(debugger, threadIdentifierAddr); - } - - public ThreadProxy createThreadWrapper(long id) { - return new ProcRISCV64Thread(debugger, id); - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/x86/ProcX86Thread.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/x86/ProcX86Thread.java deleted file mode 100644 index 65f126259a104..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/x86/ProcX86Thread.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.debugger.proc.x86; - -import sun.jvm.hotspot.debugger.*; -import sun.jvm.hotspot.debugger.x86.*; -import sun.jvm.hotspot.debugger.proc.*; -import sun.jvm.hotspot.utilities.*; - -public class ProcX86Thread implements ThreadProxy { - private ProcDebugger debugger; - private int id; - - public ProcX86Thread(ProcDebugger debugger, Address addr) { - this.debugger = debugger; - - // FIXME: the size here should be configurable. However, making it - // so would produce a dependency on the "types" package from the - // debugger package, which is not desired. - this.id = (int) addr.getCIntegerAt(0, 4, true); - } - - public ProcX86Thread(ProcDebugger debugger, long id) { - this.debugger = debugger; - this.id = (int) id; - } - - public ThreadContext getContext() throws IllegalThreadStateException { - ProcX86ThreadContext context = new ProcX86ThreadContext(debugger); - long[] regs = debugger.getThreadIntegerRegisterSet(id); - /* - _NGREG in reg.h is defined to be 19. Because we have included - debug registers X86ThreadContext.NPRGREG is 25. - */ - - if (Assert.ASSERTS_ENABLED) { - Assert.that(regs.length <= X86ThreadContext.NPRGREG, "size of register set is greater than " + X86ThreadContext.NPRGREG); - } - for (int i = 0; i < regs.length; i++) { - context.setRegister(i, regs[i]); - } - return context; - } - - public boolean canSetContext() throws DebuggerException { - return false; - } - - public void setContext(ThreadContext context) - throws IllegalThreadStateException, DebuggerException { - throw new DebuggerException("Unimplemented"); - } - - public String toString() { - return "t@" + id; - } - - public boolean equals(Object obj) { - if (!(obj instanceof ProcX86Thread other)) { - return false; - } - - return (other.id == id); - } - - public int hashCode() { - return id; - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/x86/ProcX86ThreadContext.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/x86/ProcX86ThreadContext.java deleted file mode 100644 index 744b154d32f20..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/x86/ProcX86ThreadContext.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.debugger.proc.x86; - -import sun.jvm.hotspot.debugger.*; -import sun.jvm.hotspot.debugger.x86.*; -import sun.jvm.hotspot.debugger.proc.*; - -public class ProcX86ThreadContext extends X86ThreadContext { - private ProcDebugger debugger; - - public ProcX86ThreadContext(ProcDebugger debugger) { - super(); - this.debugger = debugger; - } - - public void setRegisterAsAddress(int index, Address value) { - setRegister(index, debugger.getAddressValue(value)); - } - - public Address getRegisterAsAddress(int index) { - return debugger.newAddress(getRegister(index)); - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerClient.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerClient.java index ef7f7f730c47f..57770f3167c34 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerClient.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerClient.java @@ -51,7 +51,7 @@ public class RemoteDebuggerClient extends DebuggerBase implements JVMDebugger { private RemoteDebugger remoteDebugger; private RemoteThreadFactory threadFactory; private boolean unalignedAccessesOkay = false; - private static final int cacheSize = 16 * 1024 * 1024; // 16 MB + private static final int cacheSize = 256 * 1024 * 1024; // 256 MB public RemoteDebuggerClient(RemoteDebugger remoteDebugger) throws DebuggerException { super(); @@ -59,24 +59,17 @@ public RemoteDebuggerClient(RemoteDebugger remoteDebugger) throws DebuggerExcept this.remoteDebugger = remoteDebugger; machDesc = remoteDebugger.getMachineDescription(); utils = new DebuggerUtilities(machDesc.getAddressSize(), machDesc.isBigEndian()); - int cacheNumPages; - int cachePageSize; + int cachePageSize = 4096; + int cacheNumPages = parseCacheNumPagesProperty(cacheSize / cachePageSize); String cpu = remoteDebugger.getCPU(); - // page size. (FIXME: should pick this up from the remoteDebugger.) 
if (cpu.equals("x86")) { threadFactory = new RemoteX86ThreadFactory(this); - cachePageSize = 4096; - cacheNumPages = parseCacheNumPagesProperty(cacheSize / cachePageSize); unalignedAccessesOkay = true; } else if (cpu.equals("amd64") || cpu.equals("x86_64")) { threadFactory = new RemoteAMD64ThreadFactory(this); - cachePageSize = 4096; - cacheNumPages = parseCacheNumPagesProperty(cacheSize / cachePageSize); unalignedAccessesOkay = true; } else if (cpu.equals("ppc64")) { threadFactory = new RemotePPC64ThreadFactory(this); - cachePageSize = 4096; - cacheNumPages = parseCacheNumPagesProperty(cacheSize / cachePageSize); unalignedAccessesOkay = true; } else if (cpu.equals("loongarch64")) { threadFactory = new RemoteLOONGARCH64ThreadFactory(this); @@ -93,8 +86,6 @@ public RemoteDebuggerClient(RemoteDebugger remoteDebugger) throws DebuggerExcept } catch (Exception e) { throw new DebuggerException("Thread access for CPU architecture " + cpu + " not yet supported"); } - cachePageSize = 4096; - cacheNumPages = parseCacheNumPagesProperty(cacheSize / cachePageSize); unalignedAccessesOkay = false; } @@ -424,10 +415,6 @@ public ReadResult readBytesFromProcess(long address, long numBytes) { } } - public void writeBytesToProcess(long a, long b, byte[] c) { - throw new DebuggerException("Unimplemented!"); - } - public String execCommandOnServer(String command, Map options) { try { return remoteDebugger.execCommandOnServer(command, options); diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/remote/RemoteThreadFactory.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/remote/RemoteThreadFactory.java index 72303bd285446..b4e65c806f3db 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/remote/RemoteThreadFactory.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/remote/RemoteThreadFactory.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ import sun.jvm.hotspot.debugger.*; -/** An interface used only internally by the ProcDebugger to be able to +/** An interface used only internally by the RemoteDebuggerClient to be able to create platform-specific Thread objects */ public interface RemoteThreadFactory { diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebuggerLocal.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebuggerLocal.java index 95825606cc913..1639988e1a503 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebuggerLocal.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebuggerLocal.java @@ -120,19 +120,8 @@ public void checkAlignment(long address, long alignment) { } if (useCache) { - // Cache portion of the remote process's address space. - // Fetching data over the socket connection to dbx is slow. - // Might be faster if we were using a binary protocol to talk to - // dbx, but would have to test. For now, this cache works best - // if it covers the entire heap of the remote process. FIXME: at - // least should make this tunable from the outside, i.e., via - // the UI. This is a cache of 4096 4K pages, or 16 MB. The page - // size must be adjusted to be the hardware's page size. 
- // (FIXME: should pick this up from the debugger.) - initCache(4096, 4096); + initCache(4096, parseCacheNumPagesProperty(1024 * 64)); } - // FIXME: add instantiation of thread factory - } /** From the Debugger interface via JVMDebugger */ @@ -501,12 +490,6 @@ private DLL findDLLByName(String fullPathName) { return null; } - public void writeBytesToProcess(long address, long numBytes, byte[] data) - throws UnmappedAddressException, DebuggerException { - // FIXME - throw new DebuggerException("Unimplemented"); - } - private static String imagePath; private static String symbolPath; private static boolean useNativeLookup; diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/CodeCacheSweeperThread.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/CodeCacheSweeperThread.java deleted file mode 100644 index 72877516b658a..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/CodeCacheSweeperThread.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.runtime; - -import java.io.*; -import java.util.*; -import sun.jvm.hotspot.debugger.*; -import sun.jvm.hotspot.types.*; - -public class CodeCacheSweeperThread extends JavaThread { - public CodeCacheSweeperThread(Address addr) { - super(addr); - } - - public boolean isJavaThread() { return false; } - public boolean isHiddenFromExternalView() { return true; } - public boolean isCodeCacheSweeperThread() { return true; } - -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/JavaThread.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/JavaThread.java index 00adf9c285b6e..84a7d520bcb58 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/JavaThread.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/JavaThread.java @@ -124,7 +124,7 @@ void setThreadPDAccess(JavaThreadPDAccess access) { } /** NOTE: for convenience, this differs in definition from the underlying VM. - Only "pure" JavaThreads return true; CompilerThreads, the CodeCacheSweeperThread, + Only "pure" JavaThreads return true; CompilerThreads, JVMDIDebuggerThreads return false. 
FIXME: consider encapsulating platform-specific functionality in an diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java index 3590b9ad94ffa..89b86a1f15767 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java @@ -160,7 +160,6 @@ private static synchronized void initialize(TypeDataBase db) { virtualConstructor.addMapping("JavaThread", JavaThread.class); if (!VM.getVM().isCore()) { virtualConstructor.addMapping("CompilerThread", CompilerThread.class); - virtualConstructor.addMapping("CodeCacheSweeperThread", CodeCacheSweeperThread.class); } virtualConstructor.addMapping("JvmtiAgentThread", JvmtiAgentThread.class); virtualConstructor.addMapping("ServiceThread", ServiceThread.class); @@ -204,7 +203,7 @@ public JavaThread createJavaThreadWrapper(Address threadAddr) { return thread; } catch (Exception e) { throw new RuntimeException("Unable to deduce type of thread from address " + threadAddr + - " (expected type JavaThread, CompilerThread, MonitorDeflationThread, ServiceThread, JvmtiAgentThread or CodeCacheSweeperThread)", e); + " (expected type JavaThread, CompilerThread, MonitorDeflationThread, ServiceThread or JvmtiAgentThread)", e); } } diff --git a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.code/src/jdk/vm/ci/code/InstalledCode.java b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.code/src/jdk/vm/ci/code/InstalledCode.java index cf42ac7ed6494..489cca81c7bff 100644 --- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.code/src/jdk/vm/ci/code/InstalledCode.java +++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.code/src/jdk/vm/ci/code/InstalledCode.java @@ -29,12 +29,13 @@ public class InstalledCode { /** - * Raw address address of entity representing this installed code. + * Address of the entity (e.g., HotSpot {@code nmethod} or {@code RuntimeStub}) representing + * this installed code. */ protected long address; /** - * Raw address of entryPoint of this installed code. + * Address of the entryPoint of this installed code. */ protected long entryPoint; @@ -50,7 +51,8 @@ public InstalledCode(String name) { } /** - * @return the address of entity representing this installed code. + * @return the address of entity (e.g., HotSpot {@code nmethod} or {@code RuntimeStub}) + * representing this installed code */ public long getAddress() { return address; @@ -94,8 +96,7 @@ public boolean isValid() { } /** - * @return true if the code represented by this object still exists and might have live - * activations, false otherwise (may happen due to deopt, etc.) + * @return true if this object still points to installed code */ public boolean isAlive() { return address != 0; @@ -108,12 +109,28 @@ public byte[] getCode() { return null; } + /** + * Equivalent to calling {@link #invalidate(boolean)} with a {@code true} argument. + */ + public void invalidate() { + invalidate(true); + } + /** * Invalidates this installed code such that any subsequent * {@linkplain #executeVarargs(Object...) invocation} will throw an - * {@link InvalidInstalledCodeException} and all existing invocations will be deoptimized. + * {@link InvalidInstalledCodeException}. + * + * If this installed code is already {@linkplain #isValid() invalid}, this method has no effect. + * A subsequent call to {@link #isAlive()} or {@link #isValid()} on this object will return + * {@code false}. 
+ * + * @param deoptimize if {@code true}, all existing invocations will be immediately deoptimized. + * If {@code false}, any existing invocation will continue until it completes or + * there is a subsequent call to this method with {@code deoptimize == true} before + * the invocation completes. */ - public void invalidate() { + public void invalidate(boolean deoptimize) { throw new UnsupportedOperationException(); } diff --git a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/CompilerToVM.java b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/CompilerToVM.java index 506b9183648e6..e3d311e547c8e 100644 --- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/CompilerToVM.java +++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/CompilerToVM.java @@ -715,12 +715,12 @@ void reprofile(HotSpotResolvedJavaMethodImpl method) { private native void reprofile(HotSpotResolvedJavaMethodImpl method, long methodPointer); /** - * Invalidates {@code nmethodMirror} such that {@link InvalidInstalledCodeException} will be - * raised the next time {@code nmethodMirror} is {@linkplain #executeHotSpotNmethod executed}. - * The {@code nmethod} associated with {@code nmethodMirror} is also made non-entrant and any - * current activations of the {@code nmethod} are deoptimized. + * Updates {@code nmethodMirror} such that {@link InvalidInstalledCodeException} will be raised + * the next time {@code nmethodMirror} is {@linkplain #executeHotSpotNmethod executed}. The + * {@code nmethod} associated with {@code nmethodMirror} is also made non-entrant and if + * {@code deoptimize == true} any current activations of the {@code nmethod} are deoptimized. */ - native void invalidateHotSpotNmethod(HotSpotNmethod nmethodMirror); + native void invalidateHotSpotNmethod(HotSpotNmethod nmethodMirror, boolean deoptimize); /** * Collects the current values of all JVMCI benchmark counters, summed up over all threads. 
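As an illustration of the API change above, a JVMCI client could now choose between lazy and eager invalidation roughly as follows. This is a minimal, hypothetical sketch: only invalidate(boolean), isValid() and isAlive() come from the API above; the class and method names are made up.

import jdk.vm.ci.code.InstalledCode;

class InvalidationSketch {
    // Lazy: block new invocations, but let activations that are already
    // running continue to completion (newly possible via the deoptimize flag).
    static void retireLazily(InstalledCode code) {
        code.invalidate(false);
        assert !code.isValid() && !code.isAlive(); // per the javadoc above
    }

    // Eager: block new invocations and deoptimize all current activations
    // immediately; equivalent to the no-arg invalidate().
    static void retireEagerly(InstalledCode code) {
        code.invalidate(true);
    }
}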
diff --git a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotCodeCacheProvider.java b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotCodeCacheProvider.java index 42a5b2a5c94b1..a0130d1bda9e9 100644 --- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotCodeCacheProvider.java +++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotCodeCacheProvider.java @@ -157,7 +157,8 @@ public InstalledCode installCode(ResolvedJavaMethod method, CompiledCode compile @Override public void invalidateInstalledCode(InstalledCode installedCode) { if (installedCode instanceof HotSpotNmethod) { - runtime.getCompilerToVM().invalidateHotSpotNmethod((HotSpotNmethod) installedCode); + HotSpotNmethod nmethod = (HotSpotNmethod) installedCode; + nmethod.invalidate(true); } else { throw new IllegalArgumentException("Cannot invalidate a " + Objects.requireNonNull(installedCode).getClass().getName()); } diff --git a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotNmethod.java b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotNmethod.java index a18f7b556fafa..9ed03f9749e3a 100644 --- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotNmethod.java +++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotNmethod.java @@ -123,8 +123,8 @@ public ResolvedJavaMethod getMethod() { } @Override - public void invalidate() { - compilerToVM().invalidateHotSpotNmethod(this); + public void invalidate(boolean deoptimize) { + compilerToVM().invalidateHotSpotNmethod(this, deoptimize); } @Override diff --git a/src/jdk.jfr/share/conf/jfr/default.jfc b/src/jdk.jfr/share/conf/jfr/default.jfc index 71b09bd6fac9b..1c7b55dbf55d9 100644 --- a/src/jdk.jfr/share/conf/jfr/default.jfc +++ b/src/jdk.jfr/share/conf/jfr/default.jfc @@ -540,21 +540,6 @@ true - - true - beginChunk - - - - true - everyChunk - - - - true - 100 ms - - true beginChunk diff --git a/src/jdk.jfr/share/conf/jfr/profile.jfc b/src/jdk.jfr/share/conf/jfr/profile.jfc index 070e5592edda7..d8f51ed8d3de4 100644 --- a/src/jdk.jfr/share/conf/jfr/profile.jfc +++ b/src/jdk.jfr/share/conf/jfr/profile.jfc @@ -540,21 +540,6 @@ true - - true - beginChunk - - - - true - everyChunk - - - - true - 100 ms - - true beginChunk diff --git a/src/utils/IdealGraphVisualizer/Coordinator/src/main/java/com/sun/hotspot/igv/coordinator/OutlineTopComponent.java b/src/utils/IdealGraphVisualizer/Coordinator/src/main/java/com/sun/hotspot/igv/coordinator/OutlineTopComponent.java index 104b662ecf617..9962d0ef713e8 100644 --- a/src/utils/IdealGraphVisualizer/Coordinator/src/main/java/com/sun/hotspot/igv/coordinator/OutlineTopComponent.java +++ b/src/utils/IdealGraphVisualizer/Coordinator/src/main/java/com/sun/hotspot/igv/coordinator/OutlineTopComponent.java @@ -33,11 +33,13 @@ import com.sun.hotspot.igv.util.LookupHistory; import com.sun.hotspot.igv.view.EditorTopComponent; import java.awt.BorderLayout; +import java.awt.Dimension; import java.io.IOException; import java.io.ObjectInput; import java.io.ObjectOutput; import java.io.Serializable; -import javax.swing.*; +import javax.swing.SwingUtilities; +import javax.swing.UIManager; import javax.swing.border.Border; import org.openide.ErrorManager; import org.openide.actions.GarbageCollectAction; @@ -46,12 +48,7 @@ import org.openide.explorer.ExplorerManager; import 
org.openide.explorer.ExplorerUtils; import org.openide.explorer.view.BeanTreeView; -import org.openide.util.Exceptions; -import org.openide.util.Lookup; -import org.openide.util.LookupEvent; -import org.openide.util.LookupListener; -import org.openide.util.NbBundle; -import org.openide.util.Utilities; +import org.openide.util.*; import org.openide.util.actions.NodeAction; import org.openide.windows.TopComponent; import org.openide.windows.WindowManager; @@ -97,8 +94,9 @@ private void initListView() { private void initToolbar() { Toolbar toolbar = new Toolbar(); - Border b = (Border) UIManager.get("Nb.Editor.Toolbar.border"); //NOI18N - toolbar.setBorder(b); + toolbar.setBorder((Border) UIManager.get("Nb.Editor.Toolbar.border")); //NOI18N + toolbar.setMinimumSize(new Dimension(0,0)); // MacOS BUG with ToolbarWithOverflow + this.add(toolbar, BorderLayout.NORTH); toolbar.add(ImportAction.get(ImportAction.class)); diff --git a/src/utils/IdealGraphVisualizer/FilterWindow/src/main/java/com/sun/hotspot/igv/filterwindow/FilterTopComponent.java b/src/utils/IdealGraphVisualizer/FilterWindow/src/main/java/com/sun/hotspot/igv/filterwindow/FilterTopComponent.java index f1206d1c9e83b..ea484efcd63c7 100644 --- a/src/utils/IdealGraphVisualizer/FilterWindow/src/main/java/com/sun/hotspot/igv/filterwindow/FilterTopComponent.java +++ b/src/utils/IdealGraphVisualizer/FilterWindow/src/main/java/com/sun/hotspot/igv/filterwindow/FilterTopComponent.java @@ -31,13 +31,17 @@ import com.sun.hotspot.igv.filter.FilterSetting; import com.sun.hotspot.igv.filterwindow.actions.*; import java.awt.BorderLayout; +import java.awt.Dimension; import java.awt.event.ActionEvent; import java.awt.event.ActionListener; import java.io.*; import java.util.*; import java.util.logging.Level; import java.util.logging.Logger; -import javax.script.*; +import javax.script.ScriptContext; +import javax.script.ScriptEngine; +import javax.script.ScriptEngineManager; +import javax.script.ScriptException; import javax.swing.JComboBox; import javax.swing.UIManager; import javax.swing.border.Border; @@ -360,8 +364,9 @@ private FilterTopComponent() { ToolbarPool.getDefault().setPreferredIconSize(16); Toolbar toolBar = new Toolbar(); - Border b = (Border) UIManager.get("Nb.Editor.Toolbar.border"); //NOI18N - toolBar.setBorder(b); + toolBar.setBorder((Border) UIManager.get("Nb.Editor.Toolbar.border")); //NOI18N + toolBar.setMinimumSize(new Dimension(0,0)); // MacOS BUG with ToolbarWithOverflow + comboBox = new JComboBox(); toolBar.add(comboBox); this.add(toolBar, BorderLayout.NORTH); diff --git a/src/utils/IdealGraphVisualizer/View/src/main/java/com/sun/hotspot/igv/view/EditorTopComponent.java b/src/utils/IdealGraphVisualizer/View/src/main/java/com/sun/hotspot/igv/view/EditorTopComponent.java index f3f8259f94dee..432d3eebd306a 100644 --- a/src/utils/IdealGraphVisualizer/View/src/main/java/com/sun/hotspot/igv/view/EditorTopComponent.java +++ b/src/utils/IdealGraphVisualizer/View/src/main/java/com/sun/hotspot/igv/view/EditorTopComponent.java @@ -23,13 +23,14 @@ */ package com.sun.hotspot.igv.view; -import com.sun.hotspot.igv.data.ChangedEvent; -import com.sun.hotspot.igv.data.ChangedListener; -import com.sun.hotspot.igv.data.GraphDocument; -import com.sun.hotspot.igv.data.Group; -import com.sun.hotspot.igv.data.InputNode; -import com.sun.hotspot.igv.data.InputBlock; +import com.lowagie.text.Document; +import com.lowagie.text.Rectangle; +import com.lowagie.text.pdf.PdfContentByte; +import com.lowagie.text.pdf.PdfGraphics2D; +import 
com.lowagie.text.pdf.PdfTemplate; +import com.lowagie.text.pdf.PdfWriter; import com.sun.hotspot.igv.data.Properties; +import com.sun.hotspot.igv.data.*; import com.sun.hotspot.igv.data.Properties.PropertyMatcher; import com.sun.hotspot.igv.data.services.InputGraphProvider; import com.sun.hotspot.igv.filter.FilterChain; @@ -37,16 +38,15 @@ import com.sun.hotspot.igv.graph.Diagram; import com.sun.hotspot.igv.graph.Figure; import com.sun.hotspot.igv.graph.services.DiagramProvider; +import com.sun.hotspot.igv.settings.Settings; import com.sun.hotspot.igv.util.LookupHistory; import com.sun.hotspot.igv.util.RangeSlider; -import com.sun.hotspot.igv.settings.Settings; import com.sun.hotspot.igv.view.actions.*; import java.awt.*; import java.awt.event.ActionEvent; import java.awt.event.HierarchyBoundsListener; import java.awt.event.HierarchyEvent; import java.awt.event.KeyEvent; -import java.awt.event.KeyListener; import java.beans.PropertyChangeEvent; import java.beans.PropertyChangeListener; import java.io.*; @@ -58,13 +58,6 @@ import org.apache.batik.dom.GenericDOMImplementation; import org.apache.batik.svggen.SVGGeneratorContext; import org.apache.batik.svggen.SVGGraphics2D; -import com.lowagie.text.Document; -import com.lowagie.text.Rectangle; -import com.lowagie.text.pdf.PdfWriter; -import com.lowagie.text.pdf.PdfContentByte; -import com.lowagie.text.pdf.PdfTemplate; -import com.lowagie.text.pdf.PdfGraphics2D; -import org.w3c.dom.DOMImplementation; import org.openide.DialogDisplayer; import org.openide.NotifyDescriptor; import org.openide.actions.RedoAction; @@ -79,11 +72,9 @@ import org.openide.util.lookup.AbstractLookup; import org.openide.util.lookup.InstanceContent; import org.openide.util.lookup.ProxyLookup; -import org.openide.windows.Mode; import org.openide.windows.TopComponent; -import org.openide.windows.WindowManager; +import org.w3c.dom.DOMImplementation; -import static java.nio.charset.StandardCharsets.UTF_8; /** * @@ -92,7 +83,6 @@ public final class EditorTopComponent extends TopComponent implements PropertyChangeListener { private DiagramViewer scene; - private Toolbar toolBar; private InstanceContent content; private InstanceContent graphContent; private EnableSeaLayoutAction seaLayoutAction; @@ -110,16 +100,9 @@ public final class EditorTopComponent extends TopComponent implements PropertyCh private RangeSlider rangeSlider; private JToggleButton overviewButton; private JToggleButton hideDuplicatesButton; - - private static final Component quicksearch; - static { - Action searchAction = Utilities.actionsForPath("Actions/Search").get(0); - quicksearch = ((Presenter.Toolbar) searchAction).getToolbarPresenter(); - Dimension preferredSize = quicksearch.getPreferredSize(); - preferredSize = new Dimension((int) preferredSize.getWidth() * 2, (int) preferredSize.getHeight()); - quicksearch.setMinimumSize(preferredSize); // necessary for GTK LAF - quicksearch.setPreferredSize(preferredSize); - } + private JPanel topPanel; + private Toolbar quickSearchToolbar; + private static final JPanel quickSearchPresenter = (JPanel) ((Presenter.Toolbar) Utilities.actionsForPath("Actions/Search").get(0)).getToolbarPresenter(); private static final String PREFERRED_ID = "EditorTopComponent"; private static final String SATELLITE_STRING = "satellite"; private static final String SCENE_STRING = "scene"; @@ -208,9 +191,10 @@ public EditorTopComponent(Diagram diagram) { initComponents(); ToolbarPool.getDefault().setPreferredIconSize(16); - toolBar = new Toolbar(); - Border b = (Border) 
UIManager.get("Nb.Editor.Toolbar.border"); //NOI18N - toolBar.setBorder(b); + Toolbar toolBar = new Toolbar(); + toolBar.setBorder((Border) UIManager.get("Nb.Editor.Toolbar.border")); //NOI18N + toolBar.setMinimumSize(new Dimension(0,0)); // MacOS BUG with ToolbarWithOverflow + JPanel container = new JPanel(); this.add(container, BorderLayout.NORTH); container.setLayout(new BorderLayout()); @@ -324,9 +308,24 @@ public void changed(DiagramViewModel source) { button.setSelected(false); toolBar.add(button); selectionModeAction.addPropertyChangeListener(this); - toolBar.add(Box.createHorizontalGlue()); - toolBar.add(quicksearch); + + quickSearchToolbar = new Toolbar(); + quickSearchToolbar.setLayout(new BoxLayout(quickSearchToolbar, BoxLayout.LINE_AXIS)); + quickSearchToolbar.setBorder((Border) UIManager.get("Nb.Editor.Toolbar.border")); //NOI18N + quickSearchPresenter.setMinimumSize(quickSearchPresenter.getPreferredSize()); + quickSearchPresenter.setAlignmentX(Component.RIGHT_ALIGNMENT); + quickSearchToolbar.add(quickSearchPresenter); + + // Needed for toolBar to use maximal available width + JPanel toolbarPanel = new JPanel(new GridLayout(1, 0)); + toolbarPanel.add(toolBar); + + topPanel = new JPanel(); + topPanel.setLayout(new BoxLayout(topPanel, BoxLayout.LINE_AXIS)); + topPanel.add(toolbarPanel); + topPanel.add(quickSearchToolbar); + container.add(BorderLayout.NORTH, topPanel); centerPanel = new JPanel(); centerPanel.getInputMap(JComponent.WHEN_ANCESTOR_OF_FOCUSED_COMPONENT).put( @@ -457,12 +456,9 @@ public int getPersistenceType() { return TopComponent.PERSISTENCE_NEVER; } - @Override - public void componentOpened() { - } - @Override public void componentClosed() { + super.componentClosed(); rangeSliderModel.close(); } @@ -658,7 +654,6 @@ protected void componentHidden() { @Override protected void componentShowing() { - toolBar.add(quicksearch); super.componentShowing(); scene.componentShowing(); } @@ -669,6 +664,13 @@ public void requestActive() { scene.getComponent().requestFocus(); } + @Override + protected void componentActivated() { + super.componentActivated(); + quickSearchToolbar.add(quickSearchPresenter); + quickSearchPresenter.revalidate(); + } + @Override public UndoRedo getUndoRedo() { return scene.getUndoRedo(); diff --git a/test/hotspot/gtest/code/test_dependencyContext.cpp b/test/hotspot/gtest/code/test_dependencyContext.cpp deleted file mode 100644 index 317a8a39c9bcb..0000000000000 --- a/test/hotspot/gtest/code/test_dependencyContext.cpp +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "code/dependencyContext.hpp"
-#include "code/nmethod.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "unittest.hpp"
-
-class TestDependencyContext {
- public:
-  nmethod _nmethods[3];
-
-  nmethodBucket* volatile _dependency_context;
-  volatile uint64_t _last_cleanup;
-
-  DependencyContext dependencies() {
-    DependencyContext depContext(&_dependency_context, &_last_cleanup);
-    return depContext;
-  }
-
-  TestDependencyContext()
-    : _dependency_context(NULL),
-      _last_cleanup(0) {
-    CodeCache_lock->lock_without_safepoint_check();
-
-    _nmethods[0].clear_unloading_state();
-    _nmethods[1].clear_unloading_state();
-    _nmethods[2].clear_unloading_state();
-
-    dependencies().add_dependent_nmethod(&_nmethods[2]);
-    dependencies().add_dependent_nmethod(&_nmethods[1]);
-    dependencies().add_dependent_nmethod(&_nmethods[0]);
-  }
-
-  ~TestDependencyContext() {
-    wipe();
-    CodeCache_lock->unlock();
-  }
-
-  void wipe() {
-    DependencyContext ctx(&_dependency_context, &_last_cleanup);
-    nmethodBucket* b = ctx.dependencies();
-    ctx.set_dependencies(NULL);
-    while (b != NULL) {
-      nmethodBucket* next = b->next();
-      delete b;
-      b = next;
-    }
-  }
-};
-
-static void test_remove_dependent_nmethod(int id) {
-  TestDependencyContext c;
-  DependencyContext depContext = c.dependencies();
-
-  nmethod* nm = &c._nmethods[id];
-  depContext.remove_dependent_nmethod(nm);
-
-  ASSERT_FALSE(depContext.is_dependent_nmethod(nm));
-}
-
-TEST_VM(code, dependency_context) {
-  test_remove_dependent_nmethod(0);
-  test_remove_dependent_nmethod(1);
-  test_remove_dependent_nmethod(2);
-}
diff --git a/test/hotspot/gtest/opto/test_moveBits.cpp b/test/hotspot/gtest/opto/test_moveBits.cpp
new file mode 100644
index 0000000000000..d25f18ddb68a1
--- /dev/null
+++ b/test/hotspot/gtest/opto/test_moveBits.cpp
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/moveBits.hpp"
+#include "unittest.hpp"
+
+template <typename T>
+inline void test_moveBits() {
+  const int NBIT = sizeof(T) * 8;
+  const bool IS_U = (T)-1 > 0;
+  const int XOR_REV_BITS = (NBIT - 1);
+  const int XOR_REV_BITS_IN_BYTES = 7;  // only flip position in byte
+  const int XOR_REV_BYTES = XOR_REV_BITS ^ XOR_REV_BITS_IN_BYTES;
+  printf("testing %sint%d_t...\n", IS_U ? "u" : "", NBIT);
+  ASSERT_EQ(reverse_bits((T)0), (T)0);
+  ASSERT_EQ(reverse_bits((T)-1), (T)-1);
+  ASSERT_EQ(reverse_bytes((T)0), (T)0);
+  ASSERT_EQ(reverse_bytes((T)-1), (T)-1);
+  ASSERT_EQ(reverse_bits_in_bytes((T)0), (T)0);
+  ASSERT_EQ(reverse_bits_in_bytes((T)-1), (T)-1);
+  for (int i1 = 0; i1 < NBIT; i1++) {
+    T mask1 = (T)1 << i1;
+    T revm1 = (T)1 << (i1 ^ XOR_REV_BITS);
+    T rbym1 = (T)1 << (i1 ^ XOR_REV_BYTES);
+    T ribm1 = (T)1 << (i1 ^ XOR_REV_BITS_IN_BYTES);
+    for (int i2 = 0; i2 <= i1; i2++) {
+      T mask2 = (T)1 << i2;
+      T revm2 = (T)1 << (i2 ^ XOR_REV_BITS);
+      T rbym2 = (T)1 << (i2 ^ XOR_REV_BYTES);
+      T ribm2 = (T)1 << (i2 ^ XOR_REV_BITS_IN_BYTES);
+      T mask = mask1|mask2;
+#define STUFF (IS_U?"u":"s") << NBIT << "@" << i1 << "," << i2
+      ASSERT_EQ(reverse_bits(mask), revm1|revm2) << STUFF;
+      ASSERT_EQ((T)~reverse_bits((T)~mask), revm1|revm2) << STUFF;
+      ASSERT_EQ(reverse_bytes(mask), rbym1|rbym2) << STUFF;
+      ASSERT_EQ((T)~reverse_bytes((T)~mask), rbym1|rbym2) << STUFF;
+      ASSERT_EQ(reverse_bits_in_bytes(mask), ribm1|ribm2) << STUFF;
+      ASSERT_EQ((T)~reverse_bits_in_bytes((T)~mask), ribm1|ribm2) << STUFF;
+    }
+  }
+}
+
+TEST_VM(opto, moveBits) {
+  test_moveBits<int8_t>();
+  test_moveBits<int16_t>();
+  test_moveBits<int32_t>();
+  test_moveBits<int64_t>();
+  test_moveBits<uint8_t>();
+  test_moveBits<uint16_t>();
+  test_moveBits<uint32_t>();
+  test_moveBits<uint64_t>();
+}
+
+// Here is some object code to look at if we want to do a manual
+// study. One could find the build file named test_moveBits.o.cmdline
+// and hand-edit the command line to produce assembly code in
+// test_moveBits.s.
+//
+// Or, given the two empty "fence functions", one could do a
+// quick scan like this:
+//
+// $ objdump -D $(find build/*release -name test_moveBits.o) \
+//     | sed -n '/start_code_quality/,$p;/end_code_quality/q' \
+//     | egrep -B10 bswap  # or grep -B20 cfi_endproc
+
+void start_code_quality_moveBits() { }
+
+int32_t code_quality_reverse_bits_32(int32_t x) {
+  return reverse_bits(x);
+}
+
+int32_t code_quality_reverse_bytes_32(int32_t x) {
+  return reverse_bytes(x);
+}
+
+int32_t code_quality_reverse_bits_in_bytes_32(int32_t x) {
+  return reverse_bits_in_bytes(x);
+}
+
+int64_t code_quality_reverse_bits_64(int64_t x) {
+  return reverse_bits(x);
+}
+
+int64_t code_quality_reverse_bytes_64(int64_t x) {
+  return reverse_bytes(x);
+}
+
+int64_t code_quality_reverse_bits_in_bytes_64(int64_t x) {
+  return reverse_bits_in_bytes(x);
+}
+
+void end_code_quality_moveBits() { }
diff --git a/test/hotspot/jtreg/ProblemList.txt b/test/hotspot/jtreg/ProblemList.txt
index 0bb3b09f9bb8c..5efabd7339310 100644
--- a/test/hotspot/jtreg/ProblemList.txt
+++ b/test/hotspot/jtreg/ProblemList.txt
@@ -149,6 +149,7 @@ vmTestbase/nsk/jvmti/SetJNIFunctionTable/setjniftab001/TestDescription.java 8219
 vmTestbase/nsk/jvmti/AttachOnDemand/attach002a/TestDescription.java 8277812 generic-all
 vmTestbase/nsk/jvmti/scenarios/capability/CM03/cm03t001/TestDescription.java 8073470 linux-all
+vmTestbase/gc/lock/jni/jnilock001/TestDescription.java 8292946 generic-all
 vmTestbase/gc/lock/jni/jnilock002/TestDescription.java 8192647 generic-all
 vmTestbase/jit/escape/LockCoarsening/LockCoarsening001.java 8148743 generic-all
diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/ppc64/ProcPPC64ThreadContext.java b/test/hotspot/jtreg/compiler/c1/BadStateAtLongCmp.jasm
similarity index 54%
rename from src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/ppc64/ProcPPC64ThreadContext.java
rename to test/hotspot/jtreg/compiler/c1/BadStateAtLongCmp.jasm
index d65c4defcb99b..5ff8986fb05fb 100644
---
a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/ppc64/ProcPPC64ThreadContext.java +++ b/test/hotspot/jtreg/compiler/c1/BadStateAtLongCmp.jasm @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, Red Hat, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -19,28 +19,51 @@ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. - * */ -package sun.jvm.hotspot.debugger.proc.ppc64; - -import sun.jvm.hotspot.debugger.*; -import sun.jvm.hotspot.debugger.ppc64.*; -import sun.jvm.hotspot.debugger.proc.*; +super public class BadStateAtLongCmp + version 52:0 +{ + public static Field field:I; -public class ProcPPC64ThreadContext extends PPC64ThreadContext { - private ProcDebugger debugger; - - public ProcPPC64ThreadContext(ProcDebugger debugger) { - super(); - this.debugger = debugger; + public Method "":"()V" + stack 1 locals 1 + { + aload_0; + invokespecial Method java/lang/Object."":"()V"; + return; } - public void setRegisterAsAddress(int index, Address value) { - setRegister(index, debugger.getAddressValue(value)); + /* Same as: + public static void test() { + long l = 0; + do { + l++; + field++; + } while (l < 1000); + } + but with field++ between the lcmp and iflt bytecodes. + */ + public static Method test:"()V" + stack 4 locals 2 + { + lconst_0; + lstore_0; + L2: stack_frame_type append; + locals_map long; + lload_0; + lconst_1; + ladd; + lstore_0; + lload_0; + ldc2_w long 1000l; + lcmp; + getstatic Field field:"I"; + iconst_1; + iadd; + putstatic Field field:"I"; + iflt L2; + return; } - public Address getRegisterAsAddress(int index) { - return debugger.newAddress(getRegister(index)); - } -} +} // end Class BadStateAtLongCmp diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/x86/ProcX86ThreadFactory.java b/test/hotspot/jtreg/compiler/c1/TestBadStateAtLongCmp.java similarity index 60% rename from src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/x86/ProcX86ThreadFactory.java rename to test/hotspot/jtreg/compiler/c1/TestBadStateAtLongCmp.java index 02b06fc7d32db..39df5a0b7c54c 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/proc/x86/ProcX86ThreadFactory.java +++ b/test/hotspot/jtreg/compiler/c1/TestBadStateAtLongCmp.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, Red Hat, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -19,26 +19,25 @@ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. 
- * */ -package sun.jvm.hotspot.debugger.proc.x86; - -import sun.jvm.hotspot.debugger.*; -import sun.jvm.hotspot.debugger.proc.*; - -public class ProcX86ThreadFactory implements ProcThreadFactory { - private ProcDebugger debugger; - - public ProcX86ThreadFactory(ProcDebugger debugger) { - this.debugger = debugger; - } +/* + * @test + * @bug 8290451 + * @summary Incorrect result when switching to C2 OSR compilation from C1 + * @compile BadStateAtLongCmp.jasm + * @run main/othervm -Xbatch TestBadStateAtLongCmp + */ - public ThreadProxy createThreadWrapper(Address threadIdentifierAddr) { - return new ProcX86Thread(debugger, threadIdentifierAddr); - } +public class TestBadStateAtLongCmp { - public ThreadProxy createThreadWrapper(long id) { - return new ProcX86Thread(debugger, id); - } + public static void main(String[] args) { + for (int i = 0; i < 20_000; i++) { + BadStateAtLongCmp.test(); + } + int expected = 20_000 * 1000; + if (BadStateAtLongCmp.field != expected) { + throw new RuntimeException("test failed: " + BadStateAtLongCmp.field + " != " + expected); + } + } } diff --git a/test/hotspot/jtreg/compiler/c2/TestMulNodeInfiniteGVN.java b/test/hotspot/jtreg/compiler/c2/TestMulNodeInfiniteGVN.java new file mode 100644 index 0000000000000..7de82151f1a69 --- /dev/null +++ b/test/hotspot/jtreg/compiler/c2/TestMulNodeInfiniteGVN.java @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2022, Arm Limited. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @key stress randomness + * @bug 8291466 + * @summary Infinite loop in PhaseIterGVN::transform_old with -XX:+StressIGVN + * @requires vm.compiler2.enabled + * @run main/othervm -Xbatch -XX:-TieredCompilation + * -XX:+UnlockDiagnosticVMOptions -XX:+StressIGVN + * -XX:StressSeed=1 compiler.c2.TestMulNodeInfiniteGVN + */ + +package compiler.c2; + +public class TestMulNodeInfiniteGVN { + + private static int fun() { + int sum = 0; + for (int c = 0; c < 50000; c++) { + int x = 9; + while ((x += 2) < 12) { + for (int k = 1; k < 2; k++) { + sum += x * k; + } + } + int y = 11; + while ((y += 2) < 14) { + for (int k = 1; k < 2; k++) { + sum += y * k; + } + } + int z = 17; + while ((z += 2) < 20) { + for (int k = 1; k < 2; k++) { + sum += z * k; + } + } + } + return sum; + } + + public static void main(String[] args) { + fun(); + } +} diff --git a/test/hotspot/jtreg/compiler/ccp/TestInfiniteIGVNAfterCCP.java b/test/hotspot/jtreg/compiler/ccp/TestInfiniteIGVNAfterCCP.java new file mode 100644 index 0000000000000..cb4a459105f38 --- /dev/null +++ b/test/hotspot/jtreg/compiler/ccp/TestInfiniteIGVNAfterCCP.java @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2022, Red Hat, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8290711 + * @summary assert(false) failed: infinite loop in PhaseIterGVN::optimize + * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:-TieredCompilation TestInfiniteIGVNAfterCCP + */ + + +import java.util.function.BooleanSupplier; + +public class TestInfiniteIGVNAfterCCP { + private static int inc; + private static volatile boolean barrier; + + static class A { + int field1; + int field2; + } + + public static void main(String[] args) { + A a = new A(); + for (int i = 0; i < 20_000; i++) { + test(false, a, false); + inc = 0; + testHelper(true, () -> inc < 10, a, 4, true); + inc = 0; + testHelper(true, () -> inc < 10, a, 4, false); + testHelper(false, () -> inc < 10, a, 42, false); + } + } + + private static void test(boolean flag2, A a, boolean flag1) { + int i = 2; + for (; i < 4; i *= 2); + testHelper(flag2, () -> true, a, i, flag1); + } + + private static void testHelper(boolean flag2, BooleanSupplier f, A a, int i, boolean flag1) { + if (i == 4) { + if (a == null) { + + } + } else { + a = null; + } + if (flag2) { + while (true) { + synchronized (new Object()) { + + } + if (!f.getAsBoolean()) { + break; + } + if (flag1) { + if (a == null) { + + } + } + barrier = true; + inc++; + if (inc % 2 == 0) { + a.field1++; + } + } + } + } +} diff --git a/test/hotspot/jtreg/compiler/codecache/CheckCodeCacheInfo.java b/test/hotspot/jtreg/compiler/codecache/CheckCodeCacheInfo.java index 26f563788f847..9b2c9b316ac55 100644 --- a/test/hotspot/jtreg/compiler/codecache/CheckCodeCacheInfo.java +++ b/test/hotspot/jtreg/compiler/codecache/CheckCodeCacheInfo.java @@ -43,8 +43,7 @@ public class CheckCodeCacheInfo { static { String entry = "\\d+K( \\(hdr \\d+K \\d+%, loc \\d+K \\d+%, code \\d+K \\d+%, stub \\d+K \\d+%, \\[oops \\d+K \\d+%, metadata \\d+K \\d+%, data \\d+K \\d+%, pcs \\d+K \\d+%\\]\\))?\\n"; - String pair = " #\\d+ live = " + entry - + " #\\d+ dead = " + entry; + String pair = " #\\d+ live = " + entry; VERBOSE_REGEXP = "nmethod blobs per compilation level:\\n" + "none:\\n" diff --git a/test/hotspot/jtreg/compiler/codecache/OverflowCodeCacheTest.java b/test/hotspot/jtreg/compiler/codecache/OverflowCodeCacheTest.java index 5862cc4f97c6d..cf993237a32ec 100644 --- a/test/hotspot/jtreg/compiler/codecache/OverflowCodeCacheTest.java +++ b/test/hotspot/jtreg/compiler/codecache/OverflowCodeCacheTest.java @@ -120,10 +120,9 @@ private void test() { WHITE_BOX.freeCodeBlob(blob); } - // Convert some nmethods to zombie and then free them to re-enable compilation + // Let the GC free nmethods and re-enable compilation WHITE_BOX.unlockCompilation(); - WHITE_BOX.forceNMethodSweep(); - WHITE_BOX.forceNMethodSweep(); + WHITE_BOX.fullGC(); // Trigger compilation of Helper::method which will hit an assert because // adapter creation failed above due to a lack of code cache space. diff --git a/test/hotspot/jtreg/compiler/exceptions/OptimizeImplicitExceptions.java b/test/hotspot/jtreg/compiler/exceptions/OptimizeImplicitExceptions.java index 1a26bcde9f73b..ca61e95a68fa3 100644 --- a/test/hotspot/jtreg/compiler/exceptions/OptimizeImplicitExceptions.java +++ b/test/hotspot/jtreg/compiler/exceptions/OptimizeImplicitExceptions.java @@ -112,13 +112,10 @@ public static Object throwImplicitException(ImplicitException type, Object[] obj return null; } - // Completely unload (i.e. make "not-entrant"->"zombie"->"unload/free") a JIT-compiled + // Completely unload (i.e. 
make "not-entrant"->free) a JIT-compiled // version of a method and clear the method's profiling counters. private static void unloadAndClean(Method m) { WB.deoptimizeMethod(m); // Makes the nmethod "not entrant". - WB.forceNMethodSweep(); // Makes all "not entrant" nmethods "zombie". This requires - WB.forceNMethodSweep(); // two sweeps, see 'nmethod::can_convert_to_zombie()' for why. - WB.forceNMethodSweep(); // Need third sweep to actually unload/free all "zombie" nmethods. System.gc(); WB.clearMethodState(m); } diff --git a/test/hotspot/jtreg/compiler/jsr292/ContinuousCallSiteTargetChange.java b/test/hotspot/jtreg/compiler/jsr292/ContinuousCallSiteTargetChange.java index e877e9aa3d954..e77df166da1e0 100644 --- a/test/hotspot/jtreg/compiler/jsr292/ContinuousCallSiteTargetChange.java +++ b/test/hotspot/jtreg/compiler/jsr292/ContinuousCallSiteTargetChange.java @@ -171,7 +171,7 @@ public static void main(String[] args) throws Throwable { WhiteBox whiteBox = WhiteBox.getWhiteBox(); for (int i = 0; i < iterations; i++) { iteration(); - whiteBox.forceNMethodSweep(); + whiteBox.fullGC(); } } } diff --git a/test/hotspot/jtreg/compiler/jvmci/common/patches/jdk.internal.vm.ci/jdk/vm/ci/hotspot/CompilerToVMHelper.java b/test/hotspot/jtreg/compiler/jvmci/common/patches/jdk.internal.vm.ci/jdk/vm/ci/hotspot/CompilerToVMHelper.java index 222031e535004..b38436119f35f 100644 --- a/test/hotspot/jtreg/compiler/jvmci/common/patches/jdk.internal.vm.ci/jdk/vm/ci/hotspot/CompilerToVMHelper.java +++ b/test/hotspot/jtreg/compiler/jvmci/common/patches/jdk.internal.vm.ci/jdk/vm/ci/hotspot/CompilerToVMHelper.java @@ -244,7 +244,7 @@ public static void reprofile(HotSpotResolvedJavaMethod method) { } public static void invalidateHotSpotNmethod(HotSpotNmethod nmethodMirror) { - CTVM.invalidateHotSpotNmethod(nmethodMirror); + CTVM.invalidateHotSpotNmethod(nmethodMirror, true); } public static long[] collectCounters() { diff --git a/test/hotspot/jtreg/compiler/loopstripmining/TestLSMBadControlOverride.java b/test/hotspot/jtreg/compiler/loopstripmining/TestLSMBadControlOverride.java new file mode 100644 index 0000000000000..0f3ebea714767 --- /dev/null +++ b/test/hotspot/jtreg/compiler/loopstripmining/TestLSMBadControlOverride.java @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2022, Red Hat, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8290781 + * @summary Segfault at PhaseIdealLoop::clone_loop_handle_data_uses + * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:-TieredCompilation TestLSMBadControlOverride + */ + +public class TestLSMBadControlOverride { + private static volatile int barrier; + + public static void main(String[] args) { + int[] array = new int[100]; + int[] small = new int[10]; + for (int i = 0; i < 20_000; i++) { + test(array, array, true, true); + test(array, array, true, false); + test(array, array, false, false); + try { + test(small, array,true, true); + } catch (ArrayIndexOutOfBoundsException aieoobe) { + + } + } + } + + private static int test(int[] array, int[] array2, boolean flag1, boolean flag2) { + int i; + int v = 0; + int v1 = 0; + for (i = 0; i < 100; i++) { + v1 = array[i]; + } + v += v1; + if (flag1) { + if (flag2) { + barrier = 42; + } + } + for (int j = 0; j < 100; j++) { + array[j] = j; + v += array[i-1]; + } + return v; + } +} diff --git a/test/hotspot/jtreg/compiler/whitebox/AllocationCodeBlobTest.java b/test/hotspot/jtreg/compiler/whitebox/AllocationCodeBlobTest.java index 26e322b43304d..29168a1e7e87e 100644 --- a/test/hotspot/jtreg/compiler/whitebox/AllocationCodeBlobTest.java +++ b/test/hotspot/jtreg/compiler/whitebox/AllocationCodeBlobTest.java @@ -58,10 +58,10 @@ public class AllocationCodeBlobTest { private static final int SIZE = 1; public static void main(String[] args) { - // check that Sweeper handels dummy blobs correctly + // check that code unloading handles dummy blobs correctly Thread t = new Thread( - new InfiniteLoop(WHITE_BOX::forceNMethodSweep, 1L), - "ForcedSweeper"); + new InfiniteLoop(WHITE_BOX::fullGC, 1L), + "ForcedGC"); t.setDaemon(true); System.out.println("Starting " + t.getName()); t.start(); diff --git a/test/hotspot/jtreg/compiler/whitebox/ForceNMethodSweepTest.java b/test/hotspot/jtreg/compiler/whitebox/ForceNMethodSweepTest.java index 8f61367b07d93..9d6f1a074e714 100644 --- a/test/hotspot/jtreg/compiler/whitebox/ForceNMethodSweepTest.java +++ b/test/hotspot/jtreg/compiler/whitebox/ForceNMethodSweepTest.java @@ -69,7 +69,7 @@ protected void test() throws Exception { Asserts.assertLT(-1, 0, "message"); checkNotCompiled(); - guaranteedSweep(); + WHITE_BOX.fullGC(); int usage = getTotalUsage(); compile(); @@ -78,13 +78,13 @@ protected void test() throws Exception { Asserts.assertGT(afterCompilation, usage, "compilation should increase usage"); - guaranteedSweep(); + WHITE_BOX.fullGC(); int afterSweep = getTotalUsage(); Asserts.assertLTE(afterSweep, afterCompilation, "sweep shouldn't increase usage"); deoptimize(); - guaranteedSweep(); + WHITE_BOX.fullGC(); int afterDeoptAndSweep = getTotalUsage(); Asserts.assertLT(afterDeoptAndSweep, afterSweep, "sweep after deoptimization should decrease usage"); @@ -97,11 +97,4 @@ private int getTotalUsage() { } return usage; } - private void guaranteedSweep() { - // not entrant -> ++stack_traversal_mark -> zombie -> flushed - for (int i = 0; i < 5; ++i) { - WHITE_BOX.fullGC(); - WHITE_BOX.forceNMethodSweep(); - } - } } diff --git a/test/hotspot/jtreg/containers/docker/TestMemoryAwareness.java b/test/hotspot/jtreg/containers/docker/TestMemoryAwareness.java index 23235e24fda08..ff5cd23cc65f5 100644 --- a/test/hotspot/jtreg/containers/docker/TestMemoryAwareness.java +++ b/test/hotspot/jtreg/containers/docker/TestMemoryAwareness.java @@ -24,6 +24,7 @@ /* * @test + * @bug 8146115 8292083 * @key cgroups * @summary Test JVM's memory resource awareness when running 
inside docker container * @requires docker.support @@ -41,9 +42,18 @@ import jdk.test.lib.containers.docker.DockerTestUtils; import jdk.test.lib.process.OutputAnalyzer; +import static jdk.test.lib.Asserts.assertNotNull; + public class TestMemoryAwareness { private static final String imageName = Common.imageName("memory"); + private static String getHostMaxMemory() throws Exception { + DockerRunOptions opts = Common.newOpts(imageName); + String goodMem = Common.run(opts).firstMatch("total physical memory: (\\d+)", 1); + assertNotNull(goodMem, "no match for 'total physical memory' in trace output"); + return goodMem; + } + public static void main(String[] args) throws Exception { if (!DockerTestUtils.canTestDocker()) { return; @@ -76,6 +86,10 @@ public static void main(String[] args) throws Exception { "1G", Integer.toString(((int) Math.pow(2, 20)) * 1024), "1500M", Integer.toString(((int) Math.pow(2, 20)) * (1500 - 1024)) ); + final String hostMaxMem = getHostMaxMemory(); + testOperatingSystemMXBeanIgnoresMemLimitExceedingPhysicalMemory(hostMaxMem); + testMetricsIgnoresMemLimitExceedingPhysicalMemory(hostMaxMem); + testContainerMemExceedsPhysical(hostMaxMem); } finally { if (!DockerTestUtils.RETAIN_IMAGE_AFTER_TEST) { DockerTestUtils.removeDockerImage(imageName); @@ -96,6 +110,20 @@ private static void testMemoryLimit(String valueToSet, String expectedTraceValue .shouldMatch("Memory Limit is:.*" + expectedTraceValue); } + // JDK-8292083 + // Ensure that Java ignores container memory limit values above the host's physical memory. + private static void testContainerMemExceedsPhysical(final String hostMaxMem) + throws Exception { + Common.logNewTestCase("container memory limit exceeds physical memory"); + String badMem = hostMaxMem + "0"; + // set a container memory limit to the bad value + DockerRunOptions opts = Common.newOpts(imageName) + .addDockerOpts("--memory", badMem); + + Common.run(opts) + .shouldMatch("container memory limit (ignored: " + badMem + "|unlimited: -1), using host value " + hostMaxMem); + } + private static void testMemorySoftLimit(String valueToSet, String expectedTraceValue) throws Exception { @@ -174,4 +202,23 @@ private static void testOperatingSystemMXBeanAwareness(String memoryAllocation, } } + + // JDK-8292541: Ensure OperatingSystemMXBean ignores container memory limits above the host's physical memory. + private static void testOperatingSystemMXBeanIgnoresMemLimitExceedingPhysicalMemory(final String hostMaxMem) + throws Exception { + String badMem = hostMaxMem + "0"; + testOperatingSystemMXBeanAwareness(badMem, hostMaxMem, badMem, hostMaxMem); + } + + // JDK-8292541: Ensure Metrics ignores container memory limits above the host's physical memory. + private static void testMetricsIgnoresMemLimitExceedingPhysicalMemory(final String hostMaxMem) + throws Exception { + Common.logNewTestCase("Metrics ignore container memory limit exceeding physical memory"); + String badMem = hostMaxMem + "0"; + DockerRunOptions opts = Common.newOpts(imageName) + .addJavaOpts("-XshowSettings:system") + .addDockerOpts("--memory", badMem); + + DockerTestUtils.dockerRunJava(opts).shouldMatch("Memory Limit: Unlimited"); + } } diff --git a/test/hotspot/jtreg/runtime/CommandLine/PrintTouchedMethods.java b/test/hotspot/jtreg/runtime/CommandLine/PrintTouchedMethods.java deleted file mode 100644 index a5900132b758a..0000000000000 --- a/test/hotspot/jtreg/runtime/CommandLine/PrintTouchedMethods.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. 
All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -/* - * @test - * @bug 8025692 8273333 - * @requires vm.flavor != "zero" - * @modules java.base/jdk.internal.misc - * java.management - * @library /test/lib - * @run driver PrintTouchedMethods - */ - -import java.io.File; -import java.util.List; -import jdk.test.lib.process.ProcessTools; -import jdk.test.lib.process.OutputAnalyzer; -import jdk.test.lib.JDKToolFinder; - -public class PrintTouchedMethods { - - public static void main(String args[]) throws Exception { - ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( - "-XX:-UnlockDiagnosticVMOptions", - "-XX:+LogTouchedMethods", - "-XX:+PrintTouchedMethodsAtExit", - TestLogTouchedMethods.class.getName()); - - // UnlockDiagnostic turned off, should fail - OutputAnalyzer output = new OutputAnalyzer(pb.start()); - output.shouldNotHaveExitValue(0); - output.shouldContain("Error: VM option 'LogTouchedMethods' is diagnostic and must be enabled via -XX:+UnlockDiagnosticVMOptions."); - output.shouldContain("Error: Could not create the Java Virtual Machine."); - - pb = ProcessTools.createJavaProcessBuilder( - "-XX:+UnlockDiagnosticVMOptions", - "-XX:+LogTouchedMethods", - "-XX:+PrintTouchedMethodsAtExit", - TestLogTouchedMethods.class.getName()); - output = new OutputAnalyzer(pb.start()); - // check order: - // 1 "# Method::print_touched_methods version 1" is the first in first line - // 2 should contain TestLogMethods.methodA:()V - // 3 should not contain TestLogMethods.methodB:()V - // Repeat above for another run with -Xint - List lines = output.asLines(); - - if (lines.size() < 1) { - throw new Exception("Empty output"); - } - - String first = lines.get(0); - if (!first.equals("# Method::print_touched_methods version 1")) { - throw new Exception("First line mismatch"); - } - - output.shouldContain("TestLogTouchedMethods.methodA:()V"); - output.shouldNotContain("TestLogTouchedMethods.methodB:()V"); - output.shouldHaveExitValue(0); - - pb = ProcessTools.createJavaProcessBuilder( - "-XX:+UnlockDiagnosticVMOptions", - "-Xint", - "-XX:+LogTouchedMethods", - "-XX:+PrintTouchedMethodsAtExit", - TestLogTouchedMethods.class.getName()); - output = new OutputAnalyzer(pb.start()); - lines = output.asLines(); - - if (lines.size() < 1) { - throw new Exception("Empty output"); - } - - first = lines.get(0); - if (!first.equals("# Method::print_touched_methods version 1")) { - throw new Exception("First line mismatch"); - } - - output.shouldContain("TestLogTouchedMethods.methodA:()V"); - output.shouldNotContain("TestLogTouchedMethods.methodB:()V"); - 
output.shouldHaveExitValue(0); - - pb = ProcessTools.createJavaProcessBuilder( - "-XX:+UnlockDiagnosticVMOptions", - "-Xint", - "-XX:+LogTouchedMethods", - "-XX:+PrintTouchedMethodsAtExit", - "-XX:-TieredCompilation", - TestLogTouchedMethods.class.getName()); - output = new OutputAnalyzer(pb.start()); - lines = output.asLines(); - - if (lines.size() < 1) { - throw new Exception("Empty output"); - } - - first = lines.get(0); - if (!first.equals("# Method::print_touched_methods version 1")) { - throw new Exception("First line mismatch"); - } - - output.shouldContain("TestLogTouchedMethods.methodA:()V"); - output.shouldNotContain("TestLogTouchedMethods.methodB:()V"); - output.shouldHaveExitValue(0); - } -} diff --git a/test/hotspot/jtreg/runtime/CommandLine/PrintTouchedMethodsJcmd.java b/test/hotspot/jtreg/runtime/CommandLine/PrintTouchedMethodsJcmd.java deleted file mode 100644 index 26f0c3ca0dc2b..0000000000000 --- a/test/hotspot/jtreg/runtime/CommandLine/PrintTouchedMethodsJcmd.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -/* - * @test - * @bug 8025692 - * @summary Test jcmd PrintTouchedMethods VM.print_touched_methods - * @modules java.base/jdk.internal.misc - * java.management - * @library /test/lib - * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+LogTouchedMethods PrintTouchedMethodsJcmd - */ - -import jdk.test.lib.process.OutputAnalyzer; -import jdk.test.lib.JDKToolFinder; - -public class PrintTouchedMethodsJcmd { - - public static void main(String args[]) throws Exception { - var pid = Long.toString(ProcessHandle.current().pid()); - var pb = new ProcessBuilder(); - pb.command(new String[] {JDKToolFinder.getJDKTool("jcmd"), pid, "VM.print_touched_methods"}); - var output = new OutputAnalyzer(pb.start()); - output.shouldContain("PrintTouchedMethodsJcmd.main:([Ljava/lang/String;)V"); - } -} diff --git a/test/hotspot/jtreg/runtime/CommandLine/TestLogTouchedMethods.java b/test/hotspot/jtreg/runtime/CommandLine/TestLogTouchedMethods.java deleted file mode 100644 index 57996b383d929..0000000000000 --- a/test/hotspot/jtreg/runtime/CommandLine/TestLogTouchedMethods.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -/* used by PrintTouchedMethods.java */ -public class TestLogTouchedMethods { - public static void main(String[] args) { - new TestLogTouchedMethods().methodA(); - } - - public void methodA() {} // called - public void methodB() {} // this should not be called -} diff --git a/test/hotspot/jtreg/runtime/LoadClass/TestResize.java b/test/hotspot/jtreg/runtime/LoadClass/TestResize.java index ab36afa92bd2d..56ae7e9b78d87 100644 --- a/test/hotspot/jtreg/runtime/LoadClass/TestResize.java +++ b/test/hotspot/jtreg/runtime/LoadClass/TestResize.java @@ -132,7 +132,7 @@ public static void main(String[] args) throws Exception { // -Xlog:safepoint+cleanup will print out cleanup details at safepoint // that will allow us to detect if the system dictionary resized. ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+PrintClassLoaderDataGraphAtExit", - "-Xlog:safepoint+cleanup", + "-Xlog:safepoint+cleanup,class+loader+data", "TriggerResize", String.valueOf(CLASSES_TO_LOAD)); analyzeOutputOn(pb); diff --git a/test/hotspot/jtreg/serviceability/dcmd/vm/DictionaryStatsTest.java b/test/hotspot/jtreg/serviceability/dcmd/vm/DictionaryStatsTest.java new file mode 100644 index 0000000000000..c51e94e1e3569 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/dcmd/vm/DictionaryStatsTest.java @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */
+
+/*
+ * @test
+ * @summary Test of diagnostic command VM.systemdictionary which prints dictionary stats
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          jdk.compiler
+ *          jdk.internal.jvmstat/sun.jvmstat.monitor
+ * @run testng DictionaryStatsTest
+ */
+
+import org.testng.Assert;
+import org.testng.annotations.Test;
+
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.dcmd.CommandExecutor;
+import jdk.test.lib.dcmd.JMXExecutor;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.lang.ref.Reference;
+
+public class DictionaryStatsTest {
+
+    // Expecting some output like:
+
+    // System Dictionary for 'TestClassLoader' @10ba88b9 class loader statistics:
+    // Number of buckets       : 128 = 1024 bytes, each 8
+    // Number of entries       : 6 = 96 bytes, each 16
+    // Number of literals      : 6 = 96 bytes, avg 16.000
+    // Total footprint         : = 1216 bytes
+    // Average bucket size     : 0.047
+    // Variance of bucket size : 0.045
+    // Std. dev. of bucket size: 0.211
+    // Maximum bucket size     : 1
+
+    public void run(CommandExecutor executor) throws ClassNotFoundException {
+
+        ClassLoader named_cl = new TestClassLoader("TestClassLoader", null);
+        Class<?> c2 = Class.forName("TestClass2", true, named_cl);
+        if (c2.getClassLoader() != named_cl) {
+            Assert.fail("TestClass defined by wrong classloader: " + c2.getClassLoader());
+        }
+
+        // First test: simple output, no classes displayed
+        OutputAnalyzer output = executor.execute("VM.systemdictionary");
+        output.shouldContain("System Dictionary for 'bootstrap'");
+        output.shouldMatch("System Dictionary for 'TestClassLoader'");
+        output.shouldContain("class loader statistics:");
+        output.shouldContain("Number of buckets");
+        output.shouldContain("Number of entries");
+        output.shouldContain("Number of literals");
+        output.shouldContain("Total footprint");
+        output.shouldContain("Average bucket size");
+        output.shouldContain("Variance of bucket size");
+        output.shouldContain("Std. dev. of bucket size");
+        output.shouldContain("Maximum bucket size");
+
+        // Keep the test class loader strongly reachable until the dcmd output
+        // has been checked, so its dictionary still exists when
+        // VM.systemdictionary runs.
+ Reference.reachabilityFence(named_cl); + } + + static class TestClassLoader extends ClassLoader { + + public TestClassLoader() { + super(); + } + + public TestClassLoader(String name, ClassLoader parent) { + super(name, parent); + } + + public static final String CLASS_NAME = "TestClass2"; + + static ByteBuffer readClassFile(String name) + { + File f = new File(System.getProperty("test.classes", "."), + name); + try (FileInputStream fin = new FileInputStream(f); + FileChannel fc = fin.getChannel()) + { + return fc.map(FileChannel.MapMode.READ_ONLY, 0, fc.size()); + } catch (IOException e) { + Assert.fail("Can't open file: " + name, e); + } + + /* Will not reach here as Assert.fail() throws exception */ + return null; + } + + protected Class loadClass(String name, boolean resolve) + throws ClassNotFoundException + { + Class c; + if (!CLASS_NAME.equals(name)) { + c = super.loadClass(name, resolve); + } else { + // should not delegate to the system class loader + c = findClass(name); + if (resolve) { + resolveClass(c); + } + } + return c; + } + + protected Class findClass(String name) + throws ClassNotFoundException + { + if (!CLASS_NAME.equals(name)) { + throw new ClassNotFoundException("Unexpected class: " + name); + } + return defineClass(name, readClassFile(name + ".class"), null); + } + + } + + @Test + public void jmx() throws ClassNotFoundException { + run(new JMXExecutor()); + } + +} + +class TestClass2 { + static { + Runnable r = () -> System.out.println("Hello"); + r.run(); + } +} diff --git a/test/hotspot/jtreg/serviceability/jvmti/vthread/GetSetLocalTest/libGetSetLocalTest.cpp b/test/hotspot/jtreg/serviceability/jvmti/vthread/GetSetLocalTest/libGetSetLocalTest.cpp index 0ed229ad86b8d..4d95a36bcfec3 100644 --- a/test/hotspot/jtreg/serviceability/jvmti/vthread/GetSetLocalTest/libGetSetLocalTest.cpp +++ b/test/hotspot/jtreg/serviceability/jvmti/vthread/GetSetLocalTest/libGetSetLocalTest.cpp @@ -350,7 +350,7 @@ Breakpoint(jvmtiEnv *jvmti, JNIEnv* jni, jthread vthread, const char* virt = jni->IsVirtualThread(vthread) ? "virtual" : "carrier"; const jint depth = 0; // the depth is always 0 in case of breakpoint - LOG("Breakpoint: %s on %s thread: %s - Started\n", mname, virt, tname); + LOG("\nBreakpoint: %s on %s thread: %s - Started\n", mname, virt, tname); // disable BREAKPOINT events jvmtiError err = jvmti->SetEventNotificationMode(JVMTI_DISABLE, JVMTI_EVENT_BREAKPOINT, vthread); @@ -360,7 +360,12 @@ Breakpoint(jvmtiEnv *jvmti, JNIEnv* jni, jthread vthread, { int frame_count = get_frame_count(jvmti, jni, vthread); + test_GetSetLocal(jvmti, jni, vthread, depth, frame_count, true /* at_event */); + + // vthread passed to callback has to refer to current thread, + // so we can also test with NULL in place of vthread. 
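+  // (Per the JVMTI spec, passing NULL as the thread to the local
+  //  variable functions means "the current thread".)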
+ test_GetSetLocal(jvmti, jni, NULL, depth, frame_count, true /* at_event */); } deallocate(jvmti, jni, (void*)mname); deallocate(jvmti, jni, (void*)tname); diff --git a/test/hotspot/jtreg/serviceability/jvmti/vthread/VThreadTest/libVThreadTest.cpp b/test/hotspot/jtreg/serviceability/jvmti/vthread/VThreadTest/libVThreadTest.cpp index f993b355c9477..a19387f8633de 100644 --- a/test/hotspot/jtreg/serviceability/jvmti/vthread/VThreadTest/libVThreadTest.cpp +++ b/test/hotspot/jtreg/serviceability/jvmti/vthread/VThreadTest/libVThreadTest.cpp @@ -156,9 +156,7 @@ test_GetCarrierThread(jvmtiEnv *jvmti, JNIEnv *jni, jthread thread, jthread vthr // #1: Test JVMTI GetCarrierThread extension function with NULL vthread err = GetCarrierThread(jvmti, jni, NULL, &vthread_thread); - if (err != JVMTI_ERROR_INVALID_THREAD) { - fatal(jni, "event handler: JVMTI GetCarrierThread with NULL vthread failed to return JVMTI_ERROR_INVALID_THREAD"); - } + check_jvmti_status(jni, err, "event handler: error in JVMTI GetCarrierThread"); // #2: Test JVMTI GetCarrierThread extension function with a bad vthread err = GetCarrierThread(jvmti, jni, thread, &vthread_thread); diff --git a/test/hotspot/jtreg/serviceability/sa/ClhsdbPstack.java b/test/hotspot/jtreg/serviceability/sa/ClhsdbPstack.java index 43b2b90e0fa66..662b76b57cef0 100644 --- a/test/hotspot/jtreg/serviceability/sa/ClhsdbPstack.java +++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbPstack.java @@ -78,7 +78,6 @@ public static void main(String[] args) throws Exception { expStrMap.put("pstack -v", List.of( "No deadlocks found", "Common-Cleaner", "Signal Dispatcher", "CompilerThread", - "Sweeper thread", "Service Thread", "Reference Handler", "Finalizer", "main")); } diff --git a/test/hotspot/jtreg/serviceability/sa/ClhsdbWhere.java b/test/hotspot/jtreg/serviceability/sa/ClhsdbWhere.java index 4b665ac002df6..f3dab6dd9762c 100644 --- a/test/hotspot/jtreg/serviceability/sa/ClhsdbWhere.java +++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbWhere.java @@ -54,7 +54,6 @@ public static void main(String[] args) throws Exception { expStrMap.put("where -a", List.of( "Java Stack Trace for Service Thread", "Java Stack Trace for Common-Cleaner", - "Java Stack Trace for Sweeper thread", "CompilerThread", "Java Stack Trace for Finalizer", "Java Stack Trace for Signal Dispatcher", diff --git a/test/hotspot/jtreg/vmTestbase/metaspace/shrink_grow/ShrinkGrowMultiJVM/ShrinkGrowMultiJVM.java b/test/hotspot/jtreg/vmTestbase/metaspace/shrink_grow/ShrinkGrowMultiJVM/ShrinkGrowMultiJVM.java index d68f46e88adf3..c59f8cd9e9e30 100644 --- a/test/hotspot/jtreg/vmTestbase/metaspace/shrink_grow/ShrinkGrowMultiJVM/ShrinkGrowMultiJVM.java +++ b/test/hotspot/jtreg/vmTestbase/metaspace/shrink_grow/ShrinkGrowMultiJVM/ShrinkGrowMultiJVM.java @@ -58,7 +58,7 @@ public class ShrinkGrowMultiJVM { .resolve("java") .toAbsolutePath() .toString(), - "-Xlog:gc:gc_$i.log", // LOG_GC_ARG_INDEX + "UNSET_LOG_GC_ARG", // LOG_GC_ARG_INDEX "-XX:MetaspaceSize=10m", "-XX:MaxMetaspaceSize=20m", "-cp", @@ -81,7 +81,7 @@ public static void main(String argv[]) { for (int i = 0; i < 5; i++) { // will be used as jvm id args[args.length - 1] = "jvm#" + i; - args[LOG_GC_ARG_INDEX] = "-Xlog:gc:gc_" + i + ".log"; + args[LOG_GC_ARG_INDEX] = "-Xlog:gc*:gc_" + i + ".log::filecount=0"; ProcessBuilder pb = new ProcessBuilder(args); try { Process p = pb.start(); diff --git a/test/hotspot/jtreg/vmTestbase/metaspace/shrink_grow/ShrinkGrowTest/ShrinkGrowTest.java 
b/test/hotspot/jtreg/vmTestbase/metaspace/shrink_grow/ShrinkGrowTest/ShrinkGrowTest.java
index d8c90e277c90e..f8a61c82d6109 100644
--- a/test/hotspot/jtreg/vmTestbase/metaspace/shrink_grow/ShrinkGrowTest/ShrinkGrowTest.java
+++ b/test/hotspot/jtreg/vmTestbase/metaspace/shrink_grow/ShrinkGrowTest/ShrinkGrowTest.java
@@ -145,6 +145,7 @@ private void go() {
         // step 2: try to load one more class
         // it should be impossible
         try {
+            log("and finally, a wafer-thin mint");
             eatALittleMemory();
             throwFault("We haven't cleaned metaspace yet!");
         } catch (OutOfMemoryError error) {
diff --git a/test/hotspot/jtreg/vmTestbase/vm/jit/LongTransitions/LTTest.java
index 7599196053448..945f59e3e33d5 100644
--- a/test/hotspot/jtreg/vmTestbase/vm/jit/LongTransitions/LTTest.java
+++ b/test/hotspot/jtreg/vmTestbase/vm/jit/LongTransitions/LTTest.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -6218,6 +6218,14 @@ private static boolean chkFile()
         fisJava.read(javaData);
         for ( int cnt=0;cnt<javaData.length;cnt++)
+                // comparison operators: <, >, <=, >=
+                {"string(//Customer[Age > 0]/Name)", "name2"},
+                {"string(//Customer[Age < 0]/Name)", "name3"},
+                {"string(//Customer[Age = 0]/Name)", "name1"},
+                {"count(//Customer[Age >= 0 and Age <= 0])", 1},
+                {"count(//Customer[Age >= 0][Age <= 0])", 1},
+                {"count(//Customer[Age > 0 or Age < 0])", 2},
+                {"count(//Customer[Age != 0])", 2},
+
+                // arithmetic operators: +, -, *, div, mod
+                {"string(//Customer[last() div 2]/Name)", "name1"},
+                {"string(//Customer[position() * 2 > last()]/Name)", "name2"},
+                {"string(//Customer[position() + 1 < last()]/Name)", "name1"},
+                {"string(//Customer[last() - 1]/Name)", "name2"},
+                {"string(//Customer[last() mod 2]/Name)", "name1"},
+
+                // union operator: |
+                {"count(//Customer[Name='name1'] | //Customer[Name='name2'])",
+                        2},
+                {"count(//Customer[Name='name1'] | //Customer[Name='name2'] |" +
+                        " //Customer[Name='name3'])", 3},
+
+                // operator precedence
+                {"1 + 2 * 3 + 3", 10.0},
+                {"1 + 1 div 2 + 2", 3.5},
+                {"1 + 1 mod 2 + 2", 4.0},
+                {"1 * 1 mod 2 div 2", 0},
+                {"1 * (1 mod 2) div 2", 0.5},
+                {"(1 + 2) * (3 + 3)", 18.0},
+                {"(1 + 2) div (3 + 3)", 0.5},
+                {"1 - 2 < 3 + 3", true},
+                {"1 * 2 >= 3 div 3", true},
+                {"3 > 2 > 1", false},
+                {"3 > (2 > 1)", true},
+                {"3 > 2 = 1", true},
+                {"1 = 3 > 2", true},
+                {"1 = 2 or 1 <= 2 and 2 != 2", false},
+        };
+    }
+
+    /*
+     * DataProvider for testing XPathExpressionException being thrown on
+     * invalid operator usage.
+     * Data columns:
+     *  see parameters of the test "testExceptionOnEval"
+     */
+    @DataProvider(name = "exceptionExpTestCases")
+    public Object[][] getExceptionExp() {
+        return new Object[][]{
+                // invalid operators
+                {"string(//Customer[last() / 2]/Name)"},
+                {"string(//Customer[last() % 2]/Name)"},
+                {"count(//Customer[Name='name1'] & //Customer[Name='name2'])"},
+                {"count(//Customer[Name='name1'] && //Customer[Name='name2'])"},
+                {"count(//Customer[Name='name1'] || //Customer[Name='name2'])"},
+
+                // union operator only works for node-sets
+                {"//Customer[Name='name1'] | string(//Customer[Name='name2']))"},
+        };
+    }
+
+    /**
+     * Verifies that the result of evaluating XPath operators matches the
+     * expected result.
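+     * For example, XPath 1.0 evaluates "3 > 2 > 1" left to right: (3 > 2) is
+     * true, the boolean converts to the number 1 for the outer comparison,
+     * and 1 > 1 is false.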
+ * + * @param exp XPath expression + * @param expected expected result + * @throws Exception if test fails + */ + @Test(dataProvider = "operatorExpTestCases") + void testOperatorExp(String exp, Object expected) throws Exception { + if (expected instanceof Double d) { + testExp(doc, exp, d, Double.class); + } else if (expected instanceof String s) { + testExp(doc, exp, s, String.class); + } else if (expected instanceof Boolean b) { + testExp(doc, exp, b, Boolean.class); + } + } + + /** + * Verifies that XPathExpressionException is thrown on xpath evaluation. + * + * @param exp XPath expression + */ + @Test(dataProvider = "exceptionExpTestCases") + void testExceptionOnEval(String exp) { + Assert.assertThrows(XPathExpressionException.class, () -> testEval(doc, + exp)); + } +} diff --git a/test/jdk/ProblemList-Xcomp.txt b/test/jdk/ProblemList-Xcomp.txt index 7569ca406e9bf..df1b9da12b014 100644 --- a/test/jdk/ProblemList-Xcomp.txt +++ b/test/jdk/ProblemList-Xcomp.txt @@ -29,8 +29,3 @@ java/lang/invoke/MethodHandles/CatchExceptionTest.java 8146623 generic-all java/lang/ref/ReferenceEnqueue.java 8284236 generic-all - -java/lang/Integer/BitTwiddle.java 8291649 generic-x64 -java/lang/Long/BitTwiddle.java 8291649 generic-x64 -java/util/zip/TestCRC32C.java 8291649 generic-x64 -java/util/zip/TestChecksum.java 8291649 generic-x64 diff --git a/test/jdk/ProblemList.txt b/test/jdk/ProblemList.txt index 030861d5dadc2..784bbc20ea6de 100644 --- a/test/jdk/ProblemList.txt +++ b/test/jdk/ProblemList.txt @@ -121,7 +121,6 @@ java/awt/Focus/AutoRequestFocusTest/AutoRequestFocusToFrontTest.java 6848406 gen java/awt/Focus/AutoRequestFocusTest/AutoRequestFocusSetVisibleTest.java 6848407 generic-all java/awt/Focus/UnaccessibleChoice/AccessibleChoiceTest.java 8239801 macosx-all java/awt/Frame/MaximizedUndecorated/MaximizedUndecorated.java 8022302 generic-all -java/awt/Frame/FrameLocation/FrameLocation.java 8238436 linux-all java/awt/FileDialog/FileDialogIconTest/FileDialogIconTest.java 8160558 windows-all java/awt/event/MouseWheelEvent/InfiniteRecursion/InfiniteRecursion.java 8060176 windows-all,macosx-all java/awt/event/MouseWheelEvent/InfiniteRecursion/InfiniteRecursion_1.java 8060176 windows-all,macosx-all @@ -139,7 +138,6 @@ java/awt/EventQueue/6980209/bug6980209.java 8198615 macosx-all java/awt/Frame/ExceptionOnSetExtendedStateTest/ExceptionOnSetExtendedStateTest.java 8198237 macosx-all java/awt/grab/EmbeddedFrameTest1/EmbeddedFrameTest1.java 7080150 macosx-all java/awt/event/InputEvent/EventWhenTest/EventWhenTest.java 8168646 generic-all -java/awt/KeyboardFocusmanager/TypeAhead/SubMenuShowTest/SubMenuShowTest.java 8273520 macosx-all java/awt/Mixing/AWT_Mixing/HierarchyBoundsListenerMixingTest.java 8049405 macosx-all java/awt/Mixing/AWT_Mixing/OpaqueOverlappingChoice.java 8048171 generic-all java/awt/Mixing/AWT_Mixing/JMenuBarOverlapping.java 8159451 linux-all,windows-all,macosx-all @@ -495,7 +493,7 @@ java/lang/invoke/LFCaching/LFMultiThreadCachingTest.java 8151492 generic- java/lang/invoke/LFCaching/LFGarbageCollectedTest.java 8078602 generic-all java/lang/invoke/lambda/LambdaFileEncodingSerialization.java 8249079 linux-x64 java/lang/invoke/RicochetTest.java 8251969 generic-all -java/lang/ProcessBuilder/PipelineLeaksFD.java 8291760 linux-all +jdk/internal/misc/TerminatingThreadLocal/TestTerminatingThreadLocal.java 8292051 generic-all ############################################################################ @@ -652,7 +650,7 @@ javax/swing/JWindow/ShapedAndTranslucentWindows/SetShapeAndClickSwing.java 80134 
javax/swing/JWindow/ShapedAndTranslucentWindows/TranslucentJComboBox.java 8024627 macosx-all # The next test below is an intermittent failure javax/swing/JTree/DnD/LastNodeLowerHalfDrop.java 8159131 linux-all -javax/swing/JTree/4633594/JTreeFocusTest.java 8173125 macosx-all +javax/swing/JTree/4633594/JTreeFocusTest.java 7105441 macosx-all javax/swing/AbstractButton/6711682/bug6711682.java 8060765 windows-all,macosx-all javax/swing/JFileChooser/6396844/TwentyThousandTest.java 8198003 generic-all javax/swing/JPopupMenu/6800513/bug6800513.java 7184956 macosx-all @@ -763,22 +761,23 @@ jdk/jfr/api/consumer/TestRecordingFileWrite.java 8287699 linux-x6 ############################################################################ # Client manual tests +javax/swing/JFileChooser/6698013/bug6698013.java 8024419 macosx-all +javax/swing/JColorChooser/8065098/bug8065098.java 8065647 macosx-all +javax/swing/JTabbedPane/4666224/bug4666224.html 8144124 macosx-all +javax/swing/SwingUtilities/TestTextPosInPrint.java 8227025 windows-all + java/awt/event/MouseEvent/SpuriousExitEnter/SpuriousExitEnter_1.java 7131438,8022539 generic-all java/awt/event/MouseEvent/SpuriousExitEnter/SpuriousExitEnter_2.java 7131438,8022539 generic-all java/awt/Modal/WsDisabledStyle/CloseBlocker/CloseBlocker.java 7187741 linux-all,macosx-all java/awt/xembed/server/TestXEmbedServerJava.java 8001150,8004031 generic-all -javax/swing/JFileChooser/6698013/bug6698013.java 8024419 macosx-all -javax/swing/JColorChooser/8065098/bug8065098.java 8065647 macosx-all java/awt/Modal/PrintDialogsTest/PrintDialogsTest.java 8068378 generic-all java/awt/dnd/DnDFileGroupDescriptor/DnDFileGroupDescriptor.html 8080185 macosx-all,linux-all -javax/swing/JTabbedPane/4666224/bug4666224.html 8144124 macosx-all java/awt/event/MouseEvent/AltGraphModifierTest/AltGraphModifierTest.java 8162380 generic-all java/awt/image/VolatileImage/VolatileImageConfigurationTest.java 8171069 macosx-all,linux-all java/awt/Modal/InvisibleParentTest/InvisibleParentTest.java 8172245 linux-all java/awt/print/Dialog/RestoreActiveWindowTest/RestoreActiveWindowTest.java 8185429 macosx-all java/awt/TrayIcon/DblClickActionEventTest/DblClickActionEventTest.html 8203867 macosx-all java/awt/Frame/FrameStateTest/FrameStateTest.html 8203920 macosx-all,linux-all -javax/swing/SwingUtilities/TestTextPosInPrint.java 8227025 windows-all java/awt/print/PrinterJob/ScaledText/ScaledText.java 8231226 macosx-all java/awt/font/TextLayout/TestJustification.html 8250791 macosx-all java/awt/TrayIcon/DragEventSource/DragEventSource.java 8252242 macosx-all diff --git a/test/jdk/com/sun/crypto/provider/Cipher/Test4958071.java b/test/jdk/com/sun/crypto/provider/Cipher/Test4958071.java new file mode 100644 index 0000000000000..4a747518dfbe9 --- /dev/null +++ b/test/jdk/com/sun/crypto/provider/Cipher/Test4958071.java @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @library /test/lib + * @bug 4958071 + * @summary verify InvalidParameterException for Cipher.init + */ + +import jdk.test.lib.Utils; + +import javax.crypto.Cipher; +import javax.crypto.KeyGenerator; +import javax.crypto.SecretKey; +import java.security.AlgorithmParameters; +import java.security.InvalidParameterException; +import java.security.SecureRandom; +import java.security.cert.Certificate; +import java.security.spec.AlgorithmParameterSpec; + +public class Test4958071 { + + public boolean execute() throws Exception { + + KeyGenerator aesKey = KeyGenerator.getInstance("AES"); + aesKey.init(128); + SecretKey generatedAESKey = aesKey.generateKey(); + + Cipher c = Cipher.getInstance("AES"); + + SecureRandom nullSR = null; + AlgorithmParameters nullAP = null; + AlgorithmParameterSpec nullAPS = null; + Certificate nullCert = null; + + Utils.runAndCheckException(() -> c.init((Cipher.ENCRYPT_MODE - 1), + generatedAESKey), InvalidParameterException.class); + Utils.runAndCheckException(() -> c.init((Cipher.UNWRAP_MODE + 1), + generatedAESKey), InvalidParameterException.class); + Utils.runAndCheckException(() -> c.init((Cipher.ENCRYPT_MODE - 1), + generatedAESKey, nullSR), InvalidParameterException.class); + Utils.runAndCheckException(() -> c.init((Cipher.UNWRAP_MODE + 1), + generatedAESKey, nullSR), InvalidParameterException.class); + Utils.runAndCheckException(() -> c.init((Cipher.ENCRYPT_MODE - 1), + generatedAESKey, nullAP), InvalidParameterException.class); + Utils.runAndCheckException(() -> c.init((Cipher.UNWRAP_MODE + 1), + generatedAESKey, nullAP), InvalidParameterException.class); + Utils.runAndCheckException(() -> c.init((Cipher.ENCRYPT_MODE - 1), + generatedAESKey, nullAP, nullSR), InvalidParameterException.class); + Utils.runAndCheckException(() -> c.init((Cipher.UNWRAP_MODE + 1), + generatedAESKey, nullAP, nullSR), InvalidParameterException.class); + Utils.runAndCheckException(() -> c.init((Cipher.ENCRYPT_MODE - 1), + generatedAESKey, nullAPS), InvalidParameterException.class); + Utils.runAndCheckException(() -> c.init((Cipher.UNWRAP_MODE + 1), + generatedAESKey, nullAPS), InvalidParameterException.class); + Utils.runAndCheckException(() -> c.init((Cipher.ENCRYPT_MODE - 1), + generatedAESKey, nullAPS, nullSR), InvalidParameterException.class); + Utils.runAndCheckException(() -> c.init((Cipher.UNWRAP_MODE + 1), + generatedAESKey, nullAPS, nullSR), InvalidParameterException.class); + Utils.runAndCheckException(() -> c.init((Cipher.ENCRYPT_MODE - 1), + nullCert), InvalidParameterException.class); + Utils.runAndCheckException(() -> c.init((Cipher.UNWRAP_MODE + 1), + nullCert), InvalidParameterException.class); + Utils.runAndCheckException(() -> c.init((Cipher.ENCRYPT_MODE - 1), + nullCert, nullSR), InvalidParameterException.class); + Utils.runAndCheckException(() -> c.init((Cipher.UNWRAP_MODE + 1), + nullCert, nullSR), InvalidParameterException.class); + + return true; + } + + public static void main(String[] args) throws Exception { + + Test4958071 test = new Test4958071(); + + if (test.execute()) { + System.out.println(test.getClass().getName() + ": passed!"); + } + + } +} 
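All sixteen checks above exercise the same boundary condition: a valid opmode must lie in the inclusive range Cipher.ENCRYPT_MODE (1) through Cipher.UNWRAP_MODE (4). As a minimal sketch of the check the test pins down (illustrative only; the real validation lives inside javax.crypto.Cipher and is not part of this patch):

    // Illustrative sketch, not JDK source: the opmode range check that
    // Test4958071 expects Cipher.init to perform for every overload.
    import java.security.InvalidParameterException;
    import javax.crypto.Cipher;

    class OpmodeCheckSketch {
        static void checkOpmode(int opmode) {
            // Valid: ENCRYPT_MODE(1), DECRYPT_MODE(2), WRAP_MODE(3), UNWRAP_MODE(4)
            if (opmode < Cipher.ENCRYPT_MODE || opmode > Cipher.UNWRAP_MODE) {
                throw new InvalidParameterException("Invalid operation mode: " + opmode);
            }
        }
    }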
diff --git a/test/jdk/com/sun/crypto/provider/Mac/Test6205692.java b/test/jdk/com/sun/crypto/provider/Mac/Test6205692.java new file mode 100644 index 0000000000000..55d26b57bf18a --- /dev/null +++ b/test/jdk/com/sun/crypto/provider/Mac/Test6205692.java @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @library /test/lib + * @bug 6205692 + * @summary verify MacSpi NPE on engineUpdate(ByteBuffer) + */ + +import jdk.test.lib.Utils; + +import javax.crypto.MacSpi; +import java.nio.ByteBuffer; +import java.security.InvalidAlgorithmParameterException; +import java.security.InvalidKeyException; +import java.security.Key; +import java.security.spec.AlgorithmParameterSpec; + +public class Test6205692 { + + public boolean execute() throws Exception { + + ByteBuffer byteBuffer = null; + + MyMacSpi myMacSpi = new MyMacSpi(); + + Utils.runAndCheckException(() -> myMacSpi.engineUpdate(byteBuffer), + NullPointerException.class); + + return true; + } + + public static void main(String[] args) throws Exception { + Test6205692 test = new Test6205692(); + + if (test.execute()) { + System.out.println(test.getClass().getName() + ": passed!"); + } + } + + private static class MyMacSpi extends MacSpi { + + /* + * This is the important part; the rest is blank mandatory overrides + */ + public void engineUpdate(ByteBuffer input) { + super.engineUpdate(input); + } + + @Override + protected int engineGetMacLength() { + return 0; + } + + @Override + protected void engineInit(Key key, AlgorithmParameterSpec params) + throws InvalidKeyException, InvalidAlgorithmParameterException { + } + + @Override + protected void engineUpdate(byte input) { + } + + @Override + protected void engineUpdate(byte[] input, int offset, int len) { + } + + @Override + protected byte[] engineDoFinal() { + return new byte[0]; + } + + @Override + protected void engineReset() { + } + } +} diff --git a/test/jdk/com/sun/jdi/CLETest.java b/test/jdk/com/sun/jdi/CLETest.java new file mode 100644 index 0000000000000..fd5d8c23d57f6 --- /dev/null +++ b/test/jdk/com/sun/jdi/CLETest.java @@ -0,0 +1,444 @@ +/* + * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8292217
+ * @summary Test co-located events (CLE) for MethodEntry, SingleStep, and Breakpoint events.
+ * @run build TestScaffold VMConnection TargetListener TargetAdapter
+ * @run compile -g CLETest.java
+ * @run driver CLETest
+ */
+
+import com.sun.jdi.*;
+import com.sun.jdi.event.*;
+import com.sun.jdi.request.*;
+import java.util.*;
+
+class t1 {
+    public static void foo() {
+    }
+}
+class t2 {
+    public static void foo() {
+    }
+}
+
+/*
+ * The debuggee has a large number of breakpoints set up in advance to help control the
+ * test. They are each hit just once, and in the order of their number. No instructions
+ * in the debuggee are ever executed more than once.
+ *
+ * NOTE: the breakpoints are sensitive to their line number within the method.
+ * If that changes, then the "breakpoints" table needs to be updated.
+ */
+class CLEDebugee {
+    public static void main(String[] args) {
+        runTests();
+    }
+
+    public static void runTests() {
+        test1();
+        test2();
+        test3(); // BREAKPOINT_3
+        test4(); // BREAKPOINT_5
+        test5(); // BREAKPOINT_7
+        test6(); // BREAKPOINT_9
+    }
+
+    // test1 and test2 are testing for the bug described in 8292217. For this test MethodEntry
+    // events are enabled when we hit the breakpoint, and we single step OVER (test1) or
+    // INTO (test2) an instruction with an unresolved constant pool entry. The debugger will
+    // verify that the MethodEntry events generated during class loading are not improperly
+    // co-located as described in the CR.
+    public static void test1() {
+        t1.foo(); // BREAKPOINT_1
+    }
+    public static void test2() {
+        t2.foo(); // BREAKPOINT_2
+    }
+
+    // Tests that MethodEntry, Step, and Breakpoint events that occur at the same
+    // location are properly co-located in the same EventSet. MethodEntry and Step
+    // are enabled when we hit BREAKPOINT_3 above. When the BreakpointEvent for
+    // BREAKPOINT_4 is generated, the EventSet should also include a StepEvent
+    // and a MethodEntryEvent.
+    public static void test3() {
+        int x = 1; // BREAKPOINT_4
+    }
+
+    // Same as test3 but only check for co-located MethodEntry and Breakpoint events.
+    // MethodEntry is enabled when we hit BREAKPOINT_5 above. StepEvent is not enabled.
+    // When the BreakpointEvent for BREAKPOINT_6 is generated, the EventSet should also
+    // include a MethodEntryEvent.
+    public static void test4() {
+        int x = 1; // BREAKPOINT_6
+    }
+
+    // Same as test3 but only check for co-located Step and Breakpoint events.
+    // StepEvents are enabled when we hit BREAKPOINT_7 above. When the BreakpointEvent
+    // for BREAKPOINT_8 is generated, the EventSet should also include a StepEvent.
+    public static void test5() {
+        int x = 1; // BREAKPOINT_8
+    }
+
+    // Same as test3 but only check for co-located MethodEntry and Step events.
+    // MethodEntry and Step events are enabled when we hit BREAKPOINT_9 above. When
+    // the StepEvent is received, the EventSet should also include the MethodEntryEvent.
+    public static void test6() {
+        int x = 1;
+    }
+}
+
+public class CLETest extends TestScaffold {
+    ClassType targetClass;
+    EventRequestManager erm;
+    StepRequest stepRequest;
+    MethodEntryRequest entryRequest;
+    MethodExitRequest exitRequest;
+    int methodEntryCount = 0;
+    int breakpointCount = 0;
+    boolean testcaseFailed = false;
+    int testcase = 0;
+
+    CLETest(String args[]) {
+        super(args);
+    }
+
+    public static void main(String[] args) throws Exception {
+        CLETest cle = new CLETest(args);
+        cle.startTests();
+    }
+
+    static class MethodBreakpointData {
+        final String method;
+        final String signature;
+        final int lineNumber;
+        public MethodBreakpointData(String method, String signature, int lineNumber) {
+            this.method = method;
+            this.signature = signature;
+            this.lineNumber = lineNumber;
+        }
+    }
+
+    // Table of all breakpoints based on method name and sig, plus the line number within the method.
+    static MethodBreakpointData[] breakpoints = new MethodBreakpointData[] {
+        new MethodBreakpointData("runTests", "()V", 3), // BREAKPOINT_3
+        new MethodBreakpointData("runTests", "()V", 4), // BREAKPOINT_5
+        new MethodBreakpointData("runTests", "()V", 5), // BREAKPOINT_7
+        new MethodBreakpointData("runTests", "()V", 6), // BREAKPOINT_9
+        new MethodBreakpointData("test1", "()V", 1),    // BREAKPOINT_1
+        new MethodBreakpointData("test2", "()V", 1),    // BREAKPOINT_2
+        new MethodBreakpointData("test3", "()V", 1),    // BREAKPOINT_4
+        new MethodBreakpointData("test4", "()V", 1),    // BREAKPOINT_6
+        new MethodBreakpointData("test5", "()V", 1)     // BREAKPOINT_8
+    };
+
+    public static void printStack(ThreadReference thread) {
+        try {
+            List<StackFrame> frames = thread.frames();
+            Iterator<StackFrame> iter = frames.iterator();
+            while (iter.hasNext()) {
+                StackFrame frame = iter.next();
+                System.out.println(getLocationString(frame.location()));
+            }
+        } catch (Exception e) {
+            System.out.println("printStack: exception " + e);
+        }
+    }
+
+    public static String getLocationString(Location loc) {
+        return
+            loc.declaringType().name() + "." +
+            loc.method().name() + ":" +
+            loc.lineNumber();
+    }
+
+    /*
+     * Returns true if the specified event types are all co-located in this EventSet,
+     * and no other events are included. Note that the order of the events (when present)
+     * is required to be: MethodEntryEvent, StepEvent, BreakpointEvent.
+     */
+    public boolean isColocated(EventSet set, boolean needEntry, boolean needStep, boolean needBreakpoint) {
+        int expectedSize = (needEntry ? 1 : 0) + (needStep ? 1 : 0) + (needBreakpoint ? 1 : 0);
+        if (set.size() != expectedSize) {
+            return false;
+        }
+        EventIterator iter = set.eventIterator();
+        if (needEntry) {
+            Event meEvent = iter.next();
+            if (!(meEvent instanceof MethodEntryEvent)) {
+                return false;
+            }
+        }
+        if (needStep) {
+            Event ssEvent = iter.next();
+            if (!(ssEvent instanceof StepEvent)) {
+                return false;
+            }
+        }
+        if (needBreakpoint) {
+            Event bpEvent = iter.next();
+            if (!(bpEvent instanceof BreakpointEvent)) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    public void eventSetReceived(EventSet set) {
+        System.out.println("\nEventSet for test case #" + testcase + ": " + set);
+        switch (testcase) {
+            case 1:
+            case 2: {
+                // During the first two test cases we should never receive an EventSet with
+                // more than one Event in it.
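+                // Any MethodEntry event posted while the unresolved class is being
+                // loaded must arrive in its own EventSet rather than being co-located
+                // with the Step or Breakpoint event that follows.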
+ if (set.size() != 1) { + testcaseFailed = true; + // For now, we expect these two test cases to fail due to 8292217, + // so don't fail the overall test run as a result of these failures. + // testFailed = true; + System.out.println("TESTCASE #" + testcase + " FAILED (ignoring): too many events in EventSet: " + set.size()); + } + break; + } + case 3: { + // At some point during test3 we should receive co-located MethodEntry, Step, and Breakpoint events. + if (isColocated(set, true, true, true)) { + testcaseFailed = false; + } + break; + } + case 4: { + // At some point during test4 we should receive co-located MethodEntry and Breakpoint events. + if (isColocated(set, true, false, true)) { + testcaseFailed = false; + } + break; + } + case 5: { + // At some point during test5 we should receive co-located Step and Breakpoint events. + if (isColocated(set, false, true, true)) { + testcaseFailed = false; + } + break; + } + case 6: { + // At some point during test6 we should receive co-located MethodEntry and Step events. + if (isColocated(set, true, true, false)) { + testcaseFailed = false; + } + break; + } + } + } + + /* + * Most of the control flow of the test is handled via breakpoints. There is one at the start + * of each test case that is used to enable other events that we check for during the test case. + * In some cases there is an additional Breakpoint enabled for the test cases that is + * also used to determine when the test case is complete. Other test cases are completed + * when a Step or MethodEntry event arrives. + */ + public void breakpointReached(BreakpointEvent event) { + breakpointCount++; + if (breakpointCount != 4 && breakpointCount != 6 && breakpointCount != 8) { + testcase++; + } + System.out.println("Got BreakpointEvent(" + breakpointCount + "): " + getLocationString(event.location())); + event.request().disable(); + + // Setup test1. Completion is checked for in stepCompleted(). + if (breakpointCount == 1) { + testcaseFailed = false; // assume passing unless error detected + entryRequest.enable(); + exitRequest.enable(); + stepRequest = erm.createStepRequest(mainThread, + StepRequest.STEP_LINE, + StepRequest.STEP_OVER); + stepRequest.addCountFilter(1); + stepRequest.setSuspendPolicy(EventRequest.SUSPEND_ALL); + stepRequest.enable(); + } + + // Setup test2. Completion is checked for in stepCompleted(). + if (breakpointCount == 2) { + testcaseFailed = false; // assume passing unless error detected + entryRequest.enable(); + exitRequest.enable(); + stepRequest = erm.createStepRequest(mainThread, + StepRequest.STEP_LINE, + StepRequest.STEP_INTO); + stepRequest.addCountFilter(1); + stepRequest.setSuspendPolicy(EventRequest.SUSPEND_ALL); + stepRequest.enable(); + } + + // Setup test3: MethodEntry, Step, and Breakpoint co-located events. + // Completion is handled by the next breakpoint being hit. + if (breakpointCount == 3) { + testcaseFailed = true; // assume failing unless pass detected + entryRequest.enable(); + stepRequest = erm.createStepRequest(mainThread, + StepRequest.STEP_LINE, + StepRequest.STEP_INTO); + stepRequest.addCountFilter(1); + stepRequest.setSuspendPolicy(EventRequest.SUSPEND_ALL); + stepRequest.enable(); + } + // Complete test3. We fail if we never saw the expected co-located events. 
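+        // (TestScaffold calls eventSetReceived() before this callback, so the
+        // EventSet containing BREAKPOINT_4 has already been checked for the
+        // co-located events by the time we get here.)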
+        if (breakpointCount == 4) {
+            if (testcaseFailed) {
+                testFailed = true;
+                System.out.println("TESTCASE #3 FAILED: did not get MethodEntry, Step, and Breakpoint co-located events");
+            } else {
+                System.out.println("TESTCASE #3 PASSED");
+            }
+        }
+
+        // Setup test4: MethodEntry and Breakpoint co-located events.
+        // Completion is handled by the next breakpoint being hit.
+        if (breakpointCount == 5) {
+            testcaseFailed = true; // assume failing unless pass detected
+            entryRequest.enable();
+        }
+        // Complete test4. We fail if we never saw the expected co-located events.
+        if (breakpointCount == 6) {
+            entryRequest.disable();
+            if (testcaseFailed) {
+                testFailed = true;
+                System.out.println("TESTCASE #4 FAILED: did not get MethodEntry and Breakpoint co-located events");
+            } else {
+                System.out.println("TESTCASE #4 PASSED");
+            }
+        }
+
+        // Setup test5: Step and Breakpoint co-located events.
+        // Completion is handled by the next breakpoint being hit.
+        if (breakpointCount == 7) {
+            testcaseFailed = true; // assume failing unless pass detected
+            stepRequest = erm.createStepRequest(mainThread,
+                                                StepRequest.STEP_LINE,
+                                                StepRequest.STEP_INTO);
+            stepRequest.addCountFilter(1);
+            stepRequest.setSuspendPolicy(EventRequest.SUSPEND_ALL);
+            stepRequest.enable();
+        }
+        // Complete test5. We fail if we never saw the expected co-located events.
+        if (breakpointCount == 8) {
+            if (testcaseFailed) {
+                testFailed = true;
+                System.out.println("TESTCASE #5 FAILED: did not get Step and Breakpoint co-located events");
+            } else {
+                System.out.println("TESTCASE #5 PASSED");
+            }
+        }
+
+        // Setup test6: MethodEntry and Step co-located events.
+        // Completion is handled by stepCompleted() since there is no additional breakpoint.
+        if (breakpointCount == 9) {
+            testcaseFailed = true; // assume failing unless pass detected
+            entryRequest.enable();
+            stepRequest = erm.createStepRequest(mainThread,
+                                                StepRequest.STEP_LINE,
+                                                StepRequest.STEP_INTO);
+            stepRequest.addCountFilter(1);
+            stepRequest.setSuspendPolicy(EventRequest.SUSPEND_ALL);
+            stepRequest.enable();
+        }
+    }
+
+    public void stepCompleted(StepEvent event) {
+        System.out.println("Got StepEvent: " + getLocationString(event.location()));
+        event.request().disable();
+        entryRequest.disable();
+        if (testcase == 6 && testcaseFailed) {
+            testFailed = true;
+            System.out.println("TESTCASE #6 FAILED: did not get MethodEntry and Step co-located events");
+        }
+        if (testcase == 1 || testcase == 2 || testcase == 6) {
+            exitRequest.disable();
+            if (!testcaseFailed) { // We already did a println if the test failed.
+                System.out.println("TESTCASE #" + testcase + " PASSED");
+            }
+        }
+    }
+
+    public void methodEntered(MethodEntryEvent event) {
+        System.out.println("Got MethodEntryEvent: " + getLocationString(event.location()));
+        if (methodEntryCount++ == 25) {
+            entryRequest.disable(); // Just in case the test loses control.
+ } + } + + public void methodExited(MethodExitEvent event) { + System.out.println("Got MethodExitEvent: " + getLocationString(event.location())); + //printStack(event.thread()); + exitRequest.disable(); + entryRequest.disable(); + } + + protected void runTests() throws Exception { + System.out.println("Starting CLETest"); + BreakpointEvent bpe = startToMain("CLEDebugee"); + targetClass = (ClassType)bpe.location().declaringType(); + mainThread = bpe.thread(); + System.out.println("Got main thread: " + mainThread); + erm = eventRequestManager(); + + try { + // Setup all breakpoints + for (MethodBreakpointData bpData : breakpoints) { + Location loc = findMethodLocation(targetClass, bpData.method, + bpData.signature, bpData.lineNumber); + BreakpointRequest req = erm.createBreakpointRequest(loc); + req.setSuspendPolicy(EventRequest.SUSPEND_ALL); + req.enable(); + } + + // Ask for method entry events + entryRequest = erm.createMethodEntryRequest(); + entryRequest.addThreadFilter(mainThread); + entryRequest.setSuspendPolicy(EventRequest.SUSPEND_ALL); + + // Ask for method exit events + exitRequest = erm.createMethodExitRequest(); + exitRequest.addThreadFilter(mainThread); + exitRequest.setSuspendPolicy(EventRequest.SUSPEND_ALL); + + System.out.println("Waiting for events: "); + + listenUntilVMDisconnect(); + System.out.println("All done..."); + } catch (Exception ex){ + ex.printStackTrace(); + testFailed = true; + } + + if (!testFailed) { + println("CLETest: passed"); + } else { + throw new Exception("CLETest: failed"); + } + } +} diff --git a/test/jdk/com/sun/jdi/ClassUnloadEventTest.java b/test/jdk/com/sun/jdi/ClassUnloadEventTest.java index 360240b578a7b..7518175805523 100644 --- a/test/jdk/com/sun/jdi/ClassUnloadEventTest.java +++ b/test/jdk/com/sun/jdi/ClassUnloadEventTest.java @@ -54,7 +54,12 @@ public class ClassUnloadEventTest { public static void main(String[] args) throws Exception { if (args.length == 0) { - runDebuggee(); + try { + runDebuggee(); + } catch (Exception e) { + e.printStackTrace(); + throw e; + } } else { runDebugger(); } @@ -96,7 +101,7 @@ private static void runDebuggee() { Class.forName(CLASS_NAME_PREFIX + index, true, loader); } } catch (Exception e) { - throw new RuntimeException("Failed to create Sample class"); + throw new RuntimeException("Failed to create Sample class", e); } } loader = null; @@ -109,6 +114,8 @@ private static void runDebuggee() { Thread.sleep(5000); } catch (InterruptedException e) { } + + System.out.println("Exiting debuggee"); } private static void runDebugger() throws Exception { @@ -169,6 +176,21 @@ private static void runDebugger() throws Exception { eventSet.resume(); } + /* Dump debuggee output. 
*/ + Process p = vm.process(); + BufferedReader in = new BufferedReader(new InputStreamReader(p.getInputStream())); + BufferedReader err = new BufferedReader(new InputStreamReader(p.getErrorStream())); + String line = in.readLine(); + while (line != null) { + System.out.println("stdout: " + line); + line = in.readLine(); + } + line = err.readLine(); + while (line != null) { + System.out.println("stderr: " + line); + line = err.readLine(); + } + if (unloadedSampleClasses.size() != NUM_CLASSES) { throw new RuntimeException("Wrong number of class unload events: expected " + NUM_CLASSES + " got " + unloadedSampleClasses.size()); } @@ -183,7 +205,7 @@ private static VirtualMachine connectAndLaunchVM() throws IOException, LaunchingConnector launchingConnector = Bootstrap.virtualMachineManager().defaultConnector(); Map arguments = launchingConnector.defaultArguments(); arguments.get("main").setValue(ClassUnloadEventTest.class.getName()); - arguments.get("options").setValue("--add-exports java.base/jdk.internal.org.objectweb.asm=ALL-UNNAMED -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI"); + arguments.get("options").setValue("--add-exports java.base/jdk.internal.org.objectweb.asm=ALL-UNNAMED -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xlog:class+unload=info -Xlog:gc"); return launchingConnector.launch(arguments); } } diff --git a/test/jdk/com/sun/jdi/TestScaffold.java b/test/jdk/com/sun/jdi/TestScaffold.java index e054353e3a548..1f351a3c6f491 100644 --- a/test/jdk/com/sun/jdi/TestScaffold.java +++ b/test/jdk/com/sun/jdi/TestScaffold.java @@ -847,6 +847,14 @@ public Location findLocation(ReferenceType rt, int lineNumber) return (Location)locs.get(0); } + public Location findMethodLocation(ReferenceType rt, String methodName, + String methodSignature, int methodLineNumber) + throws AbsentInformationException { + Method m = findMethod(rt, methodName, methodSignature); + int lineNumber = m.location().lineNumber() + methodLineNumber - 1; + return findLocation(rt, lineNumber); + } + public BreakpointEvent resumeTo(String clsName, String methodName, String methodSignature) { return resumeTo(clsName, methodName, methodSignature, false /* suspendThread */); diff --git a/test/jdk/com/sun/management/HotSpotDiagnosticMXBean/CheckOrigin.java b/test/jdk/com/sun/management/HotSpotDiagnosticMXBean/CheckOrigin.java index e51f7e38d40ee..a1fb82bcbb5ec 100644 --- a/test/jdk/com/sun/management/HotSpotDiagnosticMXBean/CheckOrigin.java +++ b/test/jdk/com/sun/management/HotSpotDiagnosticMXBean/CheckOrigin.java @@ -62,7 +62,7 @@ public static void main(String... args) throws Exception { createJavaProcessBuilder( "--add-exports", "jdk.attach/sun.tools.attach=ALL-UNNAMED", "-XX:+UseG1GC", // this will cause MaxNewSize to be FLAG_SET_ERGO - "-XX:+UseCodeAging", + "-XX:+UseCodeCacheFlushing", "-XX:+UseCerealGC", // Should be ignored. "-XX:Flags=" + flagsFile.getAbsolutePath(), "-Djdk.attach.allowAttachSelf", @@ -97,7 +97,7 @@ public static void main(String... 
args) throws Exception { // Not set, so should be default checkOrigin("ManagementServer", Origin.DEFAULT); // Set on the command line - checkOrigin("UseCodeAging", Origin.VM_CREATION); + checkOrigin("UseCodeCacheFlushing", Origin.VM_CREATION); // Set in _JAVA_OPTIONS checkOrigin("CheckJNICalls", Origin.ENVIRON_VAR); // Set in JAVA_TOOL_OPTIONS diff --git a/test/jdk/java/awt/Graphics2D/DrawString/DrawRotatedStringUsingRotatedFont.java b/test/jdk/java/awt/Graphics2D/DrawString/DrawRotatedStringUsingRotatedFont.java index 9648e68888ea3..1ab9e591a0192 100644 --- a/test/jdk/java/awt/Graphics2D/DrawString/DrawRotatedStringUsingRotatedFont.java +++ b/test/jdk/java/awt/Graphics2D/DrawString/DrawRotatedStringUsingRotatedFont.java @@ -37,7 +37,8 @@ /** * @test - * @bug 8065373 + * @bug 8065373 8289208 + * @key headful * @summary Verifies that we get correct direction, when draw rotated string. * @author Sergey Bylokhov * @run main DrawRotatedStringUsingRotatedFont diff --git a/test/jdk/java/lang/ProcessBuilder/PipelineLeaksFD.java b/test/jdk/java/lang/ProcessBuilder/PipelineLeaksFD.java index 4f572610b9d6f..d3c44bc92797f 100644 --- a/test/jdk/java/lang/ProcessBuilder/PipelineLeaksFD.java +++ b/test/jdk/java/lang/ProcessBuilder/PipelineLeaksFD.java @@ -25,10 +25,10 @@ import org.testng.annotations.DataProvider; import org.testng.annotations.Test; +import java.io.BufferedReader; import java.io.File; import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; +import java.io.Writer; import java.nio.file.Files; import java.nio.file.Path; import java.util.HashSet; @@ -37,9 +37,9 @@ /* * @test - * @bug 8289643 + * @bug 8289643 8291760 * @requires (os.family == "linux" & !vm.musl) - * @summary file descriptor leak with ProcessBuilder.startPipeline + * @summary File descriptor leak detection with ProcessBuilder.startPipeline * @run testng/othervm PipelineLeaksFD */ @@ -68,26 +68,27 @@ void checkForLeaks(List builders) throws IOException { Assert.fail("There should be at least 3 pipes before, (0, 1, 2)"); } - // Redirect all of the error streams to stdout (except the last) - // so those file descriptors are not left open - for (int i = 0; i < builders.size() - 1; i++) { - builders.get(i).redirectErrorStream(true); - } - List processes = ProcessBuilder.startPipeline(builders); // Write something through the pipeline - try (OutputStream out = processes.get(0).getOutputStream()) { - out.write('a'); + final String text = "xyz"; + try (Writer out = processes.get(0).outputWriter()) { + out.write(text); } - Process last = processes.get(processes.size() - 1); - try (InputStream inputStream = last.getInputStream(); - InputStream errorStream = last.getErrorStream()) { - byte[] bytes = inputStream.readAllBytes(); - Assert.assertEquals(bytes.length, 1, "stdout bytes read"); - byte[] errBytes = errorStream.readAllBytes(); - Assert.assertEquals(errBytes.length, 0, "stderr bytes read"); + // Read, check, and close all streams + for (int i = 0; i < processes.size(); i++) { + final Process p = processes.get(i); + String expectedOut = (i == processes.size() - 1) ? 
text : null; + String expectedErr = null; // EOF + try (BufferedReader inputStream = p.inputReader(); + BufferedReader errorStream = p.errorReader()) { + String outActual = inputStream.readLine(); + Assert.assertEquals(outActual, expectedOut, "stdout, process[ " + i + "]: " + p); + + String errActual = errorStream.readLine(); + Assert.assertEquals(errActual, expectedErr, "stderr, process[ " + i + "]: " + p); + } } processes.forEach(p -> waitForQuiet(p)); diff --git a/test/jdk/java/nio/channels/FileChannel/TransferToAppending.java b/test/jdk/java/nio/channels/FileChannel/TransferToAppending.java new file mode 100644 index 0000000000000..e5daac3812b63 --- /dev/null +++ b/test/jdk/java/nio/channels/FileChannel/TransferToAppending.java @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* @test + * @bug 8292562 + * @summary Test transferTo and transferFrom when target is appending + * @library /test/lib + * @build jdk.test.lib.RandomFactory + * @run main TransferToAppending + * @key randomness + */ + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.channels.FileChannel; +import java.util.Random; +import jdk.test.lib.RandomFactory; + +import static java.nio.file.StandardOpenOption.*; + +public class TransferToAppending { + private static final int MIN_SIZE = 128; + private static final int MAX_SIZE = 32768; + private static final Random RND = RandomFactory.getRandom(); + + public static void main(String... 
args) throws IOException { + // Create files in size range [MIN_SIZE,MAX_SIZE) + // filled with random bytes + Path source = createFile("src"); + Path target = createFile("tgt"); + + try (FileChannel src = FileChannel.open(source, READ, WRITE); + FileChannel tgt = FileChannel.open(target, WRITE, APPEND);) { + // Set source range to a subset of the source + long size = Files.size(source); + long position = RND.nextInt((int)size); + long count = RND.nextInt((int)(size - position)); + long tgtSize = Files.size(target); + + // Transfer subrange to target + long nbytes = src.transferTo(position, count, tgt); + + long expectedSize = tgtSize + nbytes; + + if (Files.size(target) != expectedSize) { + String msg = String.format("Bad size: expected %d, actual %d%n", + expectedSize, Files.size(target)); + throw new RuntimeException(msg); + } + + tgt.close(); + + // Load subrange of source + ByteBuffer bufSrc = ByteBuffer.allocate((int)nbytes); + src.read(bufSrc, position); + + try (FileChannel res = FileChannel.open(target, READ, WRITE)) { + // Load appended range of target + ByteBuffer bufTgt = ByteBuffer.allocate((int)nbytes); + res.read(bufTgt, tgtSize); + + // Subranges of values should be equal + if (bufSrc.mismatch(bufTgt) != -1) { + throw new RuntimeException("Range of values unequal"); + } + } + } finally { + Files.delete(source); + Files.delete(target); + } + } + + private static Path createFile(String name) throws IOException { + Path path = Files.createTempFile(name, ".dat"); + try (FileChannel fc = FileChannel.open(path, CREATE, READ, WRITE)) { + int size = Math.max(RND.nextInt(MAX_SIZE), 128); + byte[] b = new byte[size]; + RND.nextBytes(b); + fc.write(ByteBuffer.wrap(b)); + } + return path; + } +} diff --git a/test/jdk/java/util/concurrent/atomic/Serial.java b/test/jdk/java/util/concurrent/atomic/Serial.java index 2aa6cd4a7e1a1..664b9b921e7f5 100644 --- a/test/jdk/java/util/concurrent/atomic/Serial.java +++ b/test/jdk/java/util/concurrent/atomic/Serial.java @@ -64,8 +64,8 @@ static void testDoubleAdder() { } static void testDoubleAccumulator() { - DoubleBinaryOperator plus = (DoubleBinaryOperator & Serializable) (x, y) -> x + y; - DoubleAccumulator a = new DoubleAccumulator(plus, 13.9d); + DoubleBinaryOperator op = (DoubleBinaryOperator & Serializable) (x, y) -> Math.max(x, y); + DoubleAccumulator a = new DoubleAccumulator(op, Double.NEGATIVE_INFINITY); a.accumulate(17.5d); DoubleAccumulator result = echo(a); if (result.get() != a.get()) @@ -89,8 +89,8 @@ static void testLongAdder() { } static void testLongAccumulator() { - LongBinaryOperator plus = (LongBinaryOperator & Serializable) (x, y) -> x + y; - LongAccumulator a = new LongAccumulator(plus, -2); + LongBinaryOperator op = (LongBinaryOperator & Serializable) (x, y) -> Math.max(x, y); + LongAccumulator a = new LongAccumulator(op, Long.MIN_VALUE); a.accumulate(34); LongAccumulator result = echo(a); if (result.get() != a.get()) diff --git a/test/jdk/javax/swing/JMenu/TestSubMenuArrowPosition.java b/test/jdk/javax/swing/JMenu/TestSubMenuArrowPosition.java new file mode 100644 index 0000000000000..2d069d3506ca2 --- /dev/null +++ b/test/jdk/javax/swing/JMenu/TestSubMenuArrowPosition.java @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 7189422
+ * @key headful
+ * @requires (os.family == "mac")
+ * @summary Verifies arrow position in submenu with empty title
+ * @run main TestSubMenuArrowPosition
+ */
+
+import java.io.File;
+import java.awt.event.InputEvent;
+import java.awt.image.BufferedImage;
+import java.awt.Color;
+import java.awt.Point;
+import java.awt.Rectangle;
+import java.awt.Robot;
+import javax.swing.JFrame;
+import javax.swing.JMenu;
+import javax.swing.JMenuBar;
+import javax.swing.SwingUtilities;
+import javax.imageio.ImageIO;
+
+public class TestSubMenuArrowPosition {
+
+    private static JFrame frame;
+    private static JMenu menu;
+    private static JMenu subMenu;
+
+    public static void main(String[] args) throws Exception {
+        Robot robot = new Robot();
+        robot.setAutoDelay(100);
+        try {
+            SwingUtilities.invokeAndWait(() -> {
+                frame = new JFrame();
+                JMenuBar menuBar = new JMenuBar();
+                menu = new JMenu("Test menu");
+                subMenu = new JMenu("");
+
+                menu.add(subMenu);
+                menuBar.add(menu);
+
+                frame.setJMenuBar(menuBar);
+                frame.setSize(300, 300);
+                frame.setLocationRelativeTo(null);
+                frame.setVisible(true);
+            });
+
+            robot.waitForIdle();
+            robot.delay(1000);
+
+            Point p = menu.getLocationOnScreen();
+            robot.mouseMove(p.x+5, p.y+5);
+            robot.mousePress(InputEvent.BUTTON1_DOWN_MASK);
+            robot.mouseRelease(InputEvent.BUTTON1_DOWN_MASK);
+            robot.waitForIdle();
+            robot.delay(1000);
+
+            p = subMenu.getLocationOnScreen();
+            BufferedImage img =
+                robot.createScreenCapture(new Rectangle(p.x, p.y,
+                                                        subMenu.getWidth(),
+                                                        subMenu.getHeight()));
+
+            System.out.println("width " + img.getWidth() +
+                               " height " + img.getHeight());
+            Color prevColor = new Color(img.getRGB(img.getWidth() / 2,
+                                                   img.getHeight() / 2));
+            boolean passed = false;
+            for (int x = img.getWidth() / 2; x < img.getWidth() - 1; ++x) {
+                System.out.println("x " + x + " rgb = " +
+                                   Integer.toHexString(
+                                       img.getRGB(x, img.getHeight() / 2)));
+                Color c = new Color(img.getRGB(x, img.getHeight() / 2));
+                if (!c.equals(prevColor)) {
+                    passed = true;
+                }
+                prevColor = c;
+            }
+            if (!passed) {
+                ImageIO.write(img, "png", new File("SimpleTest.png"));
+                throw new RuntimeException("Submenu's arrow has wrong position");
+            }
+        } finally {
+            SwingUtilities.invokeAndWait(() -> {
+                if (frame != null) {
+                    frame.dispose();
+                }
+            });
+        }
+
+    }
+}
diff --git a/test/jdk/javax/swing/text/DefaultStyledDocument/DocNegLenCharAttrTest.java
new file mode 100644
index 0000000000000..04da5630375ca
--- /dev/null
+++
b/test/jdk/javax/swing/text/DefaultStyledDocument/DocNegLenCharAttrTest.java @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.awt.BorderLayout; +import java.awt.Dimension; +import java.lang.reflect.InvocationTargetException; + +import javax.swing.JFrame; +import javax.swing.JLabel; +import javax.swing.JTextPane; +import javax.swing.SwingUtilities; +import javax.swing.text.AttributeSet; +import javax.swing.text.DefaultStyledDocument; +import javax.swing.text.SimpleAttributeSet; +import javax.swing.text.StyleConstants; + +/* + * @test + * @bug 8291792 + * @key headful + * @summary Test to check if negative length check is implemented in + * setCharacterAttributes(). Test should not throw any exception on + * negative length. + * @run main DocNegLenCharAttrTest + */ +public class DocNegLenCharAttrTest { + private static JFrame frame; + public static void main(String[] args) throws InterruptedException, InvocationTargetException { + try { + SwingUtilities.invokeAndWait(new Runnable() { + public void run() { + test(); + } + }); + } finally { + frame.dispose(); + } + System.out.println("Test Pass!"); + } + + public static void test() { + DefaultStyledDocument doc; + frame = new JFrame(); + doc = new DefaultStyledDocument(); + JTextPane text = new JTextPane(); + text.setDocument(doc); + text.setText("hello world"); + doc.setCharacterAttributes(6, -5, + createLabelAttribute("world"), true); + + frame.setPreferredSize(new Dimension(100,70)); + frame.add(text); + frame.setLayout(new BorderLayout()); + frame.add(text,BorderLayout.SOUTH); + frame.setVisible(true); + frame.pack(); + } + + private static AttributeSet createLabelAttribute(String text){ + JLabel lbl = new JLabel(text.toUpperCase()); + SimpleAttributeSet attr = new SimpleAttributeSet(); + StyleConstants.setComponent(attr,lbl); + return attr; + } +} diff --git a/test/jdk/jdk/jfr/event/compiler/TestCodeSweeper.java b/test/jdk/jdk/jfr/event/compiler/TestCodeSweeper.java index c2a6977de668b..cb2a3f41b5582 100644 --- a/test/jdk/jdk/jfr/event/compiler/TestCodeSweeper.java +++ b/test/jdk/jdk/jfr/event/compiler/TestCodeSweeper.java @@ -39,11 +39,9 @@ import jdk.test.whitebox.code.CodeBlob; /** - * Test for events: vm/code_sweeper/sweep vm/code_cache/full vm/compiler/failure + * Test for events: vm/code_cache/full vm/compiler/failure * - * We verify: 1. That sweptCount >= flushedCount + zombifiedCount 2. That - * sweepIndex increases by 1. 3. We should get at least one of each of the - * events listed above. 
diff --git a/test/jdk/jdk/jfr/event/compiler/TestCodeSweeper.java b/test/jdk/jdk/jfr/event/compiler/TestCodeSweeper.java
index c2a6977de668b..cb2a3f41b5582 100644
--- a/test/jdk/jdk/jfr/event/compiler/TestCodeSweeper.java
+++ b/test/jdk/jdk/jfr/event/compiler/TestCodeSweeper.java
@@ -39,11 +39,9 @@ import jdk.test.whitebox.code.CodeBlob;
 
 /**
- * Test for events: vm/code_sweeper/sweep vm/code_cache/full vm/compiler/failure
+ * Test for events: vm/code_cache/full vm/compiler/failure
  *
- * We verify: 1. That sweptCount >= flushedCount + zombifiedCount 2. That
- * sweepIndex increases by 1. 3. We should get at least one of each of the
- * events listed above.
+ * We verify that we get at least one of each of the events listed above.
  *
  * NOTE! The test is usually able to trigger the events but not always. If an
  * event is received, the event is verified. If an event is missing, we do NOT
@@ -65,7 +63,6 @@ public class TestCodeSweeper {
     private static final int COMP_LEVEL_FULL_OPTIMIZATION = 4;
     private static final int SIZE = 1;
     private static final String METHOD_NAME = "verifyFullEvent";
-    private static final String pathSweep = EventNames.SweepCodeCache;
     private static final String pathFull = EventNames.CodeCacheFull;
     private static final String pathFailure = EventNames.CompilationFailure;
     public static final long SEGMENT_SIZE = WhiteBox.getWhiteBox().getUintxVMFlag("CodeCacheSegmentSize");
@@ -82,14 +79,12 @@ public static void main(String[] args) throws Throwable {
         System.out.println("************************************************");
 
         Recording r = new Recording();
-        r.enable(pathSweep);
         r.enable(pathFull);
         r.enable(pathFailure);
         r.start();
         provokeEvents();
         r.stop();
 
-        int countEventSweep = 0;
         int countEventFull = 0;
         int countEventFailure = 0;
 
@@ -97,10 +92,6 @@ public static void main(String[] args) throws Throwable {
         Events.hasEvents(events);
         for (RecordedEvent event : events) {
             switch (event.getEventType().getName()) {
-                case pathSweep:
-                    countEventSweep++;
-                    verifySingleSweepEvent(event);
-                    break;
                 case pathFull:
                     countEventFull++;
                     verifyFullEvent(event);
@@ -112,7 +103,7 @@ public static void main(String[] args) throws Throwable {
             }
         }
 
-        System.out.println(String.format("eventCount: %d, %d, %d", countEventSweep, countEventFull, countEventFailure));
+        System.out.println(String.format("eventCount: %d, %d", countEventFull, countEventFailure));
     }
 
     private static boolean canAllocate(double size, long maxSize, MemoryPoolMXBean bean) {
@@ -131,7 +122,6 @@ private static void provokeEvents() throws NoSuchMethodException, InterruptedExc
                 + "." + METHOD_NAME + "\", " + "BackgroundCompilation: false }]";
 
         // Fill up code heaps until they are almost full
-        // to trigger the vm/code_sweeper/sweep event.
         ArrayList<Long> blobs = new ArrayList<>();
         MemoryPoolMXBean bean = BlobType.All.getMemoryPool();
         long max = bean.getUsage().getMax();
@@ -195,15 +185,6 @@ private static void verifyFailureEvent(RecordedEvent event) throws Throwable {
         Events.assertField(event, "compileId").atLeast(0);
     }
 
-    private static void verifySingleSweepEvent(RecordedEvent event) throws Throwable {
-        int flushedCount = Events.assertField(event, "flushedCount").atLeast(0).getValue();
-        int zombifiedCount = Events.assertField(event, "zombifiedCount").atLeast(0).getValue();
-        Events.assertField(event, "sweptCount").atLeast(flushedCount + zombifiedCount);
-        Events.assertField(event, "sweepId").atLeast(0);
-        Asserts.assertGreaterThanOrEqual(event.getStartTime(), Instant.EPOCH, "startTime was < 0");
-        Asserts.assertGreaterThanOrEqual(event.getEndTime(), event.getStartTime(), "startTime was > endTime");
-    }
-
     /** Returns true if less <= bigger. */
     private static boolean isOctalLessOrEqual(String less, String bigger) {
         if (less.length() > bigger.length()) {
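[Ed. note] The surviving verification style here ("verify what arrived, don't fail on absence") needs only the public JFR API. A condensed, standalone form of the pattern, with the jdk.test.lib Events/Asserts helpers left out (the dump file name is arbitrary):

import java.nio.file.Path;
import java.util.List;
import jdk.jfr.Recording;
import jdk.jfr.consumer.RecordedEvent;
import jdk.jfr.consumer.RecordingFile;

public class RecordIfPresent {
    public static void main(String[] args) throws Exception {
        Path dump = Path.of("recording.jfr");
        try (Recording r = new Recording()) {
            r.enable("jdk.CodeCacheFull");
            r.enable("jdk.CompilationFailure");
            r.start();
            // ... provoke code-cache pressure here ...
            r.stop();
            r.dump(dump);
        }
        // Verify each event that did arrive; absence is not a failure.
        List<RecordedEvent> events = RecordingFile.readAllEvents(dump);
        for (RecordedEvent e : events) {
            System.out.println(e.getEventType().getName());
        }
    }
}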
diff --git a/test/jdk/jdk/jfr/event/compiler/TestCodeSweeperConfig.java b/test/jdk/jdk/jfr/event/compiler/TestCodeSweeperConfig.java
deleted file mode 100644
index 65444b0fd8813..0000000000000
--- a/test/jdk/jdk/jfr/event/compiler/TestCodeSweeperConfig.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.compiler;
-
-import java.util.List;
-
-import jdk.jfr.Recording;
-import jdk.jfr.consumer.RecordedEvent;
-import jdk.test.lib.jfr.EventNames;
-import jdk.test.lib.jfr.Events;
-
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- * @library /test/lib
- * @run main/othervm -XX:+UseCodeCacheFlushing -XX:-SegmentedCodeCache jdk.jfr.event.compiler.TestCodeSweeperConfig
- * @run main/othervm -XX:+UseCodeCacheFlushing -XX:+SegmentedCodeCache jdk.jfr.event.compiler.TestCodeSweeperConfig
- */
-public class TestCodeSweeperConfig {
-
-    private final static String EVENT_NAME = EventNames.CodeSweeperConfiguration;
-
-    public static void main(String[] args) throws Exception {
-        Recording recording = new Recording();
-        recording.enable(EVENT_NAME);
-        recording.start();
-        recording.stop();
-
-        List<RecordedEvent> events = Events.fromRecording(recording);
-        Events.hasEvents(events);
-        for (RecordedEvent event : events) {
-            System.out.println("Event: " + event);
-            Events.assertField(event, "sweeperEnabled").equal(true);
-            Events.assertField(event, "flushingEnabled").equal(true);
-        }
-    }
-}
diff --git a/test/jdk/jdk/jfr/event/compiler/TestCodeSweeperStats.java b/test/jdk/jdk/jfr/event/compiler/TestCodeSweeperStats.java
deleted file mode 100644
index da9c1520a767c..0000000000000
--- a/test/jdk/jdk/jfr/event/compiler/TestCodeSweeperStats.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Copyright (c) 2013, 2022, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.compiler;
-
-import java.io.File;
-import java.lang.reflect.Method;
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.nio.file.Paths;
-import java.util.List;
-
-import jdk.test.whitebox.WhiteBox;
-import jdk.jfr.Recording;
-import jdk.jfr.consumer.RecordedEvent;
-import jdk.test.lib.classloader.FilterClassLoader;
-import jdk.test.lib.classloader.ParentLastURLClassLoader;
-import jdk.test.lib.jfr.EventNames;
-import jdk.test.lib.jfr.Events;
-import jdk.test.lib.Utils;
-
-/**
- * @test TestCodeSweeperStats
- * @key jfr
- * @requires vm.hasJFR
- * @library /test/lib
- * @requires vm.compMode!="Xint"
- * @build jdk.test.whitebox.WhiteBox
- * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
- * @run main/othervm -Xbootclasspath/a:.
- *      -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
- *      -XX:CompileOnly=jdk.jfr.event.compiler.TestCodeSweeperStats::dummyMethod
- *      -XX:+SegmentedCodeCache jdk.jfr.event.compiler.TestCodeSweeperStats
- * @run main/othervm -Xbootclasspath/a:.
- *      -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
- *      -XX:CompileOnly=jdk.jfr.event.compiler.TestCodeSweeperStats::dummyMethod
- *      -XX:-SegmentedCodeCache jdk.jfr.event.compiler.TestCodeSweeperStats
- */
-public class TestCodeSweeperStats {
-    private static final String EVENT_NAME = EventNames.CodeSweeperStatistics;
-    private static final int WAIT_TIME = 10_000;
-    private static final String CLASS_METHOD_TO_COMPILE = "dummyMethod";
-    private static final int METHODS_TO_COMPILE = Integer.getInteger("compile.methods.count", 10);
-    private static final int COMP_LEVEL_SIMPLE = 1;
-    private static final int COMP_LEVEL_FULL_OPTIMIZATION = 4;
-
-    public static void main(String[] args) throws Exception {
-        Recording recording = new Recording();
-        recording.enable(EVENT_NAME).with("period", "endChunk");
-        recording.start();
-        compileAndSweep();
-        recording.stop();
-
-        List<RecordedEvent> events = Events.fromRecording(recording);
-        Events.hasEvents(events);
-        for (RecordedEvent event : events) {
-            Events.assertField(event, "sweepCount").atLeast(1);
-            Events.assertField(event, "methodReclaimedCount").equal(METHODS_TO_COMPILE);
-            Events.assertField(event, "totalSweepTime").atLeast(0L);
-            Events.assertField(event, "peakFractionTime").atLeast(0L);
-            Events.assertField(event, "peakSweepTime").atLeast(0L);
-        }
-    }
-
-    private static void compileAndSweep() throws InterruptedException {
-        WhiteBox WB = WhiteBox.getWhiteBox();
-        for (int i = 0; i < METHODS_TO_COMPILE; i++) {
-            System.out.println("compile " + i);
-            compileMethod();
-        }
-
-        WB.deoptimizeAll();
-        System.out.println("All methods deoptimized");
-
-        // method will be sweeped out of code cache after 5 sweep cycles
-        for (int i = 0; i < 5; i++) {
-            WB.fullGC();
-            WB.forceNMethodSweep();
-        }
-
-        // now wait for event(s) to be fired
-        Thread.sleep(WAIT_TIME);
-    }
-
-    public void dummyMethod() {
-        System.out.println("Hello World!");
-    }
-
-    protected static void compileMethod() {
-        ClassLoader current = TestCodeSweeperStats.class.getClassLoader();
-        String[] cpaths = System.getProperty("test.classes", ".").split(File.pathSeparator);
-        URL[] urls = new URL[cpaths.length];
-        try {
-            for (int i = 0; i < cpaths.length; i++) {
-                urls[i] = Paths.get(cpaths[i]).toUri().toURL();
-            }
-        } catch (MalformedURLException e) {
-            throw new Error(e);
-        }
-
-        String currentClassName = TestCodeSweeperStats.class.getName();
-        FilterClassLoader cl = new FilterClassLoader(new ParentLastURLClassLoader(urls, current),
-                ClassLoader.getSystemClassLoader(), (name) -> currentClassName.equals(name));
-        Class<?> loadedClass = null;
-        String className = currentClassName;
-        try {
-            loadedClass = cl.loadClass(className);
-        } catch (ClassNotFoundException ex) {
-            throw new Error("Couldn't load class " + className, ex);
-        }
-        try {
-            Method mtd = loadedClass.getMethod(CLASS_METHOD_TO_COMPILE);
-            WhiteBox WB = WhiteBox.getWhiteBox();
-            WB.testSetDontInlineMethod(mtd, true);
-            String directive = "[{ match: \"" + TestCodeSweeperStats.class.getName().replace('.', '/')
-                    + "." + CLASS_METHOD_TO_COMPILE + "\", " + "BackgroundCompilation: false }]";
-            WB.addCompilerDirective(directive);
-            if (!WB.enqueueMethodForCompilation(mtd, COMP_LEVEL_FULL_OPTIMIZATION)) {
-                WB.enqueueMethodForCompilation(mtd, COMP_LEVEL_SIMPLE);
-            }
-            Utils.waitForCondition(() -> WB.isMethodCompiled(mtd));
-        } catch (NoSuchMethodException e) {
-            throw new Error("An exception while trying compile method " + e.getMessage(), e);
-        }
-    }
-}
diff --git a/test/jdk/jdk/jfr/event/compiler/TestJitRestart.java b/test/jdk/jdk/jfr/event/compiler/TestJitRestart.java
index 711eb6317a638..c1a11356c7840 100644
--- a/test/jdk/jdk/jfr/event/compiler/TestJitRestart.java
+++ b/test/jdk/jdk/jfr/event/compiler/TestJitRestart.java
@@ -72,7 +72,7 @@ private static boolean testWithBlobType(BlobType btype, long availableSize) thro
         r.start();
         long addr = WHITE_BOX.allocateCodeBlob(availableSize, btype.id);
         WHITE_BOX.freeCodeBlob(addr);
-        WHITE_BOX.forceNMethodSweep();
+        WHITE_BOX.fullGC();
         r.stop();
 
         List<RecordedEvent> events = Events.fromRecording(r);
diff --git a/test/jdk/sun/java2d/Disposer/TestDisposerRace.java b/test/jdk/sun/java2d/Disposer/TestDisposerRace.java
new file mode 100644
index 0000000000000..e22e59799515c
--- /dev/null
+++ b/test/jdk/sun/java2d/Disposer/TestDisposerRace.java
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2022, JetBrains s.r.o.. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+import javax.swing.SwingUtilities;
+
+import sun.java2d.Disposer;
+import sun.java2d.DisposerRecord;
+
+/**
+ * @test
+ * @bug 8289208
+ * @summary Verifies Disposer robustness in a multi-threaded environment.
+ * @run main/othervm -mx128m TestDisposerRace
+ * @modules java.desktop/sun.java2d
+ */
+public final class TestDisposerRace {
+    private static final AtomicInteger recordsCount = new AtomicInteger();
+    private static volatile boolean disposerDone = false;
+
+    public static void main(String[] args) throws Exception {
+        TestDisposerRace test = new TestDisposerRace();
+        test.run();
+
+        checkRecordsCountIsSane();
+        if (recordsCount.get() > 0) {
+            throw new RuntimeException("Some records (" + recordsCount + ") have not been disposed");
+        }
+    }
+
+    TestDisposerRace() {
+        addRecordsToDisposer(30_000);
+    }
+
+    void run() throws Exception {
+        generateOOME();
+        for (int i = 0; i < 1000; ++i) {
+            SwingUtilities.invokeAndWait(Disposer::pollRemove);
+            if (i % 10 == 0) {
+                // Adding records will race with the disposer trying to remove them
+                addRecordsToDisposer(1000);
+            }
+        }
+
+        Disposer.addObjectRecord(new Object(), new FinalDisposerRecord());
+
+        while (!disposerDone) {
+            generateOOME();
+        }
+    }
+
+    private static void checkRecordsCountIsSane() {
+        if (recordsCount.get() < 0) {
+            throw new RuntimeException("Disposed more records than were added");
+        }
+    }
+
+    private void addRecordsToDisposer(int count) {
+        checkRecordsCountIsSane();
+
+        recordsCount.addAndGet(count);
+
+        MyDisposerRecord disposerRecord = new MyDisposerRecord();
+        for (int i = 0; i < count; i++) {
+            Disposer.addObjectRecord(new Object(), disposerRecord);
+        }
+    }
+
+    class MyDisposerRecord implements DisposerRecord {
+        public void dispose() {
+            recordsCount.decrementAndGet();
+        }
+    }
+
+    class FinalDisposerRecord implements DisposerRecord {
+        public void dispose() {
+            disposerDone = true;
+        }
+    }
+
+    private static void giveGCAChance() {
+        try {
+            Thread.sleep(2000);
+        } catch (InterruptedException ignored) {}
+    }
+
+    private static void generateOOME() throws Exception {
+        final List<byte[]> leak = new LinkedList<>();
+        try {
+            while (true) {
+                leak.add(new byte[1024 * 1024]);
+            }
+        } catch (OutOfMemoryError ignored) {}
+        giveGCAChance();
+    }
+}
diff --git a/test/lib/jdk/test/lib/jfr/EventNames.java b/test/lib/jdk/test/lib/jfr/EventNames.java
index 754a1328026f4..91cc11e62bc25 100644
--- a/test/lib/jdk/test/lib/jfr/EventNames.java
+++ b/test/lib/jdk/test/lib/jfr/EventNames.java
@@ -156,9 +156,6 @@ public class EventNames {
     public final static String CompilerConfiguration = PREFIX + "CompilerConfiguration";
     public final static String CodeCacheStatistics = PREFIX + "CodeCacheStatistics";
     public final static String CodeCacheConfiguration = PREFIX + "CodeCacheConfiguration";
-    public final static String CodeSweeperStatistics = PREFIX + "CodeSweeperStatistics";
-    public final static String CodeSweeperConfiguration = PREFIX + "CodeSweeperConfiguration";
-    public final static String SweepCodeCache = PREFIX + "SweepCodeCache";
     public final static String CodeCacheFull = PREFIX + "CodeCacheFull";
     public final static String ObjectAllocationInNewTLAB = PREFIX + "ObjectAllocationInNewTLAB";
     public final static String ObjectAllocationOutsideTLAB = PREFIX + "ObjectAllocationOutsideTLAB";
diff --git a/test/lib/jdk/test/whitebox/WhiteBox.java b/test/lib/jdk/test/whitebox/WhiteBox.java
index 6df2a1cd3cc31..806d6d74a440a 100644
--- a/test/lib/jdk/test/whitebox/WhiteBox.java
+++ b/test/lib/jdk/test/whitebox/WhiteBox.java
@@ -401,7 +401,6 @@ public long allocateCodeBlob(long size, int type) {
     return allocateCodeBlob( intSize, type);
   }
   public native void freeCodeBlob(long addr);
-  public native void forceNMethodSweep();
  public native Object[] getCodeHeapEntries(int type);
   public native int getCompilationActivityMode();
   private native long getMethodData0(Executable method);
diff --git a/test/micro/org/openjdk/bench/java/security/ProtectionDomainBench.java b/test/micro/org/openjdk/bench/java/security/ProtectionDomainBench.java
new file mode 100644
index 0000000000000..83765805f77c6
--- /dev/null
+++ b/test/micro/org/openjdk/bench/java/security/ProtectionDomainBench.java
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package org.openjdk.bench.java.security;
+
+import java.security.*;
+import java.net.*;
+import java.io.*;
+
+import java.util.concurrent.TimeUnit;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Warmup;
+
+import org.openjdk.bench.util.InMemoryJavaCompiler;
+
+@State(Scope.Thread)
+@OutputTimeUnit(TimeUnit.SECONDS)
+@Warmup(iterations = 5, time = 2)
+@Measurement(iterations = 5, time = 2)
+@BenchmarkMode(Mode.Throughput)
+public class ProtectionDomainBench {
+
+    @Param({"100"})
+    public int numberOfClasses;
+
+    @Param({"10"})
+    public int numberOfCodeSources;
+
+    static byte[][] compiledClasses;
+    static Class<?>[] loadedClasses;
+    static String[] classNames;
+    static int index = 0;
+    static CodeSource[] cs;
+
+    static String B(int count) {
+        return "public class B" + count + " {"
+                + "    static int intField;"
+                + "    public static void compiledMethod() { "
+                + "        intField++;"
+                + "    }"
+                + "}";
+    }
+
+    @Setup(Level.Trial)
+    public void setupClasses() throws Exception {
+        compiledClasses = new byte[numberOfClasses][];
+        loadedClasses = new Class<?>[numberOfClasses];
+        classNames = new String[numberOfClasses];
+        cs = new CodeSource[numberOfCodeSources];
+
+        for (int i = 0; i < numberOfCodeSources; i++) {
+            URL u = new URL("file:/tmp/duke" + i);
+            cs[i] = new CodeSource(u, (java.security.cert.Certificate[]) null);
+        }
+
+        for (int i = 0; i < numberOfClasses; i++) {
+            classNames[i] = "B" + i;
+            compiledClasses[i] = InMemoryJavaCompiler.compile(classNames[i], B(i));
+        }
+    }
+
+    static class ProtectionDomainBenchLoader extends SecureClassLoader {
+
+        ProtectionDomainBenchLoader() {
+            super();
+        }
+
+        ProtectionDomainBenchLoader(ClassLoader parent) {
+            super(parent);
+        }
+
+        @Override
+        protected Class<?> findClass(String name) throws ClassNotFoundException {
+            if (name.equals(classNames[index] /* "B" + index */)) {
+                assert compiledClasses[index] != null;
+                return defineClass(name, compiledClasses[index], 0,
+                        compiledClasses[index].length, cs[index % cs.length]);
+            } else {
+                return super.findClass(name);
+            }
+        }
+    }
+
+    void work() throws ClassNotFoundException {
+        ProtectionDomainBench.ProtectionDomainBenchLoader loader1 =
+                new ProtectionDomainBench.ProtectionDomainBenchLoader();
+
+        for (index = 0; index < compiledClasses.length; index++) {
+            Class<?> c = loader1.findClass(classNames[index]);
+            loadedClasses[index] = c;
+        }
+    }
+
+    @Benchmark
+    @Fork(value = 3, jvmArgsPrepend={"-Djava.security.manager=allow"})
+    public void withSecurityManager() throws ClassNotFoundException {
+        work();
+    }
+
+    @Benchmark
+    @Fork(value = 3)
+    public void noSecurityManager() throws ClassNotFoundException {
+        work();
+    }
+}
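[Ed. note] In the JDK tree this benchmark is normally driven through the build system's micro-test runner (something like make test TEST="micro:java.security.ProtectionDomainBench"; see doc/testing). For reference, an equivalent standalone JMH launcher, illustrative only and not part of this change, would be:

import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;

public class RunProtectionDomainBench {
    public static void main(String[] args) throws Exception {
        // Select the benchmark class by simple name and run with its
        // declared @Fork/@Warmup/@Measurement settings.
        Options opts = new OptionsBuilder()
                .include(ProtectionDomainBench.class.getSimpleName())
                .build();
        new Runner(opts).run();
    }
}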
diff --git a/test/micro/org/openjdk/bench/util/InMemoryJavaCompiler.java b/test/micro/org/openjdk/bench/util/InMemoryJavaCompiler.java
new file mode 100644
index 0000000000000..5f1fafdf05cc8
--- /dev/null
+++ b/test/micro/org/openjdk/bench/util/InMemoryJavaCompiler.java
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package org.openjdk.bench.util;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import javax.tools.ForwardingJavaFileManager;
+import javax.tools.FileObject;
+import javax.tools.JavaCompiler;
+import javax.tools.JavaCompiler.CompilationTask;
+import javax.tools.JavaFileManager;
+import javax.tools.JavaFileObject;
+import javax.tools.JavaFileObject.Kind;
+import javax.tools.SimpleJavaFileObject;
+import javax.tools.StandardLocation;
+import javax.tools.ToolProvider;
+
+/**
+ * {@code InMemoryJavaCompiler} can be used for compiling a {@link
+ * CharSequence} to a {@code byte[]}.
+ *
+ * The compiler will not use the file system at all, instead using a {@link
+ * ByteArrayOutputStream} for storing the byte code. For the source code, any
+ * kind of {@link CharSequence} can be used, e.g. {@link String}, {@link
+ * StringBuffer} or {@link StringBuilder}.
+ *
+ * The {@code InMemoryJavaCompiler} can be used together with a {@code
+ * ByteClassLoader} to easily compile and load source code from a
+ * {@link String}:
+ *
+ * <pre>
+ * {@code
+ * import jdk.test.lib.compiler.InMemoryJavaCompiler;
+ * import jdk.test.lib.ByteClassLoader;
+ *
+ * class Example {
+ *     public static void main(String[] args) {
+ *         String className = "Foo";
+ *         String sourceCode = "public class " + className + " {" +
+ *                             "    public void bar() {" +
+ *                             "        System.out.println(\"Hello from bar!\");" +
+ *                             "    }" +
+ *                             "}";
+ *         byte[] byteCode = InMemoryJavaCompiler.compile(className, sourceCode);
+ *         Class fooClass = ByteClassLoader.load(className, byteCode);
+ *     }
+ * }
+ * }
+ * </pre>
+ */
+public class InMemoryJavaCompiler {
+    private static class MemoryJavaFileObject extends SimpleJavaFileObject {
+        private final String className;
+        private final CharSequence sourceCode;
+        private final ByteArrayOutputStream byteCode;
+
+        public MemoryJavaFileObject(String className, CharSequence sourceCode) {
+            super(URI.create("string:///" + className.replace('.','/') + Kind.SOURCE.extension), Kind.SOURCE);
+            this.className = className;
+            this.sourceCode = sourceCode;
+            this.byteCode = new ByteArrayOutputStream();
+        }
+
+        @Override
+        public CharSequence getCharContent(boolean ignoreEncodingErrors) {
+            return sourceCode;
+        }
+
+        @Override
+        public OutputStream openOutputStream() throws IOException {
+            return byteCode;
+        }
+
+        public byte[] getByteCode() {
+            return byteCode.toByteArray();
+        }
+
+        public String getClassName() {
+            return className;
+        }
+    }
+
+    private static class FileManagerWrapper extends ForwardingJavaFileManager<JavaFileManager> {
+        private static final Location PATCH_LOCATION = new Location() {
+            @Override
+            public String getName() {
+                return "patch module location";
+            }
+
+            @Override
+            public boolean isOutputLocation() {
+                return false;
+            }
+        };
+        private final MemoryJavaFileObject file;
+        private final String moduleOverride;
+
+        public FileManagerWrapper(MemoryJavaFileObject file, String moduleOverride) {
+            super(getCompiler().getStandardFileManager(null, null, null));
+            this.file = file;
+            this.moduleOverride = moduleOverride;
+        }
+
+        @Override
+        public JavaFileObject getJavaFileForOutput(Location location, String className,
+                                                   Kind kind, FileObject sibling)
+                throws IOException {
+            if (!file.getClassName().equals(className)) {
+                throw new IOException("Expected class with name " + file.getClassName() +
+                                      ", but got " + className);
+            }
+            return file;
+        }
+
+        @Override
+        public Location getLocationForModule(Location location, JavaFileObject fo) throws IOException {
+            if (fo == file && moduleOverride != null) {
+                return PATCH_LOCATION;
+            }
+            return super.getLocationForModule(location, fo);
+        }
+
+        @Override
+        public String inferModuleName(Location location) throws IOException {
+            if (location == PATCH_LOCATION) {
+                return moduleOverride;
+            }
+            return super.inferModuleName(location);
+        }
+
+        @Override
+        public boolean hasLocation(Location location) {
+            return super.hasLocation(location) || location == StandardLocation.PATCH_MODULE_PATH;
+        }
+    }
+
+    /**
+     * Compiles the class with the given name and source code.
+     *
+     * @param className  The name of the class
+     * @param sourceCode The source code for the class with name {@code className}
+     * @param options    additional command line options
+     * @throws RuntimeException if the compilation did not succeed
+     * @return The resulting byte code from the compilation
+     */
+    public static byte[] compile(String className, CharSequence sourceCode, String... options) {
+        MemoryJavaFileObject file = new MemoryJavaFileObject(className, sourceCode);
+        CompilationTask task = getCompilationTask(file, options);
+
+        if (!task.call()) {
+            throw new RuntimeException("Could not compile " + className + " with source code " + sourceCode);
+        }
+
+        return file.getByteCode();
+    }
+
+    private static JavaCompiler getCompiler() {
+        return ToolProvider.getSystemJavaCompiler();
+    }
+
+    private static CompilationTask getCompilationTask(MemoryJavaFileObject file, String... options) {
+        List<String> opts = new ArrayList<>();
+        String moduleOverride = null;
+        for (String opt : options) {
+            if (opt.startsWith("--patch-module=")) {
+                moduleOverride = opt.substring("--patch-module=".length());
+            } else {
+                opts.add(opt);
+            }
+        }
+        return getCompiler().getTask(null, new FileManagerWrapper(file, moduleOverride), null, opts, null, Arrays.asList(file));
+    }
+}
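[Ed. note] A quick usage sketch for the helper above: compile a class in memory, then load the resulting bytes with a throwaway loader. The loader here is our illustration; InMemoryJavaCompiler itself only produces the byte[]:

import org.openjdk.bench.util.InMemoryJavaCompiler;

public class InMemoryCompileDemo {
    public static void main(String[] args) throws Exception {
        String src = "public class Hello {"
                   + "    public static String greet() { return \"hi\"; }"
                   + "}";
        byte[] byteCode = InMemoryJavaCompiler.compile("Hello", src);
        // One-off loader: define exactly the class we just compiled.
        ClassLoader loader = new ClassLoader() {
            @Override
            protected Class<?> findClass(String name) throws ClassNotFoundException {
                if (name.equals("Hello")) {
                    return defineClass(name, byteCode, 0, byteCode.length);
                }
                throw new ClassNotFoundException(name);
            }
        };
        Class<?> hello = loader.loadClass("Hello");
        System.out.println(hello.getMethod("greet").invoke(null)); // prints "hi"
    }
}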