Merge
commit 7613af50d9

.hgtags
@ -519,3 +519,4 @@ f0f5d23449d31f1b3580c8a73313918cafeaefd7 jdk-12+11
6f04692c7d5137ee34a6bd94c0c8a6c9219cb127 jdk-12+14
f8626bcc169813a4b2a15880386b952719d1d6d1 jdk-12+15
199658d1ef860cdc17055b4fd3e94b057f292fe9 jdk-12+16
eefa65e142af305923d2adcd596fab9c639723a1 jdk-12+17
@ -707,7 +707,6 @@ ls build/linux-aarch64-normal-server-release/</code></pre></li>
<p>Additional architectures might be supported by Debian/Ubuntu Ports.</p>
<h3 id="building-for-armaarch64">Building for ARM/aarch64</h3>
<p>A common cross-compilation target is the ARM CPU. When building for ARM, it is useful to set the ABI profile. A number of pre-defined ABI profiles are available using <code>--with-abi-profile</code>: arm-vfp-sflt, arm-vfp-hflt, arm-sflt, armv5-vfp-sflt, armv6-vfp-hflt. Note that soft-float ABIs are no longer properly supported by the JDK.</p>
<p>The JDK contains two different ports for the aarch64 platform, one is the original aarch64 port from the <a href="http://openjdk.java.net/projects/aarch64-port">AArch64 Port Project</a> and one is a 64-bit version of the Oracle contributed ARM port. When targeting aarch64, by default the original aarch64 port is used. To select the Oracle ARM 64 port, use <code>--with-cpu-port=arm64</code>. Also set the corresponding value (<code>aarch64</code> or <code>arm64</code>) with <code>--with-abi-profile</code>, to ensure a consistent build.</p>
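<p>As an illustrative sketch only (the target triplet and ABI profile below match the cross-compilation profiles used elsewhere in this change, but your toolchain setup may differ), a hard-float ARM cross build could be configured like this:</p>
<pre><code>$ bash configure --openjdk-target=arm-linux-gnueabihf --with-abi-profile=arm-vfp-hflt</code></pre>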
<h3 id="verifying-the-build">Verifying the Build</h3>
|
||||
<p>The build will end up in a directory named like <code>build/linux-arm-normal-server-release</code>.</p>
|
||||
<p>Inside this build output directory, the <code>images/jdk</code> will contain the newly built JDK, for your <em>target</em> system.</p>
|
||||
|
@ -1080,14 +1080,6 @@ available using `--with-abi-profile`: arm-vfp-sflt, arm-vfp-hflt, arm-sflt,
armv5-vfp-sflt, armv6-vfp-hflt. Note that soft-float ABIs are no longer
properly supported by the JDK.

The JDK contains two different ports for the aarch64 platform, one is the
original aarch64 port from the [AArch64 Port Project](
http://openjdk.java.net/projects/aarch64-port) and one is a 64-bit version of
the Oracle contributed ARM port. When targeting aarch64, by default the
original aarch64 port is used. To select the Oracle ARM 64 port, use
`--with-cpu-port=arm64`. Also set the corresponding value (`aarch64` or
`arm64`) with `--with-abi-profile`, to ensure a consistent build.

### Verifying the Build

The build will end up in a directory named like
@ -18,12 +18,13 @@
</header>
<nav id="TOC">
<ul>
<li><a href="#using-the-run-test-framework">Using the run-test framework</a><ul>
<li><a href="#using-make-test-the-run-test-framework">Using "make test" (the run-test framework)</a><ul>
<li><a href="#configuration">Configuration</a></li>
</ul></li>
<li><a href="#test-selection">Test selection</a><ul>
<li><a href="#jtreg">JTReg</a></li>
<li><a href="#gtest">Gtest</a></li>
<li><a href="#special-tests">Special tests</a></li>
</ul></li>
<li><a href="#test-results-and-summary">Test results and summary</a></li>
<li><a href="#test-suite-control">Test suite control</a><ul>
@ -32,22 +33,23 @@
</ul></li>
</ul>
</nav>
<h2 id="using-the-run-test-framework">Using the run-test framework</h2>
|
||||
<h2 id="using-make-test-the-run-test-framework">Using "make test" (the run-test framework)</h2>
|
||||
<p>This new way of running tests is developer-centric. It assumes that you have built a JDK locally and want to test it. Running common test targets is simple, and more complex ad-hoc combination of tests is possible. The user interface is forgiving, and clearly report errors it cannot resolve.</p>
|
||||
<p>The main target "run-test" uses the jdk-image as the tested product. There is also an alternate target "exploded-run-test" that uses the exploded image instead. Not all tests will run successfully on the exploded image, but using this target can greatly improve rebuild times for certain workflows.</p>
|
||||
<p>The main target <code>test</code> uses the jdk-image as the tested product. There is also an alternate target <code>exploded-test</code> that uses the exploded image instead. Not all tests will run successfully on the exploded image, but using this target can greatly improve rebuild times for certain workflows.</p>
|
||||
<p>Previously, <code>make test</code> was used invoke an old system for running test, and <code>make run-test</code> was used for the new test framework. For backward compatibility with scripts and muscle memory, <code>run-test</code> (and variants like <code>exploded-run-test</code> or <code>run-test-tier1</code>) are kept as aliases. The old system can still be accessed for some time using <code>cd test && make</code>.</p>
|
||||
<p>Some example command-lines:</p>
|
||||
<pre><code>$ make run-test-tier1
$ make run-test-jdk_lang JTREG="JOBS=8"
$ make run-test TEST=jdk_lang
$ make run-test-only TEST="gtest:LogTagSet gtest:LogTagSetDescriptions" GTEST="REPEAT=-1"
$ make run-test TEST="hotspot:hotspot_gc" JTREG="JOBS=1;TIMEOUT=8;VM_OPTIONS=-XshowSettings -Xlog:gc+ref=debug"
$ make run-test TEST="jtreg:test/hotspot:hotspot_gc test/hotspot/jtreg/native_sanity/JniVersion.java"
$ make exploded-run-test TEST=tier2</code></pre>
<pre><code>$ make test-tier1
$ make test-jdk_lang JTREG="JOBS=8"
$ make test TEST=jdk_lang
$ make test-only TEST="gtest:LogTagSet gtest:LogTagSetDescriptions" GTEST="REPEAT=-1"
$ make test TEST="hotspot:hotspot_gc" JTREG="JOBS=1;TIMEOUT=8;VM_OPTIONS=-XshowSettings -Xlog:gc+ref=debug"
$ make test TEST="jtreg:test/hotspot:hotspot_gc test/hotspot/jtreg/native_sanity/JniVersion.java"
$ make exploded-test TEST=tier2</code></pre>
<h3 id="configuration">Configuration</h3>
|
||||
<p>To be able to run JTReg tests, <code>configure</code> needs to know where to find the JTReg test framework. If it is not picked up automatically by configure, use the <code>--with-jtreg=<path to jtreg home></code> option to point to the JTReg framework. Note that this option should point to the JTReg home, i.e. the top directory, containing <code>lib/jtreg.jar</code> etc. (An alternative is to set the <code>JT_HOME</code> environment variable to point to the JTReg home before running <code>configure</code>.)</p>
|
||||
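<p>As a minimal sketch (the JTReg installation path below is only an assumption for illustration), pointing <code>configure</code> at a locally installed JTReg could look like this:</p>
<pre><code>$ bash configure --with-jtreg=/usr/local/jtreg
$ JT_HOME=/usr/local/jtreg bash configure   # equivalent, via the environment</code></pre>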
<h2 id="test-selection">Test selection</h2>
|
||||
<p>All functionality is available using the run-test make target. In this use case, the test or tests to be executed is controlled using the <code>TEST</code> variable. To speed up subsequent test runs with no source code changes, run-test-only can be used instead, which do not depend on the source and test image build.</p>
|
||||
<p>For some common top-level tests, direct make targets have been generated. This includes all JTReg test groups, the hotspot gtest, and custom tests (if present). This means that <code>make run-test-tier1</code> is equivalent to <code>make run-test TEST="tier1"</code>, but the latter is more tab-completion friendly. For more complex test runs, the <code>run-test TEST="x"</code> solution needs to be used.</p>
|
||||
<p>All functionality is available using the <code>test</code> make target. In this use case, the test or tests to be executed is controlled using the <code>TEST</code> variable. To speed up subsequent test runs with no source code changes, <code>test-only</code> can be used instead, which do not depend on the source and test image build.</p>
|
||||
<p>For some common top-level tests, direct make targets have been generated. This includes all JTReg test groups, the hotspot gtest, and custom tests (if present). This means that <code>make test-tier1</code> is equivalent to <code>make test TEST="tier1"</code>, but the latter is more tab-completion friendly. For more complex test runs, the <code>test TEST="x"</code> solution needs to be used.</p>
|
||||
<p>The test specifications given in <code>TEST</code> is parsed into fully qualified test descriptors, which clearly and unambigously show which tests will be run. As an example, <code>:tier1</code> will expand to <code>jtreg:$(TOPDIR)/test/hotspot/jtreg:tier1 jtreg:$(TOPDIR)/test/jdk:tier1 jtreg:$(TOPDIR)/test/langtools:tier1 jtreg:$(TOPDIR)/test/nashorn:tier1 jtreg:$(TOPDIR)/test/jaxp:tier1</code>. You can always submit a list of fully qualified test descriptors in the <code>TEST</code> variable if you want to shortcut the parser.</p>
|
||||
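<p>For example (a sketch only; the exact path prefix depends on your checkout layout), a fully qualified descriptor can be passed directly to shortcut the expansion:</p>
<pre><code>$ make test TEST="jtreg:test/jdk:tier1"</code></pre>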
<h3 id="jtreg">JTReg</h3>
|
||||
<p>JTReg tests can be selected either by picking a JTReg test group, or a selection of files or directories containing JTReg tests.</p>
|
||||
@ -59,6 +61,14 @@ $ make exploded-run-test TEST=tier2</code></pre>
<p>Since the Hotspot Gtest suite is so quick, the default is to run all tests. This is specified by just <code>gtest</code>, or as a fully qualified test descriptor <code>gtest:all</code>.</p>
<p>If you want, you can single out an individual test or a group of tests, for instance <code>gtest:LogDecorations</code> or <code>gtest:LogDecorations.level_test_vm</code>. This can be particularly useful if you want to run a shaky test repeatedly.</p>
<p>For Gtest, there is a separate test suite for each JVM variant. The JVM variant is defined by adding <code>/<variant></code> to the test descriptor, e.g. <code>gtest:Log/client</code>. If you specify no variant, gtest will run once for each JVM variant present (e.g. server, client). So if you only have the server JVM present, then <code>gtest:all</code> will be equivalent to <code>gtest:all/server</code>.</p>
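<p>A short sketch combining these selection mechanisms (the test name and repeat count are illustrative only, reusing the log tests mentioned above):</p>
<pre><code>$ make test TEST="gtest:LogDecorations/server"
$ make test-only TEST="gtest:LogDecorations.level_test_vm" GTEST="REPEAT=-1"</code></pre>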
<h3 id="special-tests">Special tests</h3>
|
||||
<p>A handful of odd tests that are not covered by any other testing framework are accessible using the <code>special:</code> test descriptor. Currently, this includes <code>hotspot-internal</code>, <code>failure-handler</code> and <code>make</code>.</p>
|
||||
<ul>
|
||||
<li><p>Hotspot legacy internal testing (run using <code>-XX:+ExecuteInternalVMTests</code>) is run using <code>special:hotspot-internal</code> or just <code>hotspot-internal</code> as test descriptor, and will only work on a debug JVM.</p></li>
|
||||
<li><p>Failure handler testing is run using <code>special:failure-handler</code> or just <code>failure-handler</code> as test descriptor.</p></li>
|
||||
<li><p>Tests for the build system, including both makefiles and related functionality, is run using <code>special:make</code> or just <code>make</code> as test descriptor. This is equivalent to <code>special:make:all</code>.</p>
|
||||
<p>A specific make test can be run by supplying it as argument, e.g. <code>special:make:idea</code>. As a special syntax, this can also be expressed as <code>make-idea</code>, which allows for command lines as <code>make test-make-idea</code>.</p></li>
|
||||
</ul>
|
||||
<h2 id="test-results-and-summary">Test results and summary</h2>
|
||||
<p>At the end of the test run, a summary of all tests run will be presented. This will have a consistent look, regardless of what test suites were used. This is a sample summary:</p>
|
||||
<pre><code>==============================
|
||||
@ -72,7 +82,7 @@ Test summary
|
||||
TEST FAILURE</code></pre>
|
||||
<p>Tests where the number of TOTAL tests does not equal the number of PASSed tests will be considered a test failure. These are marked with the <code>>> ... <<</code> marker for easy identification.</p>
|
||||
<p>The classification of non-passed tests differs a bit between test suites. In the summary, ERROR is used as a catch-all for tests that neither passed nor are classified as failed by the framework. This might indicate test framework error, timeout or other problems.</p>
|
||||
<p>In case of test failures, <code>make run-test</code> will exit with a non-zero exit value.</p>
|
||||
<p>In case of test failures, <code>make test</code> will exit with a non-zero exit value.</p>
|
||||
<p>All tests have their result stored in <code>build/$BUILD/test-results/$TEST_ID</code>, where TEST_ID is a path-safe conversion from the fully qualified test descriptor, e.g. for <code>jtreg:jdk/test:tier1</code> the TEST_ID is <code>jtreg_jdk_test_tier1</code>. This path is also printed in the log at the end of the test run.</p>
|
||||
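<p>For example, assuming a configuration named <code>linux-x86_64-normal-server-release</code> (the name will differ on your system), the stored results for the descriptor <code>jtreg:jdk/test:tier1</code> could be listed like this:</p>
<pre><code>$ ls build/linux-x86_64-normal-server-release/test-results/jtreg_jdk_test_tier1</code></pre>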
<p>Additional work data is stored in <code>build/$BUILD/test-support/$TEST_ID</code>. For some frameworks, this directory might contain information that is useful in determining the cause of a failed test.</p>
<h2 id="test-suite-control">Test suite control</h2>
@ -1,26 +1,32 @@
|
||||
% Testing the JDK
|
||||
|
||||
## Using the run-test framework
|
||||
## Using "make test" (the run-test framework)
|
||||
|
||||
This new way of running tests is developer-centric. It assumes that you have
|
||||
built a JDK locally and want to test it. Running common test targets is simple,
|
||||
and more complex ad-hoc combinations of tests are possible. The user interface is
|
||||
forgiving, and clearly reports errors it cannot resolve.
|
||||
|
||||
The main target "run-test" uses the jdk-image as the tested product. There is
|
||||
also an alternate target "exploded-run-test" that uses the exploded image
|
||||
The main target `test` uses the jdk-image as the tested product. There is
|
||||
also an alternate target `exploded-test` that uses the exploded image
|
||||
instead. Not all tests will run successfully on the exploded image, but using
|
||||
this target can greatly improve rebuild times for certain workflows.
|
||||
|
||||
Previously, `make test` was used to invoke an old system for running tests, and
|
||||
`make run-test` was used for the new test framework. For backward compatibility
|
||||
with scripts and muscle memory, `run-test` (and variants like
|
||||
`exploded-run-test` or `run-test-tier1`) are kept as aliases. The old system
|
||||
can still be accessed for some time using `cd test && make`.
|
||||
|
||||
Some example command-lines:
|
||||
|
||||
$ make run-test-tier1
|
||||
$ make run-test-jdk_lang JTREG="JOBS=8"
|
||||
$ make run-test TEST=jdk_lang
|
||||
$ make run-test-only TEST="gtest:LogTagSet gtest:LogTagSetDescriptions" GTEST="REPEAT=-1"
|
||||
$ make run-test TEST="hotspot:hotspot_gc" JTREG="JOBS=1;TIMEOUT=8;VM_OPTIONS=-XshowSettings -Xlog:gc+ref=debug"
|
||||
$ make run-test TEST="jtreg:test/hotspot:hotspot_gc test/hotspot/jtreg/native_sanity/JniVersion.java"
|
||||
$ make exploded-run-test TEST=tier2
|
||||
$ make test-tier1
|
||||
$ make test-jdk_lang JTREG="JOBS=8"
|
||||
$ make test TEST=jdk_lang
|
||||
$ make test-only TEST="gtest:LogTagSet gtest:LogTagSetDescriptions" GTEST="REPEAT=-1"
|
||||
$ make test TEST="hotspot:hotspot_gc" JTREG="JOBS=1;TIMEOUT=8;VM_OPTIONS=-XshowSettings -Xlog:gc+ref=debug"
|
||||
$ make test TEST="jtreg:test/hotspot:hotspot_gc test/hotspot/jtreg/native_sanity/JniVersion.java"
|
||||
$ make exploded-test TEST=tier2
|
||||
|
||||
### Configuration
|
||||
|
||||
@ -33,16 +39,16 @@ environment variable to point to the JTReg home before running `configure`.)
|
||||
|
||||
## Test selection
|
||||
|
||||
All functionality is available using the run-test make target. In this use
|
||||
case, the test or tests to be executed is controlled using the `TEST` variable.
|
||||
To speed up subsequent test runs with no source code changes, run-test-only can
|
||||
be used instead, which do not depend on the source and test image build.
|
||||
All functionality is available using the `test` make target. In this use case,
|
||||
the test or tests to be executed are controlled using the `TEST` variable. To
|
||||
speed up subsequent test runs with no source code changes, `test-only` can be
|
||||
used instead, which does not depend on the source and test image build.
|
||||
|
||||
For some common top-level tests, direct make targets have been generated. This
|
||||
includes all JTReg test groups, the hotspot gtest, and custom tests (if
|
||||
present). This means that `make run-test-tier1` is equivalent to `make run-test
|
||||
present). This means that `make test-tier1` is equivalent to `make test
|
||||
TEST="tier1"`, but the latter is more tab-completion friendly. For more complex
|
||||
test runs, the `run-test TEST="x"` solution needs to be used.
|
||||
test runs, the `test TEST="x"` solution needs to be used.
|
||||
|
||||
The test specifications given in `TEST` are parsed into fully qualified test
|
||||
descriptors, which clearly and unambiguously show which tests will be run. As an
|
||||
@ -98,6 +104,27 @@ is defined by adding `/<variant>` to the test descriptor, e.g.
|
||||
variant present (e.g. server, client). So if you only have the server JVM
|
||||
present, then `gtest:all` will be equivalent to `gtest:all/server`.
|
||||
|
||||
### Special tests
|
||||
|
||||
A handful of odd tests that are not covered by any other testing framework are
|
||||
accessible using the `special:` test descriptor. Currently, this includes
|
||||
`hotspot-internal`, `failure-handler` and `make`.
|
||||
|
||||
* Hotspot legacy internal testing (run using `-XX:+ExecuteInternalVMTests`)
|
||||
is run using `special:hotspot-internal` or just `hotspot-internal` as test
|
||||
descriptor, and will only work on a debug JVM.
|
||||
|
||||
* Failure handler testing is run using `special:failure-handler` or just
|
||||
`failure-handler` as test descriptor.
|
||||
|
||||
* Tests for the build system, including both makefiles and related
|
||||
functionality, are run using `special:make` or just `make` as test
|
||||
descriptor. This is equivalent to `special:make:all`.
|
||||
|
||||
A specific make test can be run by supplying it as argument, e.g.
|
||||
`special:make:idea`. As a special syntax, this can also be expressed as
|
||||
`make-idea`, which allows for command lines such as `make test-make-idea`.
|
||||
|
||||
## Test results and summary
|
||||
|
||||
At the end of the test run, a summary of all tests run will be presented. This
|
||||
@ -123,7 +150,7 @@ the summary, ERROR is used as a catch-all for tests that neither passed nor are
|
||||
classified as failed by the framework. This might indicate a test framework
|
||||
error, a timeout, or other problems.
|
||||
|
||||
In case of test failures, `make run-test` will exit with a non-zero exit value.
|
||||
In case of test failures, `make test` will exit with a non-zero exit value.
|
||||
|
||||
All tests have their result stored in `build/$BUILD/test-results/$TEST_ID`,
|
||||
where TEST_ID is a path-safe conversion from the fully qualified test
|
||||
|
@ -61,11 +61,11 @@ MODULES_SOURCE_PATH := $(call PathList, $(call GetModuleSrcPath) \
|
||||
$(SUPPORT_OUTPUTDIR)/rmic/* $(TOPDIR)/src/*/share/doc/stub)
|
||||
|
||||
# URLs
|
||||
JAVADOC_BASE_URL := http://www.oracle.com/pls/topic/lookup?ctx=javase$(VERSION_NUMBER)&id=homepage
|
||||
BUG_SUBMIT_URL := http://bugreport.java.com/bugreport/
|
||||
JAVADOC_BASE_URL := https://docs.oracle.com/pls/topic/lookup?ctx=javase$(VERSION_NUMBER)&id=homepage
|
||||
BUG_SUBMIT_URL := https://bugreport.java.com/bugreport/
|
||||
COPYRIGHT_URL := {@docroot}/../legal/copyright.html
|
||||
LICENSE_URL := http://www.oracle.com/technetwork/java/javase/terms/license/java$(VERSION_NUMBER)speclicense.html
|
||||
REDISTRIBUTION_URL := http://www.oracle.com/technetwork/java/redist-137594.html
|
||||
LICENSE_URL := https://www.oracle.com/technetwork/java/javase/terms/license/java$(VERSION_NUMBER)speclicense.html
|
||||
REDISTRIBUTION_URL := https://www.oracle.com/technetwork/java/redist-137594.html
|
||||
|
||||
# In order to get a specific ordering it's necessary to specify the total
|
||||
# ordering of tags as the tags are otherwise ordered in order of definition.
|
||||
|
@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
# Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
@ -55,10 +55,10 @@ help:
|
||||
$(info $(_) make install # Install the generated images locally)
|
||||
$(info $(_) make reconfigure # Rerun configure with the same arguments as last time)
|
||||
$(info $(_) make help # Give some help on using make)
|
||||
$(info $(_) make test # Run tests, default is all tests (see TEST below))
|
||||
$(info $(_) make run-test-<test> # Run test, e.g. run-test-tier1)
|
||||
$(info $(_) make run-test TEST=<t> # Run test(s) given by TEST specification)
|
||||
$(info $(_) make exploded-run-test TEST=<t> # Run test(s) on the exploded image instead of)
|
||||
$(info $(_) make check # Run basic testing (currently tier1))
|
||||
$(info $(_) make test-<test> # Run test, e.g. test-tier1)
|
||||
$(info $(_) make test TEST=<t> # Run test(s) given by TEST specification)
|
||||
$(info $(_) make exploded-test TEST=<t> # Run test(s) on the exploded image instead of)
|
||||
$(info $(_) # the full jdk image)
|
||||
$(info )
|
||||
$(info Targets for cleaning)
|
||||
@ -99,10 +99,12 @@ help:
|
||||
$(info $(_) TEST_JOBS=<n> # Run <n> parallel test jobs)
|
||||
$(info $(_) CONF_CHECK=<method> # What to do if spec file is out of date)
|
||||
$(info $(_) # method is 'auto', 'ignore' or 'fail' (default))
|
||||
$(info $(_) make test TEST=<test> # Only run the given test or tests, e.g.)
|
||||
$(info $(_) # make test TEST="jdk_lang jdk_net")
|
||||
$(info $(_) JTREG="OPT1=x;OPT2=y" # Control the JTREG test harness for run-test)
|
||||
$(info $(_) GTEST="OPT1=x;OPT2=y" # Control the GTEST test harness for run-test)
|
||||
$(info $(_) TEST="test1 ..." # Use the given test descriptor(s) for testing, e.g.)
|
||||
$(info $(_) # make test TEST="jdk_lang gtest:all")
|
||||
$(info $(_) JTREG="OPT1=x;OPT2=y" # Control the JTREG test harness)
|
||||
$(info $(_) GTEST="OPT1=x;OPT2=y" # Control the GTEST test harness)
|
||||
$(info $(_) TEST_OPTS="OPT1=x;..." # Generic control of all test harnesses)
|
||||
$(info $(_) TEST_VM_OPTS="ARG ..." # Same as setting TEST_OPTS to VM_OPTIONS="ARG ...")
|
||||
$(info )
|
||||
$(if $(all_confs), $(info Available configurations in $(build_dir):) $(foreach var,$(all_confs),$(info * $(var))),\
|
||||
$(info No configurations were found in $(build_dir).) $(info Run 'bash configure' to create a configuration.))
|
||||
|
make/Main.gmk
@ -473,10 +473,10 @@ ALL_TARGETS += $(INTERIM_JMOD_TARGETS) interim-image generate-link-opt-data
|
||||
#
|
||||
|
||||
define DeclareRunTestRecipe
|
||||
run-test-$1:
|
||||
test-$1:
|
||||
+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f RunTests.gmk run-test TEST="$1")
|
||||
|
||||
exploded-run-test-$1:
|
||||
exploded-test-$1:
|
||||
+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f RunTests.gmk run-test \
|
||||
TEST="$1" JDK_IMAGE_DIR=$(JDK_OUTPUTDIR))
|
||||
|
||||
@ -484,8 +484,8 @@ endef
|
||||
|
||||
# ALL_NAMED_TESTS is defined in FindTests.gmk
|
||||
$(foreach t, $(ALL_NAMED_TESTS), $(eval $(call DeclareRunTestRecipe,$t)))
|
||||
ALL_TEST_TARGETS := $(addprefix run-test-, $(ALL_NAMED_TESTS))
|
||||
ALL_EXPLODED_TEST_TARGETS := $(addprefix exploded-run-test-, $(ALL_NAMED_TESTS))
|
||||
ALL_TEST_TARGETS := $(addprefix test-, $(ALL_NAMED_TESTS))
|
||||
ALL_EXPLODED_TEST_TARGETS := $(addprefix exploded-test-, $(ALL_NAMED_TESTS))
|
||||
|
||||
ALL_TARGETS += $(ALL_TEST_TARGETS) $(ALL_EXPLODED_TEST_TARGETS)
|
||||
|
||||
@ -520,13 +520,6 @@ test-image-hotspot-jtreg-graal:
|
||||
+($(CD) $(TOPDIR)/make/test && $(MAKE) $(MAKE_ARGS) -f JtregGraalUnit.gmk \
|
||||
test-image-hotspot-jtreg-graal)
|
||||
|
||||
run-test:
|
||||
+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f RunTests.gmk run-test TEST="$(TEST)")
|
||||
|
||||
exploded-run-test:
|
||||
+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f RunTests.gmk run-test \
|
||||
TEST="$(TEST)" JDK_IMAGE_DIR=$(JDK_OUTPUTDIR))
|
||||
|
||||
ifeq ($(BUILD_GTEST), true)
|
||||
test-image-hotspot-gtest:
|
||||
+($(CD) $(TOPDIR)/make/hotspot/test && $(MAKE) $(MAKE_ARGS) -f GtestImage.gmk)
|
||||
@ -541,11 +534,6 @@ ifeq ($(BUILD_FAILURE_HANDLER), true)
|
||||
+($(CD) $(TOPDIR)/make/test && $(MAKE) $(MAKE_ARGS) \
|
||||
-f BuildFailureHandler.gmk build)
|
||||
|
||||
# Runs the tests for the failure handler jtreg extension
|
||||
test-failure-handler:
|
||||
+($(CD) $(TOPDIR)/make/test && $(MAKE) $(MAKE_ARGS) \
|
||||
-f BuildFailureHandler.gmk test)
|
||||
|
||||
# Copies the failure handler jtreg extension into the test image
|
||||
test-image-failure-handler:
|
||||
+($(CD) $(TOPDIR)/make/test && $(MAKE) $(MAKE_ARGS) \
|
||||
@ -556,40 +544,19 @@ ALL_TARGETS += prepare-test-image build-test-hotspot-jtreg-native \
|
||||
test-image-hotspot-jtreg-native build-test-jdk-jtreg-native \
|
||||
test-image-jdk-jtreg-native build-test-lib build-test-failure-handler \
|
||||
test-failure-handler test-image-failure-handler test-image-hotspot-gtest \
|
||||
test-image-hotspot-jtreg-graal build-test-hotspot-jtreg-graal \
|
||||
run-test exploded-run-test
|
||||
test-image-hotspot-jtreg-graal build-test-hotspot-jtreg-graal
|
||||
|
||||
################################################################################
|
||||
# Run tests
|
||||
|
||||
# Run tests specified by $(TEST), or the default test set.
|
||||
test:
|
||||
$(call RunTests, $(TEST), $(JDK_IMAGE_DIR))
|
||||
+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f RunTests.gmk run-test TEST="$(TEST)")
|
||||
|
||||
test-hotspot-jtreg:
|
||||
$(call RunTests, "hotspot_all", $(JDK_IMAGE_DIR))
|
||||
exploded-test:
|
||||
+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f RunTests.gmk run-test \
|
||||
TEST="$(TEST)" JDK_IMAGE_DIR=$(JDK_OUTPUTDIR))
|
||||
|
||||
test-hotspot-jtreg-native:
|
||||
$(call RunTests, "hotspot_native_sanity", $(JDK_IMAGE_DIR))
|
||||
|
||||
test-hotspot-internal:
|
||||
$(call RunTests, "hotspot_internal", $(JDK_OUTPUTDIR))
|
||||
|
||||
test-hotspot-gtest:
|
||||
$(call RunTests, "hotspot_gtest", $(JDK_OUTPUTDIR))
|
||||
|
||||
test-jdk-jtreg-native:
|
||||
$(call RunTests, "jdk_native_sanity", $(JDK_IMAGE_DIR))
|
||||
|
||||
test-make:
|
||||
($(CD) $(TOPDIR)/test/make && $(MAKE) $(MAKE_ARGS) -f TestMake.gmk $(TEST_TARGET))
|
||||
|
||||
test-compile-commands:
|
||||
($(CD) $(TOPDIR)/test/make && $(MAKE) $(MAKE_ARGS) -f TestMake.gmk test-compile-commands)
|
||||
|
||||
ALL_TARGETS += test test-hotspot-jtreg test-hotspot-jtreg-native \
|
||||
test-hotspot-internal test-hotspot-gtest test-jdk-jtreg-native test-make \
|
||||
test-compile-commands
|
||||
ALL_TARGETS += test exploded-test
|
||||
|
||||
################################################################################
|
||||
# Bundles
|
||||
@ -874,14 +841,18 @@ else
|
||||
|
||||
docs-zip: docs-jdk
|
||||
|
||||
# Tests
|
||||
test: jdk-image test-image
|
||||
|
||||
run-test: jdk-image test-image
|
||||
exploded-run-test: exploded-image test-image
|
||||
exploded-test: exploded-image test-image
|
||||
|
||||
test-make: clean-test-make compile-commands
|
||||
|
||||
test-make-compile-commands: compile-commands
|
||||
|
||||
# Declare dependency for all generated test targets
|
||||
$(foreach t, $(ALL_TEST_TARGETS), $(eval $t: jdk-image test-image))
|
||||
$(foreach t, $(ALL_EXPLODED_TEST_TARGETS), $(eval $t: exploded-image test-image))
|
||||
$(foreach t, $(filter-out test-make%, $(ALL_TEST_TARGETS)), $(eval $t: jdk-image test-image))
|
||||
$(foreach t, $(filter-out exploded-test-make%, $(ALL_EXPLODED_TEST_TARGETS)), $(eval $t: exploded-image test-image))
|
||||
|
||||
create-buildjdk-copy: jdk.jlink-java java.base-gendata \
|
||||
$(addsuffix -java, $(INTERIM_IMAGE_MODULES))
|
||||
@ -890,16 +861,10 @@ else
|
||||
|
||||
interim-image: $(INTERIM_JMOD_TARGETS)
|
||||
|
||||
test-make: clean-test-make
|
||||
|
||||
test-compile-commands: compile-commands
|
||||
|
||||
build-test-lib: exploded-image-optimize
|
||||
|
||||
build-test-failure-handler: interim-langtools
|
||||
|
||||
test-failure-handler: build-test-failure-handler
|
||||
|
||||
test-image-failure-handler: build-test-failure-handler
|
||||
|
||||
build-test-hotspot-jtreg-native: buildtools-jdk \
|
||||
@ -917,12 +882,6 @@ else
|
||||
|
||||
test-image-hotspot-gtest: hotspot
|
||||
|
||||
test-hotspot-internal: exploded-image
|
||||
|
||||
test-hotspot-jtreg: jdk-image test-image
|
||||
|
||||
test-hotspot-gtest: exploded-image test-image-hotspot-gtest
|
||||
|
||||
install: product-images
|
||||
|
||||
product-bundles: product-images
|
||||
@ -1091,6 +1050,30 @@ all: all-images
|
||||
|
||||
ALL_TARGETS += default jdk images docs bundles all
|
||||
|
||||
# Aliases used for running tests.
|
||||
|
||||
# Let "run-test" be an alias for "test"
|
||||
$(foreach t, $(ALL_NAMED_TESTS), $(eval run-test-$t: test-$t))
|
||||
$(foreach t, $(ALL_NAMED_TESTS), $(eval exploded-run-test-$t: exploded-test-$t))
|
||||
RUN_TEST_TARGETS := $(addprefix run-test-, $(ALL_NAMED_TESTS)) \
|
||||
$(addprefix exploded-run-test-, $(ALL_NAMED_TESTS))
|
||||
|
||||
run-test: test
|
||||
exploded-run-test: exploded-test
|
||||
|
||||
# "make check" is a common idiom for running basic testing
|
||||
check: test-tier1
|
||||
|
||||
# Keep some old names as aliases
|
||||
test-hotspot-jtreg: test-hotspot_all
|
||||
test-hotspot-jtreg-native: test-hotspot_native_sanity
|
||||
test-hotspot-gtest: exploded-test-gtest
|
||||
test-jdk-jtreg-native: test-jdk_native_sanity
|
||||
|
||||
ALL_TARGETS += $(RUN_TEST_TARGETS) run-test exploded-run-test check \
|
||||
test-hotspot-jtreg test-hotspot-jtreg-native test-hotspot-gtest \
|
||||
test-jdk-jtreg-native
|
||||
|
||||
################################################################################
|
||||
################################################################################
|
||||
#
|
||||
|
@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
@ -30,17 +30,6 @@
|
||||
ifndef _MAINSUPPORT_GMK
|
||||
_MAINSUPPORT_GMK := 1
|
||||
|
||||
# Run the tests specified by $1, with PRODUCT_HOME specified by $2
|
||||
# JT_JAVA is picked up by the jtreg launcher and used to run Jtreg itself.
|
||||
define RunTests
|
||||
($(CD) $(TOPDIR)/test && $(MAKE) $(MAKE_ARGS) -j1 -k MAKEFLAGS= \
|
||||
JT_HOME=$(JT_HOME) PRODUCT_HOME=$(strip $2) \
|
||||
TEST_IMAGE_DIR=$(TEST_IMAGE_DIR) \
|
||||
ALT_OUTPUTDIR=$(OUTPUTDIR) TEST_JOBS=$(TEST_JOBS) \
|
||||
JT_JAVA=$(BOOT_JDK) JIB_JAR=$(JIB_JAR) \
|
||||
JOBS=$(JOBS) $1) || true
|
||||
endef
|
||||
|
||||
define CleanDocs
|
||||
@$(PRINTF) "Cleaning docs ..."
|
||||
@$(PRINTF) "\n" $(LOG_DEBUG)
|
||||
|
@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
# Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
@ -65,10 +65,14 @@ ifeq ($(OPENJDK_TARGET_OS), windows)
|
||||
ifndef _NT_SYMBOL_PATH
|
||||
# Can't use PathList here as it adds quotes around the value.
|
||||
_NT_SYMBOL_PATH := \
|
||||
$(subst $(SPACE),;, $(foreach p, $(sort $(dir $(wildcard \
|
||||
$(addprefix $(SYMBOLS_IMAGE_DIR)/bin/, *.pdb */*.pdb)))), $(call FixPath, $p)))
|
||||
$(subst $(SPACE),;,$(strip \
|
||||
$(foreach p, $(sort $(dir $(wildcard \
|
||||
$(addprefix $(SYMBOLS_IMAGE_DIR)/bin/, *.pdb */*.pdb)))), \
|
||||
$(call FixPath, $p) \
|
||||
) \
|
||||
))
|
||||
export _NT_SYMBOL_PATH
|
||||
$(info _NT_SYMBOL_PATH $(_NT_SYMBOL_PATH))
|
||||
$(info _NT_SYMBOL_PATH=$(_NT_SYMBOL_PATH))
|
||||
endif
|
||||
endif
|
||||
|
||||
@ -296,16 +300,32 @@ endef
|
||||
|
||||
# Helper function to determine if a test specification is a special test
|
||||
#
|
||||
# It is a special test if it is "special:" followed by a test name.
|
||||
# It is a special test if it is "special:" followed by a test name,
|
||||
# if it is "make:" or "make-" followed by a make test, or any of the special test names
|
||||
# as a single word.
|
||||
define ParseSpecialTestSelection
|
||||
$(if $(filter special:%, $1), \
|
||||
$1 \
|
||||
) \
|
||||
$(if $(filter make%, $1), \
|
||||
$(if $(filter make:%, $1), \
|
||||
special:$(strip $1) \
|
||||
) \
|
||||
$(if $(filter make-%, $1), \
|
||||
special:$(patsubst make-%,make:%, $1) \
|
||||
) \
|
||||
$(if $(filter make, $1), \
|
||||
special:make:all \
|
||||
)
|
||||
) \
|
||||
$(if $(filter hotspot-internal failure-handler, $1), \
|
||||
special:$(strip $1) \
|
||||
)
|
||||
endef
|
||||
|
||||
ifeq ($(TEST), )
|
||||
$(info No test selection given in TEST!)
|
||||
$(info Please use e.g. 'run-test TEST=tier1' or 'run-test-tier1')
|
||||
$(info Please use e.g. 'make test TEST=tier1' or 'make test-tier1')
|
||||
$(info See doc/testing.[md|html] for help)
|
||||
$(error Cannot continue)
|
||||
endif
|
||||
@ -659,9 +679,13 @@ define SetupRunSpecialTestBody
|
||||
$$(JDK_IMAGE_DIR)/bin/java -XX:+ExecuteInternalVMTests \
|
||||
-XX:+ShowMessageBoxOnError -version
|
||||
else ifeq ($$($1_TEST_NAME), failure-handler)
|
||||
$1_TEST_COMMAND_LINE := \
|
||||
($(CD) $(TOPDIR)/make/test && $(MAKE) $(MAKE_ARGS) -f \
|
||||
BuildFailureHandler.gmk test)
|
||||
ifeq ($(BUILD_FAILURE_HANDLER), true)
|
||||
$1_TEST_COMMAND_LINE := \
|
||||
($(CD) $(TOPDIR)/make/test && $(MAKE) $(MAKE_ARGS) -f \
|
||||
BuildFailureHandler.gmk test)
|
||||
else
|
||||
$$(error Cannot test failure handler if it is not built)
|
||||
endif
|
||||
else ifeq ($$($1_TEST_NAME), make)
|
||||
$1_TEST_COMMAND_LINE := \
|
||||
($(CD) $(TOPDIR)/test/make && $(MAKE) $(MAKE_ARGS) -f \
|
||||
|
@ -182,3 +182,7 @@ ifeq ($(OPENJDK_BUILD_OS), solaris)
|
||||
SED := gsed
|
||||
TAR := gtar
|
||||
endif
|
||||
|
||||
ifeq ($(OPENJDK_BUILD_OS), windows)
|
||||
CYGPATH := cygpath
|
||||
endif
|
||||
|
@ -747,10 +747,6 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_CPU_DEP],
|
||||
# -Wno-psabi to get rid of annoying "note: the mangling of 'va_list' has changed in GCC 4.4"
|
||||
$1_CFLAGS_CPU="-fsigned-char -Wno-psabi $ARM_ARCH_TYPE_FLAGS $ARM_FLOAT_TYPE_FLAGS -DJDK_ARCH_ABI_PROP_NAME='\"\$(JDK_ARCH_ABI_PROP_NAME)\"'"
|
||||
$1_CFLAGS_CPU_JVM="-DARM"
|
||||
elif test "x$FLAGS_CPU" = xaarch64; then
|
||||
if test "x$HOTSPOT_TARGET_CPU_PORT" = xarm64; then
|
||||
$1_CFLAGS_CPU_JVM="-fsigned-char -DARM"
|
||||
fi
|
||||
elif test "x$FLAGS_CPU_ARCH" = xppc; then
|
||||
$1_CFLAGS_CPU_JVM="-minsert-sched-nops=regroup_exact -mno-multiple -mno-string"
|
||||
if test "x$FLAGS_CPU" = xppc64; then
|
||||
|
@ -173,10 +173,6 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_CPU_DEP],
|
||||
elif test "x$OPENJDK_$1_CPU" = xarm; then
|
||||
$1_CPU_LDFLAGS_JVM_ONLY="${$1_CPU_LDFLAGS_JVM_ONLY} -fsigned-char"
|
||||
$1_CPU_LDFLAGS="$ARM_ARCH_TYPE_FLAGS $ARM_FLOAT_TYPE_FLAGS"
|
||||
elif test "x$FLAGS_CPU" = xaarch64; then
|
||||
if test "x$HOTSPOT_TARGET_CPU_PORT" = xarm64; then
|
||||
$1_CPU_LDFLAGS_JVM_ONLY="${$1_CPU_LDFLAGS_JVM_ONLY} -fsigned-char"
|
||||
fi
|
||||
fi
|
||||
|
||||
elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then
|
||||
|
@ -34,7 +34,7 @@ m4_include([flags-other.m4])
|
||||
AC_DEFUN([FLAGS_SETUP_ABI_PROFILE],
|
||||
[
|
||||
AC_ARG_WITH(abi-profile, [AS_HELP_STRING([--with-abi-profile],
|
||||
[specify ABI profile for ARM builds (arm-vfp-sflt,arm-vfp-hflt,arm-sflt, armv5-vfp-sflt,armv6-vfp-hflt,arm64,aarch64) @<:@toolchain dependent@:>@ ])])
|
||||
[specify ABI profile for ARM builds (arm-vfp-sflt,arm-vfp-hflt,arm-sflt, armv5-vfp-sflt,armv6-vfp-hflt,aarch64) @<:@toolchain dependent@:>@ ])])
|
||||
|
||||
if test "x$with_abi_profile" != x; then
|
||||
if test "x$OPENJDK_TARGET_CPU" != xarm && \
|
||||
@ -61,10 +61,6 @@ AC_DEFUN([FLAGS_SETUP_ABI_PROFILE],
|
||||
elif test "x$OPENJDK_TARGET_ABI_PROFILE" = xarmv6-vfp-hflt; then
|
||||
ARM_FLOAT_TYPE=vfp-hflt
|
||||
ARM_ARCH_TYPE_FLAGS='-march=armv6 -marm'
|
||||
elif test "x$OPENJDK_TARGET_ABI_PROFILE" = xarm64; then
|
||||
# No special flags, just need to trigger setting JDK_ARCH_ABI_PROP_NAME
|
||||
ARM_FLOAT_TYPE=
|
||||
ARM_ARCH_TYPE_FLAGS=
|
||||
elif test "x$OPENJDK_TARGET_ABI_PROFILE" = xaarch64; then
|
||||
# No special flags, just need to trigger setting JDK_ARCH_ABI_PROP_NAME
|
||||
ARM_FLOAT_TYPE=
|
||||
|
@ -72,8 +72,6 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_VARIANTS],
|
||||
AC_ARG_WITH([jvm-variants], [AS_HELP_STRING([--with-jvm-variants],
|
||||
[JVM variants (separated by commas) to build (server,client,minimal,core,zero,custom) @<:@server@:>@])])
|
||||
|
||||
SETUP_HOTSPOT_TARGET_CPU_PORT
|
||||
|
||||
if test "x$with_jvm_variants" = x; then
|
||||
with_jvm_variants="server"
|
||||
fi
|
||||
@ -299,9 +297,6 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_FEATURES],
|
||||
if test "x$OPENJDK_TARGET_CPU" = xarm; then
|
||||
HOTSPOT_TARGET_CPU=arm_32
|
||||
HOTSPOT_TARGET_CPU_DEFINE="ARM32"
|
||||
elif test "x$OPENJDK_TARGET_CPU" = xaarch64 && test "x$HOTSPOT_TARGET_CPU_PORT" = xarm64; then
|
||||
HOTSPOT_TARGET_CPU=arm_64
|
||||
HOTSPOT_TARGET_CPU_ARCH=arm
|
||||
fi
|
||||
|
||||
# Verify that dependencies are met for explicitly set features.
|
||||
@ -542,6 +537,9 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_FEATURES],
|
||||
|
||||
# Used for verification of Makefiles by check-jvm-feature
|
||||
AC_SUBST(VALID_JVM_FEATURES)
|
||||
|
||||
# --with-cpu-port is no longer supported
|
||||
BASIC_DEPRECATED_ARG_WITH(with-cpu-port)
|
||||
])
|
||||
|
||||
###############################################################################
|
||||
@ -578,31 +576,6 @@ AC_DEFUN_ONCE([HOTSPOT_FINALIZE_JVM_FEATURES],
|
||||
done
|
||||
])
|
||||
|
||||
################################################################################
|
||||
#
|
||||
# Specify which sources will be used to build the 64-bit ARM port
|
||||
#
|
||||
# --with-cpu-port=arm64 will use hotspot/src/cpu/arm
|
||||
# --with-cpu-port=aarch64 will use hotspot/src/cpu/aarch64
|
||||
#
|
||||
AC_DEFUN([SETUP_HOTSPOT_TARGET_CPU_PORT],
|
||||
[
|
||||
AC_ARG_WITH(cpu-port, [AS_HELP_STRING([--with-cpu-port],
|
||||
[specify sources to use for Hotspot 64-bit ARM port (arm64,aarch64) @<:@aarch64@:>@ ])])
|
||||
|
||||
if test "x$with_cpu_port" != x; then
|
||||
if test "x$OPENJDK_TARGET_CPU" != xaarch64; then
|
||||
AC_MSG_ERROR([--with-cpu-port only available on aarch64])
|
||||
fi
|
||||
if test "x$with_cpu_port" != xarm64 && \
|
||||
test "x$with_cpu_port" != xaarch64; then
|
||||
AC_MSG_ERROR([--with-cpu-port must specify arm64 or aarch64])
|
||||
fi
|
||||
HOTSPOT_TARGET_CPU_PORT="$with_cpu_port"
|
||||
fi
|
||||
])
|
||||
|
||||
|
||||
################################################################################
|
||||
# Check if gtest should be built
|
||||
#
|
||||
|
@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
# Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
@ -72,6 +72,15 @@ ALL_NAMED_TESTS += $(JTREG_TEST_GROUPS)
|
||||
# Add Gtest
|
||||
ALL_NAMED_TESTS += gtest
|
||||
|
||||
# Find make test targets
|
||||
MAKE_TEST_TARGETS := $(shell $(MAKE) -s --no-print-directory $(MAKE_ARGS) \
|
||||
SPEC=$(SPEC) -f $(TOPDIR)/test/make/TestMake.gmk print-targets)
|
||||
|
||||
ALL_NAMED_TESTS += $(addprefix make-, $(MAKE_TEST_TARGETS))
|
||||
|
||||
# Add special tests
|
||||
ALL_NAMED_TESTS += hotspot-internal failure-handler make
|
||||
|
||||
################################################################################
|
||||
|
||||
endif # _FIND_TESTS_GMK
|
||||
|
@ -233,8 +233,7 @@ var getJibProfilesCommon = function (input, data) {
|
||||
common.main_profile_names = [
|
||||
"linux-x64", "linux-x86", "macosx-x64", "solaris-x64",
|
||||
"solaris-sparcv9", "windows-x64", "windows-x86",
|
||||
"linux-aarch64", "linux-arm32", "linux-arm64", "linux-arm-vfp-hflt",
|
||||
"linux-arm-vfp-hflt-dyn"
|
||||
"linux-aarch64", "linux-arm32"
|
||||
];
|
||||
|
||||
// These are the base settings for all the main build profiles.
|
||||
@ -440,20 +439,7 @@ var getJibProfilesProfiles = function (input, common, data) {
|
||||
dependencies: ["devkit", "build_devkit", "cups"],
|
||||
configure_args: [
|
||||
"--openjdk-target=aarch64-linux-gnu", "--with-freetype=bundled",
|
||||
"--disable-warnings-as-errors", "--with-cpu-port=aarch64",
|
||||
],
|
||||
},
|
||||
|
||||
"linux-arm64": {
|
||||
target_os: "linux",
|
||||
target_cpu: "aarch64",
|
||||
build_cpu: "x64",
|
||||
dependencies: ["devkit", "build_devkit", "cups", "headless_stubs"],
|
||||
configure_args: [
|
||||
"--with-cpu-port=arm64",
|
||||
"--with-jvm-variants=server",
|
||||
"--openjdk-target=aarch64-linux-gnu",
|
||||
"--enable-headless-only"
|
||||
"--disable-warnings-as-errors"
|
||||
],
|
||||
},
|
||||
|
||||
@ -467,30 +453,7 @@ var getJibProfilesProfiles = function (input, common, data) {
|
||||
"--with-abi-profile=arm-vfp-hflt", "--disable-warnings-as-errors"
|
||||
],
|
||||
},
|
||||
|
||||
"linux-arm-vfp-hflt": {
|
||||
target_os: "linux",
|
||||
target_cpu: "arm",
|
||||
build_cpu: "x64",
|
||||
dependencies: ["devkit", "build_devkit", "cups"],
|
||||
configure_args: [
|
||||
"--with-jvm-variants=minimal1,client",
|
||||
"--with-x=" + input.get("devkit", "install_path") + "/arm-linux-gnueabihf/libc/usr/X11R6-PI",
|
||||
"--with-fontconfig=" + input.get("devkit", "install_path") + "/arm-linux-gnueabihf/libc/usr/X11R6-PI",
|
||||
"--openjdk-target=arm-linux-gnueabihf",
|
||||
"--with-abi-profile=arm-vfp-hflt",
|
||||
"--with-freetype=bundled"
|
||||
],
|
||||
},
|
||||
|
||||
// Special version of the SE profile adjusted to be testable on arm64 hardware.
|
||||
"linux-arm-vfp-hflt-dyn": {
|
||||
configure_args: "--with-stdc++lib=dynamic"
|
||||
}
|
||||
};
|
||||
// Let linux-arm-vfp-hflt-dyn inherit everything from linux-arm-vfp-hflt
|
||||
profiles["linux-arm-vfp-hflt-dyn"] = concatObjects(
|
||||
profiles["linux-arm-vfp-hflt-dyn"], profiles["linux-arm-vfp-hflt"]);
|
||||
|
||||
// Add the base settings to all the main profiles
|
||||
common.main_profile_names.forEach(function (name) {
|
||||
@ -617,15 +580,6 @@ var getJibProfilesProfiles = function (input, common, data) {
|
||||
},
|
||||
"linux-arm32": {
|
||||
platform: "linux-arm32",
|
||||
},
|
||||
"linux-arm64": {
|
||||
platform: "linux-arm64-vfp-hflt",
|
||||
},
|
||||
"linux-arm-vfp-hflt": {
|
||||
platform: "linux-arm32-vfp-hflt",
|
||||
},
|
||||
"linux-arm-vfp-hflt-dyn": {
|
||||
platform: "linux-arm32-vfp-hflt-dyn",
|
||||
}
|
||||
}
|
||||
// Generate common artifacts for all main profiles
|
||||
@ -759,7 +713,7 @@ var getJibProfilesProfiles = function (input, common, data) {
|
||||
testedProfile + ".test"
|
||||
],
|
||||
src: "src.conf",
|
||||
make_args: [ "run-test-prebuilt", "LOG_CMDLINES=true" ],
|
||||
make_args: [ "run-test-prebuilt", "LOG_CMDLINES=true", "JTREG_VERBOSE=fail,error,time" ],
|
||||
environment: {
|
||||
"BOOT_JDK": common.boot_jdk_home,
|
||||
"JDK_IMAGE_DIR": input.get(testedProfile + ".jdk", "home_path"),
|
||||
@ -845,21 +799,13 @@ var getJibProfilesProfiles = function (input, common, data) {
|
||||
var getJibProfilesDependencies = function (input, common) {
|
||||
|
||||
var devkit_platform_revisions = {
|
||||
linux_x64: "gcc7.3.0-OEL6.4+1.0",
|
||||
linux_x64: "gcc7.3.0-OEL6.4+1.1",
|
||||
macosx_x64: "Xcode9.4-MacOSX10.13+1.0",
|
||||
solaris_x64: "SS12u4-Solaris11u1+1.0",
|
||||
solaris_sparcv9: "SS12u6-Solaris11u3+1.0",
|
||||
windows_x64: "VS2017-15.5.5+1.0",
|
||||
linux_aarch64: (input.profile != null && input.profile.indexOf("arm64") >= 0
|
||||
? "gcc-linaro-aarch64-linux-gnu-4.8-2013.11_linux+1.0"
|
||||
: "gcc7.3.0-Fedora27+1.0"),
|
||||
linux_arm: (input.profile != null && input.profile.indexOf("hflt") >= 0
|
||||
? "gcc-linaro-arm-linux-gnueabihf-raspbian-2012.09-20120921_linux+1.0"
|
||||
: (input.profile != null && input.profile.indexOf("arm32") >= 0
|
||||
? "gcc7.3.0-Fedora27+1.0"
|
||||
: "arm-linaro-4.7+1.0"
|
||||
)
|
||||
)
|
||||
linux_aarch64: "gcc7.3.0-Fedora27+1.0",
|
||||
linux_arm: "gcc7.3.0-Fedora27+1.0"
|
||||
};
|
||||
|
||||
var devkit_platform = (input.target_cpu == "x86"
|
||||
|
@ -36,7 +36,7 @@
|
||||
# By default this Makefile will build a native toolchain for the current
|
||||
# platform if called with something like this:
|
||||
#
|
||||
# make tars
|
||||
# make tars BASE_OS=OEL6
|
||||
#
|
||||
# To build the full set of crosstools for additional platforms, use a command
|
||||
# line looking like this:
|
||||
|
@ -110,6 +110,7 @@ RPM_LIST := \
|
||||
libXext libXext-devel \
|
||||
libXtst libXtst-devel \
|
||||
libXrender libXrender-devel \
|
||||
libXrandr libXrandr-devel \
|
||||
freetype freetype-devel \
|
||||
libXt libXt-devel \
|
||||
libSM libSM-devel \
|
||||
|
@ -60,12 +60,6 @@ ifeq ($(OPENJDK_TARGET_CPU), x86_64)
|
||||
OPENJDK_TARGET_CPU_VM_VERSION := amd64
|
||||
else ifeq ($(OPENJDK_TARGET_CPU), sparcv9)
|
||||
OPENJDK_TARGET_CPU_VM_VERSION := sparc
|
||||
else ifeq ($(HOTSPOT_TARGET_CPU_ARCH), arm)
|
||||
ifeq ($(OPENJDK_TARGET_CPU), aarch64)
|
||||
# This sets the Oracle Aarch64 port to use arm64
|
||||
# while the original Aarch64 port uses aarch64
|
||||
OPENJDK_TARGET_CPU_VM_VERSION := arm64
|
||||
endif
|
||||
else
|
||||
OPENJDK_TARGET_CPU_VM_VERSION := $(OPENJDK_TARGET_CPU)
|
||||
endif
|
||||
|
@ -48,7 +48,7 @@ import static com.sun.source.doctree.DocTree.Kind.*;
|
||||
* will produce the following html
|
||||
* <p>
|
||||
* {@code
|
||||
* Please see <a href="https://www.oracle.com/pls/topic/lookup?ctx=javase10&id=Borealis">a spectacular</a> sight.
|
||||
* Please see <a href="https://docs.oracle.com/pls/topic/lookup?ctx=javase10&id=Borealis">a spectacular</a> sight.
|
||||
* }
|
||||
*/
|
||||
public class ExtLink implements Taglet {
|
||||
@ -63,7 +63,7 @@ public class ExtLink implements Taglet {
|
||||
|
||||
static final String TAG_NAME = "extLink";
|
||||
|
||||
static final String URL = "https://www.oracle.com/pls/topic/lookup?ctx=javase" +
|
||||
static final String URL = "https://docs.oracle.com/pls/topic/lookup?ctx=javase" +
|
||||
SPEC_VERSION + "&id=";
|
||||
|
||||
static final Pattern TAG_PATTERN = Pattern.compile("(?s)(\\s*)(?<name>\\w+)(\\s+)(?<desc>.*)$");
|
||||
|
@ -38,19 +38,6 @@
|
||||
int AbstractInterpreter::BasicType_as_index(BasicType type) {
|
||||
int i = 0;
|
||||
switch (type) {
|
||||
#ifdef AARCH64
|
||||
case T_BOOLEAN: i = 0; break;
|
||||
case T_CHAR : i = 1; break;
|
||||
case T_BYTE : i = 2; break;
|
||||
case T_SHORT : i = 3; break;
|
||||
case T_INT : // fall through
|
||||
case T_LONG : // fall through
|
||||
case T_VOID : // fall through
|
||||
case T_FLOAT : // fall through
|
||||
case T_DOUBLE : i = 4; break;
|
||||
case T_OBJECT : // fall through
|
||||
case T_ARRAY : i = 5; break;
|
||||
#else
|
||||
case T_VOID : i = 0; break;
|
||||
case T_BOOLEAN: i = 1; break;
|
||||
case T_CHAR : i = 2; break;
|
||||
@ -62,7 +49,6 @@ int AbstractInterpreter::BasicType_as_index(BasicType type) {
|
||||
case T_LONG : i = 7; break;
|
||||
case T_FLOAT : i = 8; break;
|
||||
case T_DOUBLE : i = 9; break;
|
||||
#endif // AARCH64
|
||||
default : ShouldNotReachHere();
|
||||
}
|
||||
assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
|
||||
@ -71,7 +57,7 @@ int AbstractInterpreter::BasicType_as_index(BasicType type) {
|
||||
|
||||
// How much stack a method activation needs in words.
|
||||
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
|
||||
const int stub_code = AARCH64_ONLY(24) NOT_AARCH64(12); // see generate_call_stub
|
||||
const int stub_code = 12; // see generate_call_stub
|
||||
// Save space for one monitor to get into the interpreted method in case
|
||||
// the method is synchronized
|
||||
int monitor_size = method->is_synchronized() ?
|
||||
@ -108,9 +94,6 @@ int AbstractInterpreter::size_activation(int max_stack,
|
||||
(moncount*frame::interpreter_frame_monitor_size()) +
|
||||
tempcount*Interpreter::stackElementWords + extra_args;
|
||||
|
||||
#ifdef AARCH64
|
||||
size = align_up(size, StackAlignmentInBytes/BytesPerWord);
|
||||
#endif // AARCH64
|
||||
|
||||
return size;
|
||||
}
|
||||
@ -146,65 +129,7 @@ void AbstractInterpreter::layout_activation(Method* method,
|
||||
// interpreter_frame_sender_sp is the original sp of the caller (the unextended_sp)
|
||||
// and sender_sp is (fp + sender_sp_offset*wordSize)
|
||||
|
||||
#ifdef AARCH64
|
||||
intptr_t* locals;
|
||||
if (caller->is_interpreted_frame()) {
|
||||
// attach locals to the expression stack of caller interpreter frame
|
||||
locals = caller->interpreter_frame_tos_address() + caller_actual_parameters*Interpreter::stackElementWords - 1;
|
||||
} else {
|
||||
assert (is_bottom_frame, "should be");
|
||||
locals = interpreter_frame->fp() + frame::sender_sp_offset + method->max_locals() - 1;
|
||||
}
|
||||
|
||||
if (TraceDeoptimization) {
|
||||
tty->print_cr("layout_activation:");
|
||||
|
||||
if (caller->is_entry_frame()) {
|
||||
tty->print("entry ");
|
||||
}
|
||||
if (caller->is_compiled_frame()) {
|
||||
tty->print("compiled ");
|
||||
}
|
||||
if (caller->is_interpreted_frame()) {
|
||||
tty->print("interpreted ");
|
||||
}
|
||||
tty->print_cr("caller: sp=%p, unextended_sp=%p, fp=%p, pc=%p", caller->sp(), caller->unextended_sp(), caller->fp(), caller->pc());
|
||||
tty->print_cr("interpreter_frame: sp=%p, unextended_sp=%p, fp=%p, pc=%p", interpreter_frame->sp(), interpreter_frame->unextended_sp(), interpreter_frame->fp(), interpreter_frame->pc());
|
||||
tty->print_cr("method: max_locals = %d, size_of_parameters = %d", method->max_locals(), method->size_of_parameters());
|
||||
tty->print_cr("caller_actual_parameters = %d", caller_actual_parameters);
|
||||
tty->print_cr("locals = %p", locals);
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
if (caller_actual_parameters != method->size_of_parameters()) {
|
||||
assert(caller->is_interpreted_frame(), "adjusted caller_actual_parameters, but caller is not interpreter frame");
|
||||
Bytecode_invoke inv(caller->interpreter_frame_method(), caller->interpreter_frame_bci());
|
||||
|
||||
if (is_bottom_frame) {
|
||||
assert(caller_actual_parameters == 0, "invalid adjusted caller_actual_parameters value for bottom frame");
|
||||
assert(inv.is_invokedynamic() || inv.is_invokehandle(), "adjusted caller_actual_parameters for bottom frame, but not invokedynamic/invokehandle");
|
||||
} else {
|
||||
assert(caller_actual_parameters == method->size_of_parameters()+1, "invalid adjusted caller_actual_parameters value");
|
||||
assert(!inv.is_invokedynamic() && MethodHandles::has_member_arg(inv.klass(), inv.name()), "adjusted caller_actual_parameters, but no member arg");
|
||||
}
|
||||
}
|
||||
if (caller->is_interpreted_frame()) {
|
||||
intptr_t* locals_base = (locals - method->max_locals()*Interpreter::stackElementWords + 1);
|
||||
locals_base = align_down(locals_base, StackAlignmentInBytes);
|
||||
assert(interpreter_frame->sender_sp() <= locals_base, "interpreter-to-interpreter frame chaining");
|
||||
|
||||
} else if (caller->is_compiled_frame()) {
|
||||
assert(locals + 1 <= caller->unextended_sp(), "compiled-to-interpreter frame chaining");
|
||||
|
||||
} else {
|
||||
assert(caller->is_entry_frame(), "should be");
|
||||
assert(locals + 1 <= caller->fp(), "entry-to-interpreter frame chaining");
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
#else
|
||||
intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
|
||||
#endif // AARCH64
|
||||
|
||||
interpreter_frame->interpreter_frame_set_locals(locals);
|
||||
BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
|
||||
@ -215,44 +140,16 @@ void AbstractInterpreter::layout_activation(Method* method,
|
||||
intptr_t* stack_top = (intptr_t*) monbot -
|
||||
tempcount*Interpreter::stackElementWords -
|
||||
popframe_extra_args;
|
||||
#ifdef AARCH64
|
||||
interpreter_frame->interpreter_frame_set_stack_top(stack_top);
|
||||
|
||||
// We have to add extra reserved slots to max_stack. There are 3 users of the extra slots,
|
||||
// none of which are at the same time, so we just need to make sure there is enough room
|
||||
// for the biggest user:
|
||||
// -reserved slot for exception handler
|
||||
// -reserved slots for JSR292. Method::extra_stack_entries() is the size.
|
||||
// -3 reserved slots so get_method_counters() can save some registers before call_VM().
|
||||
int max_stack = method->constMethod()->max_stack() + MAX2(3, Method::extra_stack_entries());
|
||||
intptr_t* extended_sp = (intptr_t*) monbot -
|
||||
(max_stack * Interpreter::stackElementWords) -
|
||||
popframe_extra_args;
|
||||
extended_sp = align_down(extended_sp, StackAlignmentInBytes);
|
||||
interpreter_frame->interpreter_frame_set_extended_sp(extended_sp);
|
||||
#else
|
||||
interpreter_frame->interpreter_frame_set_last_sp(stack_top);
|
||||
#endif // AARCH64
|
||||
|
||||
// All frames but the initial (oldest) interpreter frame we fill in have a
|
||||
// value for sender_sp that allows walking the stack but isn't
|
||||
// truly correct. Correct the value here.
|
||||
|
||||
#ifdef AARCH64
|
||||
if (caller->is_interpreted_frame()) {
|
||||
intptr_t* sender_sp = align_down(caller->interpreter_frame_tos_address(), StackAlignmentInBytes);
|
||||
interpreter_frame->set_interpreter_frame_sender_sp(sender_sp);
|
||||
|
||||
} else {
|
||||
// in case of non-interpreter caller sender_sp of the oldest frame is already
|
||||
// set to valid value
|
||||
}
|
||||
#else
|
||||
if (extra_locals != 0 &&
|
||||
interpreter_frame->sender_sp() == interpreter_frame->interpreter_frame_sender_sp() ) {
|
||||
interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() + extra_locals);
|
||||
}
|
||||
#endif // AARCH64
|
||||
|
||||
*interpreter_frame->interpreter_frame_cache_addr() =
|
||||
method->constants()->cache();
|
||||
|
File diff suppressed because it is too large
@ -1,998 +0,0 @@
|
||||
//
|
||||
// Copyright (c) 2008, 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
//
|
||||
// This code is free software; you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License version 2 only, as
|
||||
// published by the Free Software Foundation.
|
||||
//
|
||||
// This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
// version 2 for more details (a copy is included in the LICENSE file that
|
||||
// accompanied this code).
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License version
|
||||
// 2 along with this work; if not, write to the Free Software Foundation,
|
||||
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
//
|
||||
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
// or visit www.oracle.com if you need additional information or have any
|
||||
// questions.
|
||||
//
|
||||
|
||||
// ARM Architecture Description File
|
||||
|
||||
//----------REGISTER DEFINITION BLOCK------------------------------------------
|
||||
// This information is used by the matcher and the register allocator to
|
||||
// describe individual registers and classes of registers within the target
|
||||
// architecture.
|
||||
register %{
|
||||
//----------Architecture Description Register Definitions----------------------
|
||||
// General Registers
|
||||
// "reg_def" name ( register save type, C convention save type,
|
||||
// ideal register type, encoding, vm name );
|
||||
// Register Save Types:
|
||||
//
|
||||
// NS = No-Save: The register allocator assumes that these registers
|
||||
// can be used without saving upon entry to the method, &
|
||||
// that they do not need to be saved at call sites.
|
||||
//
|
||||
// SOC = Save-On-Call: The register allocator assumes that these registers
|
||||
// can be used without saving upon entry to the method,
|
||||
// but that they must be saved at call sites.
|
||||
//
|
||||
// SOE = Save-On-Entry: The register allocator assumes that these registers
|
||||
// must be saved before using them upon entry to the
|
||||
// method, but they do not need to be saved at call
|
||||
// sites.
|
||||
//
|
||||
// AS = Always-Save: The register allocator assumes that these registers
|
||||
// must be saved before using them upon entry to the
|
||||
// method, & that they must be saved at call sites.
|
||||
//
|
||||
// Ideal Register Type is used to determine how to save & restore a
|
||||
// register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
|
||||
// spilled with LoadP/StoreP. If the register supports both, use Op_RegI.
|
||||
// FIXME: above comment seems wrong. Spill done through MachSpillCopyNode
|
||||
//
|
||||
// The encoding number is the actual bit-pattern placed into the opcodes.
|
||||
|
||||
|
||||
// ----------------------------
|
||||
// Integer/Long Registers
|
||||
// ----------------------------
|
||||
|
||||
// TODO: would be nice to keep track of high-word state:
|
||||
// zeroRegI --> RegL
|
||||
// signedRegI --> RegL
|
||||
// junkRegI --> RegL
|
||||
// how to tell C2 to treak RegI as RegL, or RegL as RegI?
|
||||
reg_def R_R0 (SOC, SOC, Op_RegI, 0, R0->as_VMReg());
|
||||
reg_def R_R0x (SOC, SOC, Op_RegI, 255, R0->as_VMReg()->next());
|
||||
reg_def R_R1 (SOC, SOC, Op_RegI, 1, R1->as_VMReg());
|
||||
reg_def R_R1x (SOC, SOC, Op_RegI, 255, R1->as_VMReg()->next());
|
||||
reg_def R_R2 (SOC, SOC, Op_RegI, 2, R2->as_VMReg());
|
||||
reg_def R_R2x (SOC, SOC, Op_RegI, 255, R2->as_VMReg()->next());
|
||||
reg_def R_R3 (SOC, SOC, Op_RegI, 3, R3->as_VMReg());
|
||||
reg_def R_R3x (SOC, SOC, Op_RegI, 255, R3->as_VMReg()->next());
|
||||
reg_def R_R4 (SOC, SOC, Op_RegI, 4, R4->as_VMReg());
|
||||
reg_def R_R4x (SOC, SOC, Op_RegI, 255, R4->as_VMReg()->next());
|
||||
reg_def R_R5 (SOC, SOC, Op_RegI, 5, R5->as_VMReg());
|
||||
reg_def R_R5x (SOC, SOC, Op_RegI, 255, R5->as_VMReg()->next());
|
||||
reg_def R_R6 (SOC, SOC, Op_RegI, 6, R6->as_VMReg());
|
||||
reg_def R_R6x (SOC, SOC, Op_RegI, 255, R6->as_VMReg()->next());
|
||||
reg_def R_R7 (SOC, SOC, Op_RegI, 7, R7->as_VMReg());
|
||||
reg_def R_R7x (SOC, SOC, Op_RegI, 255, R7->as_VMReg()->next());
|
||||
|
||||
reg_def R_R8 (SOC, SOC, Op_RegI, 8, R8->as_VMReg());
|
||||
reg_def R_R8x (SOC, SOC, Op_RegI, 255, R8->as_VMReg()->next());
|
||||
reg_def R_R9 (SOC, SOC, Op_RegI, 9, R9->as_VMReg());
|
||||
reg_def R_R9x (SOC, SOC, Op_RegI, 255, R9->as_VMReg()->next());
|
||||
reg_def R_R10 (SOC, SOC, Op_RegI, 10, R10->as_VMReg());
|
||||
reg_def R_R10x(SOC, SOC, Op_RegI, 255, R10->as_VMReg()->next());
|
||||
reg_def R_R11 (SOC, SOC, Op_RegI, 11, R11->as_VMReg());
|
||||
reg_def R_R11x(SOC, SOC, Op_RegI, 255, R11->as_VMReg()->next());
|
||||
reg_def R_R12 (SOC, SOC, Op_RegI, 12, R12->as_VMReg());
|
||||
reg_def R_R12x(SOC, SOC, Op_RegI, 255, R12->as_VMReg()->next());
|
||||
reg_def R_R13 (SOC, SOC, Op_RegI, 13, R13->as_VMReg());
|
||||
reg_def R_R13x(SOC, SOC, Op_RegI, 255, R13->as_VMReg()->next());
|
||||
reg_def R_R14 (SOC, SOC, Op_RegI, 14, R14->as_VMReg());
|
||||
reg_def R_R14x(SOC, SOC, Op_RegI, 255, R14->as_VMReg()->next());
|
||||
reg_def R_R15 (SOC, SOC, Op_RegI, 15, R15->as_VMReg());
|
||||
reg_def R_R15x(SOC, SOC, Op_RegI, 255, R15->as_VMReg()->next());
|
||||
|
||||
reg_def R_R16 (SOC, SOC, Op_RegI, 16, R16->as_VMReg()); // IP0
|
||||
reg_def R_R16x(SOC, SOC, Op_RegI, 255, R16->as_VMReg()->next());
|
||||
reg_def R_R17 (SOC, SOC, Op_RegI, 17, R17->as_VMReg()); // IP1
|
||||
reg_def R_R17x(SOC, SOC, Op_RegI, 255, R17->as_VMReg()->next());
|
||||
reg_def R_R18 (SOC, SOC, Op_RegI, 18, R18->as_VMReg()); // Platform Register
|
||||
reg_def R_R18x(SOC, SOC, Op_RegI, 255, R18->as_VMReg()->next());
|
||||
|
||||
reg_def R_R19 (SOC, SOE, Op_RegI, 19, R19->as_VMReg());
|
||||
reg_def R_R19x(SOC, SOE, Op_RegI, 255, R19->as_VMReg()->next());
|
||||
reg_def R_R20 (SOC, SOE, Op_RegI, 20, R20->as_VMReg());
|
||||
reg_def R_R20x(SOC, SOE, Op_RegI, 255, R20->as_VMReg()->next());
|
||||
reg_def R_R21 (SOC, SOE, Op_RegI, 21, R21->as_VMReg());
|
||||
reg_def R_R21x(SOC, SOE, Op_RegI, 255, R21->as_VMReg()->next());
|
||||
reg_def R_R22 (SOC, SOE, Op_RegI, 22, R22->as_VMReg());
|
||||
reg_def R_R22x(SOC, SOE, Op_RegI, 255, R22->as_VMReg()->next());
|
||||
reg_def R_R23 (SOC, SOE, Op_RegI, 23, R23->as_VMReg());
|
||||
reg_def R_R23x(SOC, SOE, Op_RegI, 255, R23->as_VMReg()->next());
|
||||
reg_def R_R24 (SOC, SOE, Op_RegI, 24, R24->as_VMReg());
|
||||
reg_def R_R24x(SOC, SOE, Op_RegI, 255, R24->as_VMReg()->next());
|
||||
reg_def R_R25 (SOC, SOE, Op_RegI, 25, R25->as_VMReg());
|
||||
reg_def R_R25x(SOC, SOE, Op_RegI, 255, R25->as_VMReg()->next());
|
||||
reg_def R_R26 (SOC, SOE, Op_RegI, 26, R26->as_VMReg());
|
||||
reg_def R_R26x(SOC, SOE, Op_RegI, 255, R26->as_VMReg()->next());
|
||||
reg_def R_R27 (SOC, SOE, Op_RegI, 27, R27->as_VMReg()); // Rheap_base
|
||||
reg_def R_R27x(SOC, SOE, Op_RegI, 255, R27->as_VMReg()->next()); // Rheap_base
|
||||
reg_def R_R28 ( NS, SOE, Op_RegI, 28, R28->as_VMReg()); // TLS
|
||||
reg_def R_R28x( NS, SOE, Op_RegI, 255, R28->as_VMReg()->next()); // TLS
|
||||
|
||||
reg_def R_R29 ( NS, SOE, Op_RegI, 29, R29->as_VMReg()); // FP
|
||||
reg_def R_R29x( NS, SOE, Op_RegI, 255, R29->as_VMReg()->next()); // FP
|
||||
reg_def R_R30 (SOC, SOC, Op_RegI, 30, R30->as_VMReg()); // LR
|
||||
reg_def R_R30x(SOC, SOC, Op_RegI, 255, R30->as_VMReg()->next()); // LR
|
||||
|
||||
reg_def R_ZR ( NS, NS, Op_RegI, 31, ZR->as_VMReg()); // ZR
|
||||
reg_def R_ZRx( NS, NS, Op_RegI, 255, ZR->as_VMReg()->next()); // ZR
|
||||
|
||||
// FIXME
|
||||
//reg_def R_SP ( NS, NS, Op_RegP, 32, SP->as_VMReg());
|
||||
reg_def R_SP ( NS, NS, Op_RegI, 32, SP->as_VMReg());
|
||||
//reg_def R_SPx( NS, NS, Op_RegP, 255, SP->as_VMReg()->next());
|
||||
reg_def R_SPx( NS, NS, Op_RegI, 255, SP->as_VMReg()->next());
|
||||
|
||||
// ----------------------------
|
||||
// Float/Double/Vector Registers
|
||||
// ----------------------------
|
||||
|
||||
reg_def R_V0(SOC, SOC, Op_RegF, 0, V0->as_VMReg());
|
||||
reg_def R_V1(SOC, SOC, Op_RegF, 1, V1->as_VMReg());
|
||||
reg_def R_V2(SOC, SOC, Op_RegF, 2, V2->as_VMReg());
|
||||
reg_def R_V3(SOC, SOC, Op_RegF, 3, V3->as_VMReg());
|
||||
reg_def R_V4(SOC, SOC, Op_RegF, 4, V4->as_VMReg());
|
||||
reg_def R_V5(SOC, SOC, Op_RegF, 5, V5->as_VMReg());
|
||||
reg_def R_V6(SOC, SOC, Op_RegF, 6, V6->as_VMReg());
|
||||
reg_def R_V7(SOC, SOC, Op_RegF, 7, V7->as_VMReg());
|
||||
reg_def R_V8(SOC, SOC, Op_RegF, 8, V8->as_VMReg());
|
||||
reg_def R_V9(SOC, SOC, Op_RegF, 9, V9->as_VMReg());
|
||||
reg_def R_V10(SOC, SOC, Op_RegF, 10, V10->as_VMReg());
|
||||
reg_def R_V11(SOC, SOC, Op_RegF, 11, V11->as_VMReg());
|
||||
reg_def R_V12(SOC, SOC, Op_RegF, 12, V12->as_VMReg());
|
||||
reg_def R_V13(SOC, SOC, Op_RegF, 13, V13->as_VMReg());
|
||||
reg_def R_V14(SOC, SOC, Op_RegF, 14, V14->as_VMReg());
|
||||
reg_def R_V15(SOC, SOC, Op_RegF, 15, V15->as_VMReg());
|
||||
reg_def R_V16(SOC, SOC, Op_RegF, 16, V16->as_VMReg());
|
||||
reg_def R_V17(SOC, SOC, Op_RegF, 17, V17->as_VMReg());
|
||||
reg_def R_V18(SOC, SOC, Op_RegF, 18, V18->as_VMReg());
|
||||
reg_def R_V19(SOC, SOC, Op_RegF, 19, V19->as_VMReg());
|
||||
reg_def R_V20(SOC, SOC, Op_RegF, 20, V20->as_VMReg());
|
||||
reg_def R_V21(SOC, SOC, Op_RegF, 21, V21->as_VMReg());
|
||||
reg_def R_V22(SOC, SOC, Op_RegF, 22, V22->as_VMReg());
|
||||
reg_def R_V23(SOC, SOC, Op_RegF, 23, V23->as_VMReg());
|
||||
reg_def R_V24(SOC, SOC, Op_RegF, 24, V24->as_VMReg());
|
||||
reg_def R_V25(SOC, SOC, Op_RegF, 25, V25->as_VMReg());
|
||||
reg_def R_V26(SOC, SOC, Op_RegF, 26, V26->as_VMReg());
|
||||
reg_def R_V27(SOC, SOC, Op_RegF, 27, V27->as_VMReg());
|
||||
reg_def R_V28(SOC, SOC, Op_RegF, 28, V28->as_VMReg());
|
||||
reg_def R_V29(SOC, SOC, Op_RegF, 29, V29->as_VMReg());
|
||||
reg_def R_V30(SOC, SOC, Op_RegF, 30, V30->as_VMReg());
|
||||
reg_def R_V31(SOC, SOC, Op_RegF, 31, V31->as_VMReg());
|
||||
|
||||
reg_def R_V0b(SOC, SOC, Op_RegF, 255, V0->as_VMReg()->next(1));
|
||||
reg_def R_V1b(SOC, SOC, Op_RegF, 255, V1->as_VMReg()->next(1));
|
||||
reg_def R_V2b(SOC, SOC, Op_RegF, 255, V2->as_VMReg()->next(1));
|
||||
reg_def R_V3b(SOC, SOC, Op_RegF, 3, V3->as_VMReg()->next(1));
|
||||
reg_def R_V4b(SOC, SOC, Op_RegF, 4, V4->as_VMReg()->next(1));
|
||||
reg_def R_V5b(SOC, SOC, Op_RegF, 5, V5->as_VMReg()->next(1));
|
||||
reg_def R_V6b(SOC, SOC, Op_RegF, 6, V6->as_VMReg()->next(1));
|
||||
reg_def R_V7b(SOC, SOC, Op_RegF, 7, V7->as_VMReg()->next(1));
|
||||
reg_def R_V8b(SOC, SOC, Op_RegF, 255, V8->as_VMReg()->next(1));
|
||||
reg_def R_V9b(SOC, SOC, Op_RegF, 9, V9->as_VMReg()->next(1));
|
||||
reg_def R_V10b(SOC, SOC, Op_RegF, 10, V10->as_VMReg()->next(1));
|
||||
reg_def R_V11b(SOC, SOC, Op_RegF, 11, V11->as_VMReg()->next(1));
|
||||
reg_def R_V12b(SOC, SOC, Op_RegF, 12, V12->as_VMReg()->next(1));
|
||||
reg_def R_V13b(SOC, SOC, Op_RegF, 13, V13->as_VMReg()->next(1));
|
||||
reg_def R_V14b(SOC, SOC, Op_RegF, 14, V14->as_VMReg()->next(1));
|
||||
reg_def R_V15b(SOC, SOC, Op_RegF, 15, V15->as_VMReg()->next(1));
|
||||
reg_def R_V16b(SOC, SOC, Op_RegF, 16, V16->as_VMReg()->next(1));
|
||||
reg_def R_V17b(SOC, SOC, Op_RegF, 17, V17->as_VMReg()->next(1));
|
||||
reg_def R_V18b(SOC, SOC, Op_RegF, 18, V18->as_VMReg()->next(1));
|
||||
reg_def R_V19b(SOC, SOC, Op_RegF, 19, V19->as_VMReg()->next(1));
|
||||
reg_def R_V20b(SOC, SOC, Op_RegF, 20, V20->as_VMReg()->next(1));
|
||||
reg_def R_V21b(SOC, SOC, Op_RegF, 21, V21->as_VMReg()->next(1));
|
||||
reg_def R_V22b(SOC, SOC, Op_RegF, 22, V22->as_VMReg()->next(1));
|
||||
reg_def R_V23b(SOC, SOC, Op_RegF, 23, V23->as_VMReg()->next(1));
|
||||
reg_def R_V24b(SOC, SOC, Op_RegF, 24, V24->as_VMReg()->next(1));
|
||||
reg_def R_V25b(SOC, SOC, Op_RegF, 25, V25->as_VMReg()->next(1));
|
||||
reg_def R_V26b(SOC, SOC, Op_RegF, 26, V26->as_VMReg()->next(1));
|
||||
reg_def R_V27b(SOC, SOC, Op_RegF, 27, V27->as_VMReg()->next(1));
|
||||
reg_def R_V28b(SOC, SOC, Op_RegF, 28, V28->as_VMReg()->next(1));
|
||||
reg_def R_V29b(SOC, SOC, Op_RegF, 29, V29->as_VMReg()->next(1));
|
||||
reg_def R_V30b(SOC, SOC, Op_RegD, 30, V30->as_VMReg()->next(1));
|
||||
reg_def R_V31b(SOC, SOC, Op_RegF, 31, V31->as_VMReg()->next(1));
|
||||
|
||||
reg_def R_V0c(SOC, SOC, Op_RegF, 0, V0->as_VMReg()->next(2));
|
||||
reg_def R_V1c(SOC, SOC, Op_RegF, 1, V1->as_VMReg()->next(2));
|
||||
reg_def R_V2c(SOC, SOC, Op_RegF, 2, V2->as_VMReg()->next(2));
|
||||
reg_def R_V3c(SOC, SOC, Op_RegF, 3, V3->as_VMReg()->next(2));
|
||||
reg_def R_V4c(SOC, SOC, Op_RegF, 4, V4->as_VMReg()->next(2));
|
||||
reg_def R_V5c(SOC, SOC, Op_RegF, 5, V5->as_VMReg()->next(2));
|
||||
reg_def R_V6c(SOC, SOC, Op_RegF, 6, V6->as_VMReg()->next(2));
|
||||
reg_def R_V7c(SOC, SOC, Op_RegF, 7, V7->as_VMReg()->next(2));
|
||||
reg_def R_V8c(SOC, SOC, Op_RegF, 8, V8->as_VMReg()->next(2));
|
||||
reg_def R_V9c(SOC, SOC, Op_RegF, 9, V9->as_VMReg()->next(2));
|
||||
reg_def R_V10c(SOC, SOC, Op_RegF, 10, V10->as_VMReg()->next(2));
|
||||
reg_def R_V11c(SOC, SOC, Op_RegF, 11, V11->as_VMReg()->next(2));
|
||||
reg_def R_V12c(SOC, SOC, Op_RegF, 12, V12->as_VMReg()->next(2));
|
||||
reg_def R_V13c(SOC, SOC, Op_RegF, 13, V13->as_VMReg()->next(2));
|
||||
reg_def R_V14c(SOC, SOC, Op_RegF, 14, V14->as_VMReg()->next(2));
|
||||
reg_def R_V15c(SOC, SOC, Op_RegF, 15, V15->as_VMReg()->next(2));
|
||||
reg_def R_V16c(SOC, SOC, Op_RegF, 16, V16->as_VMReg()->next(2));
|
||||
reg_def R_V17c(SOC, SOC, Op_RegF, 17, V17->as_VMReg()->next(2));
|
||||
reg_def R_V18c(SOC, SOC, Op_RegF, 18, V18->as_VMReg()->next(2));
|
||||
reg_def R_V19c(SOC, SOC, Op_RegF, 19, V19->as_VMReg()->next(2));
|
||||
reg_def R_V20c(SOC, SOC, Op_RegF, 20, V20->as_VMReg()->next(2));
|
||||
reg_def R_V21c(SOC, SOC, Op_RegF, 21, V21->as_VMReg()->next(2));
|
||||
reg_def R_V22c(SOC, SOC, Op_RegF, 22, V22->as_VMReg()->next(2));
|
||||
reg_def R_V23c(SOC, SOC, Op_RegF, 23, V23->as_VMReg()->next(2));
|
||||
reg_def R_V24c(SOC, SOC, Op_RegF, 24, V24->as_VMReg()->next(2));
|
||||
reg_def R_V25c(SOC, SOC, Op_RegF, 25, V25->as_VMReg()->next(2));
|
||||
reg_def R_V26c(SOC, SOC, Op_RegF, 26, V26->as_VMReg()->next(2));
|
||||
reg_def R_V27c(SOC, SOC, Op_RegF, 27, V27->as_VMReg()->next(2));
|
||||
reg_def R_V28c(SOC, SOC, Op_RegF, 28, V28->as_VMReg()->next(2));
|
||||
reg_def R_V29c(SOC, SOC, Op_RegF, 29, V29->as_VMReg()->next(2));
|
||||
reg_def R_V30c(SOC, SOC, Op_RegF, 30, V30->as_VMReg()->next(2));
|
||||
reg_def R_V31c(SOC, SOC, Op_RegF, 31, V31->as_VMReg()->next(2));
|
||||
|
||||
reg_def R_V0d(SOC, SOC, Op_RegF, 0, V0->as_VMReg()->next(3));
|
||||
reg_def R_V1d(SOC, SOC, Op_RegF, 1, V1->as_VMReg()->next(3));
|
||||
reg_def R_V2d(SOC, SOC, Op_RegF, 2, V2->as_VMReg()->next(3));
|
||||
reg_def R_V3d(SOC, SOC, Op_RegF, 3, V3->as_VMReg()->next(3));
|
||||
reg_def R_V4d(SOC, SOC, Op_RegF, 4, V4->as_VMReg()->next(3));
|
||||
reg_def R_V5d(SOC, SOC, Op_RegF, 5, V5->as_VMReg()->next(3));
|
||||
reg_def R_V6d(SOC, SOC, Op_RegF, 6, V6->as_VMReg()->next(3));
|
||||
reg_def R_V7d(SOC, SOC, Op_RegF, 7, V7->as_VMReg()->next(3));
|
||||
reg_def R_V8d(SOC, SOC, Op_RegF, 8, V8->as_VMReg()->next(3));
|
||||
reg_def R_V9d(SOC, SOC, Op_RegF, 9, V9->as_VMReg()->next(3));
|
||||
reg_def R_V10d(SOC, SOC, Op_RegF, 10, V10->as_VMReg()->next(3));
|
||||
reg_def R_V11d(SOC, SOC, Op_RegF, 11, V11->as_VMReg()->next(3));
|
||||
reg_def R_V12d(SOC, SOC, Op_RegF, 12, V12->as_VMReg()->next(3));
|
||||
reg_def R_V13d(SOC, SOC, Op_RegF, 13, V13->as_VMReg()->next(3));
|
||||
reg_def R_V14d(SOC, SOC, Op_RegF, 14, V14->as_VMReg()->next(3));
|
||||
reg_def R_V15d(SOC, SOC, Op_RegF, 15, V15->as_VMReg()->next(3));
|
||||
reg_def R_V16d(SOC, SOC, Op_RegF, 16, V16->as_VMReg()->next(3));
|
||||
reg_def R_V17d(SOC, SOC, Op_RegF, 17, V17->as_VMReg()->next(3));
|
||||
reg_def R_V18d(SOC, SOC, Op_RegF, 18, V18->as_VMReg()->next(3));
|
||||
reg_def R_V19d(SOC, SOC, Op_RegF, 19, V19->as_VMReg()->next(3));
|
||||
reg_def R_V20d(SOC, SOC, Op_RegF, 20, V20->as_VMReg()->next(3));
|
||||
reg_def R_V21d(SOC, SOC, Op_RegF, 21, V21->as_VMReg()->next(3));
|
||||
reg_def R_V22d(SOC, SOC, Op_RegF, 22, V22->as_VMReg()->next(3));
|
||||
reg_def R_V23d(SOC, SOC, Op_RegF, 23, V23->as_VMReg()->next(3));
|
||||
reg_def R_V24d(SOC, SOC, Op_RegF, 24, V24->as_VMReg()->next(3));
|
||||
reg_def R_V25d(SOC, SOC, Op_RegF, 25, V25->as_VMReg()->next(3));
|
||||
reg_def R_V26d(SOC, SOC, Op_RegF, 26, V26->as_VMReg()->next(3));
|
||||
reg_def R_V27d(SOC, SOC, Op_RegF, 27, V27->as_VMReg()->next(3));
|
||||
reg_def R_V28d(SOC, SOC, Op_RegF, 28, V28->as_VMReg()->next(3));
|
||||
reg_def R_V29d(SOC, SOC, Op_RegF, 29, V29->as_VMReg()->next(3));
|
||||
reg_def R_V30d(SOC, SOC, Op_RegF, 30, V30->as_VMReg()->next(3));
|
||||
reg_def R_V31d(SOC, SOC, Op_RegF, 31, V31->as_VMReg()->next(3));
|
||||
|
||||
// ----------------------------
|
||||
// Special Registers
|
||||
// Condition Codes Flag Registers
|
||||
reg_def APSR (SOC, SOC, Op_RegFlags, 255, VMRegImpl::Bad());
|
||||
reg_def FPSCR(SOC, SOC, Op_RegFlags, 255, VMRegImpl::Bad());
|
||||
|
||||
// ----------------------------
|
||||
// Specify the enum values for the registers. These enums are only used by the
|
||||
// OptoReg "class". We can convert these enum values at will to VMReg when needed
|
||||
// for visibility to the rest of the vm. The order of this enum influences the
|
||||
// register allocator so having the freedom to set this order and not be stuck
|
||||
// with the order that is natural for the rest of the vm is worth it.
|
||||
|
||||
// Quad vector must be aligned here, so list them first.
|
||||
alloc_class fprs(
|
||||
R_V8, R_V8b, R_V8c, R_V8d, R_V9, R_V9b, R_V9c, R_V9d,
|
||||
R_V10, R_V10b, R_V10c, R_V10d, R_V11, R_V11b, R_V11c, R_V11d,
|
||||
R_V12, R_V12b, R_V12c, R_V12d, R_V13, R_V13b, R_V13c, R_V13d,
|
||||
R_V14, R_V14b, R_V14c, R_V14d, R_V15, R_V15b, R_V15c, R_V15d,
|
||||
R_V16, R_V16b, R_V16c, R_V16d, R_V17, R_V17b, R_V17c, R_V17d,
|
||||
R_V18, R_V18b, R_V18c, R_V18d, R_V19, R_V19b, R_V19c, R_V19d,
|
||||
R_V20, R_V20b, R_V20c, R_V20d, R_V21, R_V21b, R_V21c, R_V21d,
|
||||
R_V22, R_V22b, R_V22c, R_V22d, R_V23, R_V23b, R_V23c, R_V23d,
|
||||
R_V24, R_V24b, R_V24c, R_V24d, R_V25, R_V25b, R_V25c, R_V25d,
|
||||
R_V26, R_V26b, R_V26c, R_V26d, R_V27, R_V27b, R_V27c, R_V27d,
|
||||
R_V28, R_V28b, R_V28c, R_V28d, R_V29, R_V29b, R_V29c, R_V29d,
|
||||
R_V30, R_V30b, R_V30c, R_V30d, R_V31, R_V31b, R_V31c, R_V31d,
|
||||
R_V0, R_V0b, R_V0c, R_V0d, R_V1, R_V1b, R_V1c, R_V1d,
|
||||
R_V2, R_V2b, R_V2c, R_V2d, R_V3, R_V3b, R_V3c, R_V3d,
|
||||
R_V4, R_V4b, R_V4c, R_V4d, R_V5, R_V5b, R_V5c, R_V5d,
|
||||
R_V6, R_V6b, R_V6c, R_V6d, R_V7, R_V7b, R_V7c, R_V7d
|
||||
);
|
||||
|
||||
// Need double-register alignment here.
|
||||
// We are already quad-register aligned because of vectors above.
|
||||
alloc_class gprs(
|
||||
R_R0, R_R0x, R_R1, R_R1x, R_R2, R_R2x, R_R3, R_R3x,
|
||||
R_R4, R_R4x, R_R5, R_R5x, R_R6, R_R6x, R_R7, R_R7x,
|
||||
R_R8, R_R8x, R_R9, R_R9x, R_R10, R_R10x, R_R11, R_R11x,
|
||||
R_R12, R_R12x, R_R13, R_R13x, R_R14, R_R14x, R_R15, R_R15x,
|
||||
R_R16, R_R16x, R_R17, R_R17x, R_R18, R_R18x, R_R19, R_R19x,
|
||||
R_R20, R_R20x, R_R21, R_R21x, R_R22, R_R22x, R_R23, R_R23x,
|
||||
R_R24, R_R24x, R_R25, R_R25x, R_R26, R_R26x, R_R27, R_R27x,
|
||||
R_R28, R_R28x, R_R29, R_R29x, R_R30, R_R30x
|
||||
);
// Continuing with double-register alignment...
alloc_class chunk2(APSR, FPSCR);
alloc_class chunk3(R_SP, R_SPx);
alloc_class chunk4(R_ZR, R_ZRx);

//----------Architecture Description Register Classes--------------------------
|
||||
// Several register classes are automatically defined based upon information in
|
||||
// this architecture description.
|
||||
// 1) reg_class inline_cache_reg ( as defined in frame section )
|
||||
// 2) reg_class interpreter_method_oop_reg ( as defined in frame section )
|
||||
// 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
|
||||
//
|
||||
|
||||
// ----------------------------
|
||||
// Integer Register Classes
|
||||
// ----------------------------
|
||||
reg_class int_reg_all(R_R0, R_R1, R_R2, R_R3, R_R4, R_R5, R_R6, R_R7,
|
||||
R_R8, R_R9, R_R10, R_R11, R_R12, R_R13, R_R14, R_R15,
|
||||
R_R16, R_R17, R_R18, R_R19, R_R20, R_R21, R_R22, R_R23,
|
||||
R_R24, R_R25, R_R26, R_R27, R_R28, R_R29, R_R30
|
||||
);
|
||||
|
||||
// Exclusions from i_reg:
|
||||
// SP (R31)
|
||||
// Rthread/R28: reserved by HotSpot to the TLS register (invariant within Java)
|
||||
reg_class int_reg %{
|
||||
return _INT_REG_mask;
|
||||
%}
|
||||
reg_class ptr_reg %{
|
||||
return _PTR_REG_mask;
|
||||
%}
|
||||
reg_class vectorx_reg %{
|
||||
return _VECTORX_REG_mask;
|
||||
%}
|
||||
|
||||
reg_class R0_regI(R_R0);
|
||||
reg_class R1_regI(R_R1);
|
||||
reg_class R2_regI(R_R2);
|
||||
reg_class R3_regI(R_R3);
|
||||
//reg_class R12_regI(R_R12);
|
||||
|
||||
// ----------------------------
|
||||
// Pointer Register Classes
|
||||
// ----------------------------
|
||||
|
||||
// Special class for storeP instructions, which can store SP or RPC to TLS.
|
||||
// It is also used for memory addressing, allowing direct TLS addressing.
|
||||
|
||||
reg_class sp_ptr_reg %{
|
||||
return _SP_PTR_REG_mask;
|
||||
%}
|
||||
|
||||
reg_class store_reg %{
|
||||
return _STR_REG_mask;
|
||||
%}
|
||||
|
||||
reg_class store_ptr_reg %{
|
||||
return _STR_PTR_REG_mask;
|
||||
%}
|
||||
|
||||
reg_class spillP_reg %{
|
||||
return _SPILLP_REG_mask;
|
||||
%}
|
||||
|
||||
// Other special pointer regs
|
||||
reg_class R0_regP(R_R0, R_R0x);
|
||||
reg_class R1_regP(R_R1, R_R1x);
|
||||
reg_class R2_regP(R_R2, R_R2x);
|
||||
reg_class Rexception_regP(R_R19, R_R19x);
|
||||
reg_class Ricklass_regP(R_R8, R_R8x);
|
||||
reg_class Rmethod_regP(R_R27, R_R27x);
|
||||
|
||||
reg_class Rthread_regP(R_R28, R_R28x);
|
||||
reg_class IP_regP(R_R16, R_R16x);
|
||||
#define RtempRegP IPRegP
|
||||
reg_class LR_regP(R_R30, R_R30x);
|
||||
|
||||
reg_class SP_regP(R_SP, R_SPx);
|
||||
reg_class FP_regP(R_R29, R_R29x);
|
||||
|
||||
reg_class ZR_regP(R_ZR, R_ZRx);
|
||||
reg_class ZR_regI(R_ZR);
|
||||
|
||||
// ----------------------------
|
||||
// Long Register Classes
|
||||
// ----------------------------
|
||||
reg_class long_reg %{ return _PTR_REG_mask; %}
|
||||
// for ldrexd, strexd: first reg of pair must be even
|
||||
reg_class long_reg_align %{ return LONG_REG_mask(); %}
|
||||
|
||||
reg_class R0_regL(R_R0,R_R0x); // arg 1 or return value
|
||||
|
||||
// ----------------------------
|
||||
// Special Class for Condition Code Flags Register
|
||||
reg_class int_flags(APSR);
|
||||
reg_class float_flags(FPSCR);
|
||||
|
||||
|
||||
// ----------------------------
|
||||
// Float Point Register Classes
|
||||
// ----------------------------
|
||||
reg_class sflt_reg_0(
|
||||
R_V0, R_V1, R_V2, R_V3, R_V4, R_V5, R_V6, R_V7,
|
||||
R_V8, R_V9, R_V10, R_V11, R_V12, R_V13, R_V14, R_V15,
|
||||
R_V16, R_V17, R_V18, R_V19, R_V20, R_V21, R_V22, R_V23,
|
||||
R_V24, R_V25, R_V26, R_V27, R_V28, R_V29, R_V30, R_V31);
|
||||
|
||||
reg_class sflt_reg %{
|
||||
return _SFLT_REG_mask;
|
||||
%}
|
||||
|
||||
reg_class dflt_low_reg %{
|
||||
return _DFLT_REG_mask;
|
||||
%}
|
||||
|
||||
reg_class actual_dflt_reg %{
|
||||
return _DFLT_REG_mask;
|
||||
%}
|
||||
|
||||
reg_class vectorx_reg_0(
|
||||
R_V0, R_V1, R_V2, R_V3, R_V4, R_V5, R_V6, R_V7,
|
||||
R_V8, R_V9, R_V10, R_V11, R_V12, R_V13, R_V14, R_V15,
|
||||
R_V16, R_V17, R_V18, R_V19, R_V20, R_V21, R_V22, R_V23,
|
||||
R_V24, R_V25, R_V26, R_V27, R_V28, R_V29, R_V30, /*R_V31,*/
|
||||
R_V0b, R_V1b, R_V2b, R_V3b, R_V4b, R_V5b, R_V6b, R_V7b,
|
||||
R_V8b, R_V9b, R_V10b, R_V11b, R_V12b, R_V13b, R_V14b, R_V15b,
|
||||
R_V16b, R_V17b, R_V18b, R_V19b, R_V20b, R_V21b, R_V22b, R_V23b,
|
||||
R_V24b, R_V25b, R_V26b, R_V27b, R_V28b, R_V29b, R_V30b, /*R_V31b,*/
|
||||
R_V0c, R_V1c, R_V2c, R_V3c, R_V4c, R_V5c, R_V6c, R_V7c,
|
||||
R_V8c, R_V9c, R_V10c, R_V11c, R_V12c, R_V13c, R_V14c, R_V15c,
|
||||
R_V16c, R_V17c, R_V18c, R_V19c, R_V20c, R_V21c, R_V22c, R_V23c,
|
||||
R_V24c, R_V25c, R_V26c, R_V27c, R_V28c, R_V29c, R_V30c, /*R_V31c,*/
|
||||
R_V0d, R_V1d, R_V2d, R_V3d, R_V4d, R_V5d, R_V6d, R_V7d,
|
||||
R_V8d, R_V9d, R_V10d, R_V11d, R_V12d, R_V13d, R_V14d, R_V15d,
|
||||
R_V16d, R_V17d, R_V18d, R_V19d, R_V20d, R_V21d, R_V22d, R_V23d,
|
||||
R_V24d, R_V25d, R_V26d, R_V27d, R_V28d, R_V29d, R_V30d, /*R_V31d*/);
|
||||
|
||||
reg_class Rmemcopy_reg %{
|
||||
return _RMEMCOPY_REG_mask;
|
||||
%}
|
||||
|
||||
%}
|
||||
|
||||
source_hpp %{
|
||||
|
||||
const MachRegisterNumbers R_mem_copy_lo_num = R_V31_num;
|
||||
const MachRegisterNumbers R_mem_copy_hi_num = R_V31b_num;
|
||||
const FloatRegister Rmemcopy = V31;
|
||||
|
||||
const MachRegisterNumbers R_hf_ret_lo_num = R_V0_num;
|
||||
const MachRegisterNumbers R_hf_ret_hi_num = R_V0b_num;
|
||||
const FloatRegister Rhfret = V0;
|
||||
|
||||
extern OptoReg::Name R_Ricklass_num;
|
||||
extern OptoReg::Name R_Rmethod_num;
|
||||
extern OptoReg::Name R_tls_num;
|
||||
extern OptoReg::Name R_Rheap_base_num;
|
||||
|
||||
extern RegMask _INT_REG_mask;
|
||||
extern RegMask _PTR_REG_mask;
|
||||
extern RegMask _SFLT_REG_mask;
|
||||
extern RegMask _DFLT_REG_mask;
|
||||
extern RegMask _VECTORX_REG_mask;
|
||||
extern RegMask _RMEMCOPY_REG_mask;
|
||||
extern RegMask _SP_PTR_REG_mask;
|
||||
extern RegMask _SPILLP_REG_mask;
|
||||
extern RegMask _STR_REG_mask;
|
||||
extern RegMask _STR_PTR_REG_mask;
|
||||
|
||||
#define LDR_DOUBLE "LDR_D"
|
||||
#define LDR_FLOAT "LDR_S"
|
||||
#define STR_DOUBLE "STR_D"
|
||||
#define STR_FLOAT "STR_S"
|
||||
#define STR_64 "STR"
|
||||
#define LDR_64 "LDR"
|
||||
#define STR_32 "STR_W"
|
||||
#define LDR_32 "LDR_W"
|
||||
#define MOV_DOUBLE "FMOV_D"
|
||||
#define MOV_FLOAT "FMOV_S"
|
||||
#define FMSR "FMOV_SW"
|
||||
#define FMRS "FMOV_WS"
|
||||
#define LDREX "ldxr "
|
||||
#define STREX "stxr "
|
||||
|
||||
#define str_64 str
|
||||
#define ldr_64 ldr
|
||||
#define ldr_32 ldr_w
|
||||
#define ldrex ldxr
|
||||
#define strex stxr
|
||||
|
||||
#define fmsr fmov_sw
|
||||
#define fmrs fmov_ws
|
||||
#define fconsts fmov_s
|
||||
#define fconstd fmov_d
|
||||
|
||||
static inline bool is_uimm12(jlong imm, int shift) {
  return Assembler::is_unsigned_imm_in_range(imm, 12, shift);
}

static inline bool is_memoryD(int offset) {
  int scale = 3; // LogBytesPerDouble
  return is_uimm12(offset, scale);
}

static inline bool is_memoryfp(int offset) {
  int scale = LogBytesPerInt; // include 32-bit word accesses
  return is_uimm12(offset, scale);
}

static inline bool is_memoryI(int offset) {
  int scale = LogBytesPerInt;
  return is_uimm12(offset, scale);
}

static inline bool is_memoryP(int offset) {
  int scale = LogBytesPerWord;
  return is_uimm12(offset, scale);
}

static inline bool is_memoryHD(int offset) {
  int scale = LogBytesPerInt; // include 32-bit word accesses
  return is_uimm12(offset, scale);
}
|
||||
uintx limmL_low(uintx imm, int n);
|
||||
|
||||
static inline bool Xis_aimm(int imm) {
|
||||
return Assembler::ArithmeticImmediate(imm).is_encoded();
|
||||
}
|
||||
|
||||
static inline bool is_aimm(intptr_t imm) {
|
||||
return Assembler::ArithmeticImmediate(imm).is_encoded();
|
||||
}
|
||||
|
||||
static inline bool is_limmL(uintptr_t imm) {
|
||||
return Assembler::LogicalImmediate(imm).is_encoded();
|
||||
}
|
||||
|
||||
static inline bool is_limmL_low(intptr_t imm, int n) {
|
||||
return is_limmL(limmL_low(imm, n));
|
||||
}
|
||||
|
||||
static inline bool is_limmI(jint imm) {
|
||||
return Assembler::LogicalImmediate(imm, true).is_encoded();
|
||||
}
|
||||
|
||||
static inline uintx limmI_low(jint imm, int n) {
|
||||
return limmL_low(imm, n);
|
||||
}
|
||||
|
||||
static inline bool is_limmI_low(jint imm, int n) {
|
||||
return is_limmL_low(imm, n);
|
||||
}
|
||||
|
||||
%}
|
||||
|
||||
source %{
|
||||
|
||||
// Given a register encoding, produce a Integer Register object
|
||||
static Register reg_to_register_object(int register_encoding) {
|
||||
assert(R0->encoding() == R_R0_enc && R30->encoding() == R_R30_enc, "right coding");
|
||||
assert(Rthread->encoding() == R_R28_enc, "right coding");
|
||||
assert(SP->encoding() == R_SP_enc, "right coding");
|
||||
return as_Register(register_encoding);
|
||||
}
|
||||
|
||||
// Given a register encoding, produce a single-precision Float Register object
|
||||
static FloatRegister reg_to_FloatRegister_object(int register_encoding) {
|
||||
assert(V0->encoding() == R_V0_enc && V31->encoding() == R_V31_enc, "right coding");
|
||||
return as_FloatRegister(register_encoding);
|
||||
}
|
||||
|
||||
RegMask _INT_REG_mask;
|
||||
RegMask _PTR_REG_mask;
|
||||
RegMask _SFLT_REG_mask;
|
||||
RegMask _DFLT_REG_mask;
|
||||
RegMask _VECTORX_REG_mask;
|
||||
RegMask _RMEMCOPY_REG_mask;
|
||||
RegMask _SP_PTR_REG_mask;
|
||||
RegMask _SPILLP_REG_mask;
|
||||
RegMask _STR_REG_mask;
|
||||
RegMask _STR_PTR_REG_mask;
|
||||
|
||||
OptoReg::Name R_Ricklass_num = -1;
|
||||
OptoReg::Name R_Rmethod_num = -1;
|
||||
OptoReg::Name R_tls_num = -1;
|
||||
OptoReg::Name R_Rtemp_num = -1;
|
||||
OptoReg::Name R_Rheap_base_num = -1;
|
||||
|
||||
static int mov_oop_size = -1;
|
||||
|
||||
#ifdef ASSERT
|
||||
static bool same_mask(const RegMask &a, const RegMask &b) {
|
||||
RegMask a_sub_b = a; a_sub_b.SUBTRACT(b);
|
||||
RegMask b_sub_a = b; b_sub_a.SUBTRACT(a);
|
||||
return a_sub_b.Size() == 0 && b_sub_a.Size() == 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
void Compile::pd_compiler2_init() {
|
||||
|
||||
R_Ricklass_num = OptoReg::as_OptoReg(Ricklass->as_VMReg());
|
||||
R_Rmethod_num = OptoReg::as_OptoReg(Rmethod->as_VMReg());
|
||||
R_tls_num = OptoReg::as_OptoReg(Rthread->as_VMReg());
|
||||
R_Rtemp_num = OptoReg::as_OptoReg(Rtemp->as_VMReg());
|
||||
R_Rheap_base_num = OptoReg::as_OptoReg(Rheap_base->as_VMReg());
|
||||
|
||||
_INT_REG_mask = _INT_REG_ALL_mask;
|
||||
_INT_REG_mask.Remove(R_tls_num);
|
||||
_INT_REG_mask.Remove(R_SP_num);
|
||||
if (UseCompressedOops) {
|
||||
_INT_REG_mask.Remove(R_Rheap_base_num);
|
||||
}
|
||||
// Remove Rtemp because safepoint poll can trash it
|
||||
// (see SharedRuntime::generate_handler_blob)
|
||||
_INT_REG_mask.Remove(R_Rtemp_num);
|
||||
|
||||
_PTR_REG_mask = _INT_REG_mask;
|
||||
_PTR_REG_mask.smear_to_sets(2);
|
||||
|
||||
// STR_REG = INT_REG+ZR
|
||||
// SPILLP_REG = INT_REG+SP
|
||||
// SP_PTR_REG = INT_REG+SP+TLS
|
||||
_STR_REG_mask = _INT_REG_mask;
|
||||
_SP_PTR_REG_mask = _STR_REG_mask;
|
||||
_STR_REG_mask.Insert(R_ZR_num);
|
||||
_SP_PTR_REG_mask.Insert(R_SP_num);
|
||||
_SPILLP_REG_mask = _SP_PTR_REG_mask;
|
||||
_SP_PTR_REG_mask.Insert(R_tls_num);
|
||||
_STR_PTR_REG_mask = _STR_REG_mask;
|
||||
_STR_PTR_REG_mask.smear_to_sets(2);
|
||||
_SP_PTR_REG_mask.smear_to_sets(2);
|
||||
_SPILLP_REG_mask.smear_to_sets(2);
|
||||
|
||||
_RMEMCOPY_REG_mask = RegMask(R_mem_copy_lo_num);
|
||||
assert(OptoReg::as_OptoReg(Rmemcopy->as_VMReg()) == R_mem_copy_lo_num, "!");
|
||||
|
||||
_SFLT_REG_mask = _SFLT_REG_0_mask;
|
||||
_SFLT_REG_mask.SUBTRACT(_RMEMCOPY_REG_mask);
|
||||
_DFLT_REG_mask = _SFLT_REG_mask;
|
||||
_DFLT_REG_mask.smear_to_sets(2);
|
||||
_VECTORX_REG_mask = _SFLT_REG_mask;
|
||||
_VECTORX_REG_mask.smear_to_sets(4);
|
||||
assert(same_mask(_VECTORX_REG_mask, _VECTORX_REG_0_mask), "!");
|
||||
|
||||
#ifdef ASSERT
|
||||
RegMask r((RegMask *)&SFLT_REG_mask());
|
||||
r.smear_to_sets(2);
|
||||
assert(same_mask(r, _DFLT_REG_mask), "!");
|
||||
#endif
|
||||
|
||||
if (VM_Version::prefer_moves_over_load_literal()) {
|
||||
mov_oop_size = 4;
|
||||
} else {
|
||||
mov_oop_size = 1;
|
||||
}
|
||||
|
||||
assert(Matcher::interpreter_method_oop_reg_encode() == Rmethod->encoding(), "should be");
|
||||
}
|
||||
|
||||
uintx limmL_low(uintx imm, int n) {
|
||||
// 1: try as is
|
||||
if (is_limmL(imm)) {
|
||||
return imm;
|
||||
}
|
||||
// 2: try low bits + all 0's
|
||||
uintx imm0 = imm & right_n_bits(n);
|
||||
if (is_limmL(imm0)) {
|
||||
return imm0;
|
||||
}
|
||||
// 3: try low bits + all 1's
|
||||
uintx imm1 = imm0 | left_n_bits(BitsPerWord - n);
|
||||
if (is_limmL(imm1)) {
|
||||
return imm1;
|
||||
}
|
||||
#if 0
|
||||
// 4: try low bits replicated
|
||||
int field = 1 << log2_intptr(n + n - 1);
|
||||
assert(field >= n, "!");
|
||||
assert(field / n == 1, "!");
|
||||
intptr_t immr = immx;
|
||||
while (field < BitsPerWord) {
|
||||
intrptr_t bits = immr & right_n_bits(field);
|
||||
immr = bits | (bits << field);
|
||||
field = field << 1;
|
||||
}
|
||||
// replicate at power-of-2 boundary
|
||||
if (is_limmL(immr)) {
|
||||
return immr;
|
||||
}
|
||||
#endif
|
||||
return imm;
|
||||
}
|
||||
|
||||
// Convert the raw encoding form into the form expected by the
|
||||
// constructor for Address.
|
||||
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
|
||||
RelocationHolder rspec;
|
||||
if (disp_reloc != relocInfo::none) {
|
||||
rspec = Relocation::spec_simple(disp_reloc);
|
||||
}
|
||||
|
||||
Register rbase = (base == 0xff) ? SP : as_Register(base);
|
||||
if (index != 0xff) {
|
||||
Register rindex = as_Register(index);
|
||||
if (disp == 0x7fffffff) { // special value to indicate sign-extend
|
||||
Address madr(rbase, rindex, ex_sxtw, scale);
|
||||
madr._rspec = rspec;
|
||||
return madr;
|
||||
} else {
|
||||
assert(disp == 0, "unsupported");
|
||||
Address madr(rbase, rindex, ex_lsl, scale);
|
||||
madr._rspec = rspec;
|
||||
return madr;
|
||||
}
|
||||
} else {
|
||||
assert(scale == 0, "not supported");
|
||||
Address madr(rbase, disp);
|
||||
madr._rspec = rspec;
|
||||
return madr;
|
||||
}
|
||||
}
|
||||
|
||||
// Location of compiled Java return values. Same as C
|
||||
OptoRegPair c2::return_value(int ideal_reg) {
|
||||
assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
|
||||
static int lo[Op_RegL+1] = { 0, 0, OptoReg::Bad, R_R0_num, R_R0_num, R_hf_ret_lo_num, R_hf_ret_lo_num, R_R0_num };
|
||||
static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, R_R0x_num, OptoReg::Bad, R_hf_ret_hi_num, R_R0x_num };
|
||||
return OptoRegPair( hi[ideal_reg], lo[ideal_reg]);
|
||||
}
|
||||
|
||||
// !!!!! Special hack to get all type of calls to specify the byte offset
|
||||
// from the start of the call to the point where the return address
|
||||
// will point.
|
||||
|
||||
int MachCallStaticJavaNode::ret_addr_offset() {
|
||||
bool far = (_method == NULL) ? maybe_far_call(this) : !cache_reachable();
|
||||
bool patchable = _method != NULL;
|
||||
int call_size = MacroAssembler::call_size(entry_point(), far, patchable);
|
||||
return (call_size + (_method_handle_invoke ? 1 : 0)) * NativeInstruction::instruction_size;
|
||||
}
|
||||
|
||||
int MachCallDynamicJavaNode::ret_addr_offset() {
|
||||
bool far = !cache_reachable();
|
||||
int call_size = MacroAssembler::call_size(entry_point(), far, true);
|
||||
return (mov_oop_size + call_size) * NativeInstruction::instruction_size;
|
||||
}
|
||||
|
||||
int MachCallRuntimeNode::ret_addr_offset() {
|
||||
int call_size = 0;
|
||||
// TODO: check if Leaf nodes also need this
|
||||
if (!is_MachCallLeaf()) {
|
||||
// adr $temp, ret_addr
|
||||
// str $temp, [SP + last_java_pc]
|
||||
call_size += 2;
|
||||
}
|
||||
// bl or mov_slow; blr
|
||||
bool far = maybe_far_call(this);
|
||||
call_size += MacroAssembler::call_size(entry_point(), far, false);
|
||||
return call_size * NativeInstruction::instruction_size;
|
||||
}
|
||||
|
||||
%}
|
||||
|
||||
// The intptr_t operand types, defined by textual substitution.
|
||||
// (Cf. opto/type.hpp. This lets us avoid many, many other ifdefs.)
|
||||
#define immX immL
|
||||
#define iRegX iRegL
|
||||
#define aimmX aimmL
|
||||
#define limmX limmL
|
||||
#define immX9 immL9
|
||||
#define LShiftX LShiftL
|
||||
#define shimmX immU6
|
||||
|
||||
#define store_RegLd store_RegL
|
||||
|
||||
//----------ATTRIBUTES---------------------------------------------------------
|
||||
//----------Operand Attributes-------------------------------------------------
|
||||
op_attrib op_cost(1); // Required cost attribute
|
||||
|
||||
//----------OPERANDS-----------------------------------------------------------
|
||||
// Operand definitions must precede instruction definitions for correct parsing
|
||||
// in the ADLC because operands constitute user defined types which are used in
|
||||
// instruction definitions.
|
||||
|
||||
//----------Simple Operands----------------------------------------------------
|
||||
// Immediate Operands
|
||||
|
||||
// Integer Immediate: 9-bit (including sign bit), so same as immI8?
|
||||
// FIXME: simm9 allows -256, but immI8 doesn't...
|
||||
operand simm9() %{
|
||||
predicate(Assembler::is_imm_in_range(n->get_int(), 9, 0));
|
||||
match(ConI);
|
||||
op_cost(0);
|
||||
|
||||
format %{ %}
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
|
||||
operand uimm12() %{
|
||||
predicate(Assembler::is_unsigned_imm_in_range(n->get_int(), 12, 0));
|
||||
match(ConI);
|
||||
op_cost(0);
|
||||
|
||||
format %{ %}
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
operand aimmP() %{
|
||||
predicate(n->get_ptr() == 0 || (is_aimm(n->get_ptr()) && ((ConPNode*)n)->type()->reloc() == relocInfo::none));
|
||||
match(ConP);
|
||||
|
||||
op_cost(0);
|
||||
// formats are generated automatically for constants and base registers
|
||||
format %{ %}
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
// Long Immediate: 12-bit - for addressing mode
|
||||
operand immL12() %{
|
||||
predicate((-4096 < n->get_long()) && (n->get_long() < 4096));
|
||||
match(ConL);
|
||||
op_cost(0);
|
||||
|
||||
format %{ %}
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
// Long Immediate: 9-bit - for addressing mode
|
||||
operand immL9() %{
|
||||
predicate((-256 <= n->get_long()) && (n->get_long() < 256));
|
||||
match(ConL);
|
||||
op_cost(0);
|
||||
|
||||
format %{ %}
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
operand immIMov() %{
|
||||
predicate(n->get_int() >> 16 == 0);
|
||||
match(ConI);
|
||||
op_cost(0);
|
||||
|
||||
format %{ %}
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
operand immLMov() %{
|
||||
predicate(n->get_long() >> 16 == 0);
|
||||
match(ConL);
|
||||
op_cost(0);
|
||||
|
||||
format %{ %}
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
operand immUL12() %{
|
||||
predicate(is_uimm12(n->get_long(), 0));
|
||||
match(ConL);
|
||||
op_cost(0);
|
||||
|
||||
format %{ %}
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
operand immUL12x2() %{
|
||||
predicate(is_uimm12(n->get_long(), 1));
|
||||
match(ConL);
|
||||
op_cost(0);
|
||||
|
||||
format %{ %}
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
operand immUL12x4() %{
|
||||
predicate(is_uimm12(n->get_long(), 2));
|
||||
match(ConL);
|
||||
op_cost(0);
|
||||
|
||||
format %{ %}
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
operand immUL12x8() %{
|
||||
predicate(is_uimm12(n->get_long(), 3));
|
||||
match(ConL);
|
||||
op_cost(0);
|
||||
|
||||
format %{ %}
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
operand immUL12x16() %{
|
||||
predicate(is_uimm12(n->get_long(), 4));
|
||||
match(ConL);
|
||||
op_cost(0);
|
||||
|
||||
format %{ %}
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
// Used for long shift
|
||||
operand immU6() %{
|
||||
predicate(0 <= n->get_int() && (n->get_int() <= 63));
|
||||
match(ConI);
|
||||
op_cost(0);
|
||||
|
||||
format %{ %}
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
// Used for register extended shift
|
||||
operand immI_0_4() %{
|
||||
predicate(0 <= n->get_int() && (n->get_int() <= 4));
|
||||
match(ConI);
|
||||
op_cost(0);
|
||||
|
||||
format %{ %}
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
// Compressed Pointer Register
|
||||
operand iRegN() %{
|
||||
constraint(ALLOC_IN_RC(int_reg));
|
||||
match(RegN);
|
||||
match(ZRRegN);
|
||||
|
||||
format %{ %}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
|
||||
operand SPRegP() %{
|
||||
constraint(ALLOC_IN_RC(SP_regP));
|
||||
match(RegP);
|
||||
|
||||
format %{ %}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
|
||||
operand ZRRegP() %{
|
||||
constraint(ALLOC_IN_RC(ZR_regP));
|
||||
match(RegP);
|
||||
|
||||
format %{ %}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
|
||||
operand ZRRegL() %{
|
||||
constraint(ALLOC_IN_RC(ZR_regP));
|
||||
match(RegL);
|
||||
|
||||
format %{ %}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
|
||||
operand ZRRegI() %{
|
||||
constraint(ALLOC_IN_RC(ZR_regI));
|
||||
match(RegI);
|
||||
|
||||
format %{ %}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
|
||||
operand ZRRegN() %{
|
||||
constraint(ALLOC_IN_RC(ZR_regI));
|
||||
match(RegN);
|
||||
|
||||
format %{ %}
|
||||
interface(REG_INTER);
|
||||
%}
|
@ -40,29 +40,14 @@ enum AsmShift
  lsl, lsr, asr, ror
};

#ifdef AARCH64
enum AsmExtendOp {
  ex_uxtb, ex_uxth, ex_uxtw, ex_uxtx,
  ex_sxtb, ex_sxth, ex_sxtw, ex_sxtx,

  ex_lsl = ex_uxtx
};
#endif

enum AsmOffset {
#ifdef AARCH64
  basic_offset = 0b00,
  pre_indexed = 0b11,
  post_indexed = 0b01
#else
  basic_offset = 1 << 24,
  pre_indexed = 1 << 24 | 1 << 21,
  post_indexed = 0
#endif
};


#ifndef AARCH64
enum AsmWriteback {
  no_writeback,
  writeback
@ -72,7 +57,6 @@ enum AsmOffsetOp {
|
||||
sub_offset = 0,
|
||||
add_offset = 1
|
||||
};
|
||||
#endif
|
||||
|
||||
|
||||
// ARM Addressing Modes 2 and 3 - Load and store
|
||||
@ -84,21 +68,13 @@ class Address {
|
||||
AsmOffset _mode;
|
||||
RelocationHolder _rspec;
|
||||
int _shift_imm;
|
||||
#ifdef AARCH64
|
||||
AsmExtendOp _extend;
|
||||
#else
|
||||
AsmShift _shift;
|
||||
AsmOffsetOp _offset_op;
|
||||
|
||||
static inline int abs(int x) { return x < 0 ? -x : x; }
|
||||
static inline int up (int x) { return x < 0 ? 0 : 1; }
|
||||
#endif
|
||||
|
||||
#ifdef AARCH64
|
||||
static const AsmExtendOp LSL = ex_lsl;
|
||||
#else
|
||||
static const AsmShift LSL = lsl;
|
||||
#endif
|
||||
|
||||
public:
|
||||
Address() : _base(noreg) {}
|
||||
@ -109,12 +85,8 @@ class Address {
|
||||
_disp = offset;
|
||||
_mode = mode;
|
||||
_shift_imm = 0;
|
||||
#ifdef AARCH64
|
||||
_extend = ex_lsl;
|
||||
#else
|
||||
_shift = lsl;
|
||||
_offset_op = add_offset;
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
@ -124,27 +96,11 @@ class Address {
|
||||
_disp = in_bytes(offset);
|
||||
_mode = mode;
|
||||
_shift_imm = 0;
|
||||
#ifdef AARCH64
|
||||
_extend = ex_lsl;
|
||||
#else
|
||||
_shift = lsl;
|
||||
_offset_op = add_offset;
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef AARCH64
|
||||
Address(Register rn, Register rm, AsmExtendOp extend = ex_lsl, int shift_imm = 0) {
|
||||
assert ((extend == ex_uxtw) || (extend == ex_lsl) || (extend == ex_sxtw) || (extend == ex_sxtx), "invalid extend for address mode");
|
||||
assert ((0 <= shift_imm) && (shift_imm <= 4), "shift amount is out of range");
|
||||
_base = rn;
|
||||
_index = rm;
|
||||
_disp = 0;
|
||||
_mode = basic_offset;
|
||||
_extend = extend;
|
||||
_shift_imm = shift_imm;
|
||||
}
|
||||
#else
|
||||
Address(Register rn, Register rm, AsmShift shift = lsl,
|
||||
int shift_imm = 0, AsmOffset mode = basic_offset,
|
||||
AsmOffsetOp offset_op = add_offset) {
|
||||
@ -181,7 +137,6 @@ class Address {
|
||||
_mode = basic_offset;
|
||||
_offset_op = add_offset;
|
||||
}
|
||||
#endif // AARCH64
|
||||
|
||||
// [base + index * wordSize]
|
||||
static Address indexed_ptr(Register base, Register index) {
|
||||
@ -211,25 +166,6 @@ class Address {
|
||||
return a;
|
||||
}
|
||||
|
||||
#ifdef AARCH64
|
||||
int encoding_simd() const {
|
||||
assert(_index != SP, "encoding constraint");
|
||||
assert(_disp == 0 || _mode == post_indexed, "encoding constraint");
|
||||
assert(_index == noreg || _mode == basic_offset, "encoding constraint");
|
||||
assert(_mode == basic_offset || _mode == post_indexed, "encoding constraint");
|
||||
assert(_extend == ex_lsl, "encoding constraint");
|
||||
int index;
|
||||
if (_index == noreg) {
|
||||
if (_mode == post_indexed)
|
||||
index = 0b100 << 5 | 31;
|
||||
else
|
||||
index = 0;
|
||||
} else {
|
||||
index = 0b100 << 5 | _index->encoding();
|
||||
}
|
||||
return index << 16 | _base->encoding_with_sp() << 5;
|
||||
}
|
||||
#else /* !AARCH64 */
|
||||
int encoding2() const {
|
||||
assert(_mode == basic_offset || _base != PC, "unpredictable instruction");
|
||||
if (_index == noreg) {
|
||||
@ -287,7 +223,6 @@ class Address {
|
||||
|
||||
return _base->encoding() << 16 | index;
|
||||
}
|
||||
#endif // !AARCH64
|
||||
|
||||
Register base() const {
|
||||
return _base;
|
||||
@ -309,11 +244,6 @@ class Address {
|
||||
return _shift_imm;
|
||||
}
|
||||
|
||||
#ifdef AARCH64
|
||||
AsmExtendOp extend() const {
|
||||
return _extend;
|
||||
}
|
||||
#else
|
||||
AsmShift shift() const {
|
||||
return _shift;
|
||||
}
|
||||
@ -321,7 +251,6 @@ class Address {
|
||||
AsmOffsetOp offset_op() const {
|
||||
return _offset_op;
|
||||
}
|
||||
#endif
|
||||
|
||||
bool uses(Register reg) const { return _base == reg || _index == reg; }
|
||||
|
||||
@ -394,11 +323,7 @@ class VFP {
|
||||
};
|
||||
#endif
|
||||
|
||||
#ifdef AARCH64
|
||||
#include "assembler_arm_64.hpp"
|
||||
#else
|
||||
#include "assembler_arm_32.hpp"
|
||||
#endif
|
||||
|
||||
|
||||
#endif // CPU_ARM_VM_ASSEMBLER_ARM_HPP
|
||||
|
@ -1,186 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "asm/assembler.hpp"
|
||||
#include "asm/assembler.inline.hpp"
|
||||
#include "ci/ciEnv.hpp"
|
||||
#include "gc/shared/cardTableBarrierSet.hpp"
|
||||
#include "gc/shared/collectedHeap.inline.hpp"
|
||||
#include "interpreter/interpreter.hpp"
|
||||
#include "interpreter/interpreterRuntime.hpp"
|
||||
#include "interpreter/templateInterpreterGenerator.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "prims/jvm_misc.hpp"
|
||||
#include "prims/methodHandles.hpp"
|
||||
#include "runtime/biasedLocking.hpp"
|
||||
#include "runtime/interfaceSupport.inline.hpp"
|
||||
#include "runtime/objectMonitor.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
#include "utilities/hashtable.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
|
||||
// Returns whether given imm has equal bit fields <0:size-1> and <size:2*size-1>.
|
||||
inline bool Assembler::LogicalImmediate::has_equal_subpatterns(uintx imm, int size) {
|
||||
uintx mask = right_n_bits(size);
|
||||
uintx subpattern1 = mask_bits(imm, mask);
|
||||
uintx subpattern2 = mask_bits(imm >> size, mask);
|
||||
return subpattern1 == subpattern2;
|
||||
}
|
||||
|
||||
// Returns least size that is a power of two from 2 to 64 with the proviso that given
|
||||
// imm is composed of repeating patterns of this size.
|
||||
inline int Assembler::LogicalImmediate::least_pattern_size(uintx imm) {
|
||||
int size = BitsPerWord;
|
||||
while (size > 2 && has_equal_subpatterns(imm, size >> 1)) {
|
||||
size >>= 1;
|
||||
}
|
||||
return size;
|
||||
}
|
||||
|
||||
// Returns count of set bits in given imm. Based on variable-precision SWAR algorithm.
|
||||
inline int Assembler::LogicalImmediate::population_count(uintx x) {
|
||||
x -= ((x >> 1) & 0x5555555555555555L);
|
||||
x = (((x >> 2) & 0x3333333333333333L) + (x & 0x3333333333333333L));
|
||||
x = (((x >> 4) + x) & 0x0f0f0f0f0f0f0f0fL);
|
||||
x += (x >> 8);
|
||||
x += (x >> 16);
|
||||
x += (x >> 32);
|
||||
return(x & 0x7f);
|
||||
}
|
||||
|
||||
// Let given x be <A:B> where B = 0 and least bit of A = 1. Returns <A:C>, where C is B-size set bits.
|
||||
inline uintx Assembler::LogicalImmediate::set_least_zeroes(uintx x) {
|
||||
return x | (x - 1);
|
||||
}
|
||||
|
||||
|
||||
#ifdef ASSERT
|
||||
|
||||
// Restores immediate by encoded bit masks.
|
||||
uintx Assembler::LogicalImmediate::decode() {
|
||||
assert (_encoded, "should be");
|
||||
|
||||
int len_code = (_immN << 6) | ((~_imms) & 0x3f);
|
||||
assert (len_code != 0, "should be");
|
||||
|
||||
int len = 6;
|
||||
while (!is_set_nth_bit(len_code, len)) len--;
|
||||
int esize = 1 << len;
|
||||
assert (len > 0, "should be");
|
||||
assert ((_is32bit ? 32 : 64) >= esize, "should be");
|
||||
|
||||
int levels = right_n_bits(len);
|
||||
int S = _imms & levels;
|
||||
int R = _immr & levels;
|
||||
|
||||
assert (S != levels, "should be");
|
||||
|
||||
uintx welem = right_n_bits(S + 1);
|
||||
uintx wmask = (R == 0) ? welem : ((welem >> R) | (welem << (esize - R)));
|
||||
|
||||
for (int size = esize; size < 64; size <<= 1) {
|
||||
wmask |= (wmask << size);
|
||||
}
|
||||
|
||||
return wmask;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
// Constructs LogicalImmediate by given imm. Figures out if given imm can be used in AArch64 logical
|
||||
// instructions (AND, ANDS, EOR, ORR) and saves its encoding.
|
||||
void Assembler::LogicalImmediate::construct(uintx imm, bool is32) {
|
||||
_is32bit = is32;
|
||||
|
||||
if (is32) {
|
||||
assert(((imm >> 32) == 0) || (((intx)imm >> 31) == -1), "32-bit immediate is out of range");
|
||||
|
||||
// Replicate low 32 bits.
|
||||
imm &= 0xffffffff;
|
||||
imm |= imm << 32;
|
||||
}
|
||||
|
||||
// All-zeroes and all-ones can not be encoded.
|
||||
if (imm != 0 && (~imm != 0)) {
|
||||
|
||||
// Let LPS (least pattern size) be the least size (power of two from 2 to 64) of repeating
|
||||
// patterns in the immediate. If immediate value can be encoded, it is encoded by pattern
|
||||
// of exactly LPS size (due to structure of valid patterns). In order to verify
|
||||
// that immediate value can be encoded, LPS is calculated and <LPS-1:0> bits of immediate
|
||||
// are verified to be valid pattern.
|
||||
int lps = least_pattern_size(imm);
|
||||
uintx lps_mask = right_n_bits(lps);
|
||||
|
||||
// A valid pattern has one of the following forms:
|
||||
// | 0 x A | 1 x B | 0 x C |, where B > 0 and C > 0, or
|
||||
// | 1 x A | 0 x B | 1 x C |, where B > 0 and C > 0.
|
||||
// For simplicity, the second form of the pattern is inverted into the first form.
|
||||
bool inverted = imm & 0x1;
|
||||
uintx pattern = (inverted ? ~imm : imm) & lps_mask;
|
||||
|
||||
// | 0 x A | 1 x (B + C) |
|
||||
uintx without_least_zeroes = set_least_zeroes(pattern);
|
||||
|
||||
// Pattern is valid iff without least zeroes it is a power of two - 1.
|
||||
if ((without_least_zeroes & (without_least_zeroes + 1)) == 0) {
|
||||
|
||||
// Count B as population count of pattern.
|
||||
int bits_count = population_count(pattern);
|
||||
|
||||
// Count B+C as population count of pattern without least zeroes
|
||||
int left_range = population_count(without_least_zeroes);
|
||||
|
||||
// S-prefix is a part of imms field which encodes LPS.
|
||||
// LPS | S prefix
|
||||
// 64 | not defined
|
||||
// 32 | 0b0
|
||||
// 16 | 0b10
|
||||
// 8 | 0b110
|
||||
// 4 | 0b1110
|
||||
// 2 | 0b11110
|
||||
int s_prefix = (lps == 64) ? 0 : ~set_least_zeroes(lps) & 0x3f;
|
||||
|
||||
// immN bit is set iff LPS == 64.
|
||||
_immN = (lps == 64) ? 1 : 0;
|
||||
assert (!is32 || (_immN == 0), "32-bit immediate should be encoded with zero N-bit");
|
||||
|
||||
// immr is the rotation size.
|
||||
_immr = lps + (inverted ? 0 : bits_count) - left_range;
|
||||
|
||||
// imms is the field that encodes bits count and S-prefix.
|
||||
_imms = ((inverted ? (lps - bits_count) : bits_count) - 1) | s_prefix;
|
||||
|
||||
_encoded = true;
|
||||
assert (decode() == imm, "illegal encoding");
|
||||
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
_encoded = false;
|
||||
}
|
File diff suppressed because it is too large
@ -67,9 +67,6 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_info->deoptimize_on_exception()) {
#ifdef AARCH64
    __ NOT_TESTED();
#endif
    __ call(Runtime1::entry_for(Runtime1::predicate_failed_trap_id), relocInfo::runtime_call_type);
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
@ -86,9 +83,6 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  }

  if (_throw_index_out_of_bounds_exception) {
#ifdef AARCH64
    __ NOT_TESTED();
#endif
    __ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type);
  } else {
    __ str(_array->as_pointer_register(), Address(SP, BytesPerWord)); // ??? Correct offset? Correct instruction?
@ -208,16 +202,12 @@ void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
|
||||
const Register lock_reg = _lock_reg->as_pointer_register();
|
||||
|
||||
ce->verify_reserved_argument_area_size(2);
|
||||
#ifdef AARCH64
|
||||
__ stp(obj_reg, lock_reg, Address(SP));
|
||||
#else
|
||||
if (obj_reg < lock_reg) {
|
||||
__ stmia(SP, RegisterSet(obj_reg) | RegisterSet(lock_reg));
|
||||
} else {
|
||||
__ str(obj_reg, Address(SP));
|
||||
__ str(lock_reg, Address(SP, BytesPerWord));
|
||||
}
|
||||
#endif // AARCH64
|
||||
|
||||
Runtime1::StubID enter_id = ce->compilation()->has_fpu_code() ?
|
||||
Runtime1::monitorenter_id :
|
||||
@ -259,7 +249,7 @@ void PatchingStub::align_patch_site(MacroAssembler* masm) {
|
||||
}
|
||||
|
||||
void PatchingStub::emit_code(LIR_Assembler* ce) {
|
||||
const int patchable_instruction_offset = AARCH64_ONLY(NativeInstruction::instruction_size) NOT_AARCH64(0);
|
||||
const int patchable_instruction_offset = 0;
|
||||
|
||||
assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
|
||||
"not enough room for call");
|
||||
@ -267,31 +257,17 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
|
||||
Label call_patch;
|
||||
bool is_load = (_id == load_klass_id) || (_id == load_mirror_id) || (_id == load_appendix_id);
|
||||
|
||||
#ifdef AARCH64
|
||||
assert(nativeInstruction_at(_pc_start)->is_nop(), "required for MT safe patching");
|
||||
|
||||
// Same alignment of reg2mem code and PatchingStub code. Required to make copied bind_literal() code properly aligned.
|
||||
__ align(wordSize);
|
||||
#endif // AARCH64
|
||||
|
||||
if (is_load NOT_AARCH64(&& !VM_Version::supports_movw())) {
|
||||
if (is_load && !VM_Version::supports_movw()) {
|
||||
address start = __ pc();
|
||||
|
||||
// The following sequence duplicates code provided in MacroAssembler::patchable_mov_oop()
|
||||
// without creating relocation info entry.
|
||||
#ifdef AARCH64
|
||||
// Extra nop for MT safe patching
|
||||
__ nop();
|
||||
#endif // AARCH64
|
||||
|
||||
assert((__ pc() - start) == patchable_instruction_offset, "should be");
|
||||
#ifdef AARCH64
|
||||
__ ldr(_obj, __ pc());
|
||||
#else
|
||||
__ ldr(_obj, Address(PC));
|
||||
// Extra nop to handle case of large offset of oop placeholder (see NativeMovConstReg::set_data).
|
||||
__ nop();
|
||||
#endif // AARCH64
|
||||
|
||||
#ifdef ASSERT
|
||||
for (int i = 0; i < _bytes_to_copy; i++) {
|
||||
|
@ -47,9 +47,9 @@ enum {
|
||||
|
||||
// registers
|
||||
enum {
|
||||
pd_nof_cpu_regs_frame_map = AARCH64_ONLY(33) NOT_AARCH64(16), // number of registers used during code emission
|
||||
pd_nof_caller_save_cpu_regs_frame_map = AARCH64_ONLY(27) NOT_AARCH64(10), // number of registers killed by calls
|
||||
pd_nof_cpu_regs_reg_alloc = AARCH64_ONLY(27) NOT_AARCH64(10), // number of registers that are visible to register allocator (including Rheap_base which is visible only if compressed pointers are not enabled)
|
||||
pd_nof_cpu_regs_frame_map = 16, // number of registers used during code emission
|
||||
pd_nof_caller_save_cpu_regs_frame_map = 10, // number of registers killed by calls
|
||||
pd_nof_cpu_regs_reg_alloc = 10, // number of registers that are visible to register allocator (including Rheap_base which is visible only if compressed pointers are not enabled)
|
||||
pd_nof_cpu_regs_linearscan = pd_nof_cpu_regs_frame_map, // number of registers visible to linear scan
|
||||
pd_nof_cpu_regs_processed_in_linearscan = pd_nof_cpu_regs_reg_alloc + 1, // number of registers processed in linear scan; includes LR as it is used as temporary register in c1_LIRGenerator_arm
|
||||
pd_first_cpu_reg = 0,
|
||||
@ -57,7 +57,7 @@ enum {
|
||||
|
||||
pd_nof_fpu_regs_frame_map = VFP(32) SOFT(0), // number of float registers used during code emission
|
||||
pd_nof_caller_save_fpu_regs_frame_map = VFP(32) SOFT(0), // number of float registers killed by calls
|
||||
pd_nof_fpu_regs_reg_alloc = AARCH64_ONLY(32) NOT_AARCH64(VFP(30) SOFT(0)), // number of float registers that are visible to register allocator
|
||||
pd_nof_fpu_regs_reg_alloc = VFP(30) SOFT(0), // number of float registers that are visible to register allocator
|
||||
pd_nof_fpu_regs_linearscan = pd_nof_fpu_regs_frame_map, // number of float registers visible to linear scan
|
||||
pd_first_fpu_reg = pd_nof_cpu_regs_frame_map,
|
||||
pd_last_fpu_reg = pd_first_fpu_reg + pd_nof_fpu_regs_frame_map - 1,
|
||||
@ -74,11 +74,7 @@ enum {
|
||||
pd_float_saved_as_double = false
|
||||
};
|
||||
|
||||
#ifdef AARCH64
|
||||
#define PATCHED_ADDR 0xff8
|
||||
#else
|
||||
#define PATCHED_ADDR (204)
|
||||
#endif
|
||||
#define CARDTABLEBARRIERSET_POST_BARRIER_HELPER
|
||||
#define GENERATE_ADDRESS_IS_PREFERRED
|
||||
|
||||
|
@ -49,9 +49,6 @@ LIR_Opr FrameMap::R3_metadata_opr;
LIR_Opr FrameMap::R4_metadata_opr;
LIR_Opr FrameMap::R5_metadata_opr;

#ifdef AARCH64
LIR_Opr FrameMap::ZR_opr;
#endif // AARCH64

LIR_Opr FrameMap::LR_opr;
LIR_Opr FrameMap::LR_oop_opr;
@ -82,12 +79,7 @@ LIR_Opr FrameMap::map_to_opr(BasicType type, VMRegPair* reg, bool) {
} else if (r_1->is_Register()) {
Register reg = r_1->as_Register();
if (r_2->is_Register() && (type == T_LONG || type == T_DOUBLE)) {
#ifdef AARCH64
assert(r_1->next() == r_2, "should be the same");
opr = as_long_opr(reg);
#else
opr = as_long_opr(reg, r_2->as_Register());
#endif
} else if (type == T_OBJECT || type == T_ARRAY) {
opr = as_oop_opr(reg);
} else if (type == T_METADATA) {
@ -115,20 +107,10 @@ void FrameMap::initialize() {
int rnum = 0;

// Registers used for allocation
#ifdef AARCH64
assert(Rthread == R28 && Rheap_base == R27 && Rtemp == R16, "change the code here");
for (i = 0; i < 16; i++) {
map_register(rnum++, as_Register(i));
}
for (i = 17; i < 28; i++) {
map_register(rnum++, as_Register(i));
}
#else
assert(Rthread == R10 && Rtemp == R12, "change the code here");
for (i = 0; i < 10; i++) {
map_register(rnum++, as_Register(i));
}
#endif // AARCH64
assert(rnum == pd_nof_cpu_regs_reg_alloc, "should be");

// Registers not used for allocation
@ -139,11 +121,7 @@ void FrameMap::initialize() {
|
||||
map_register(rnum++, Rthread);
|
||||
map_register(rnum++, FP); // ARM32: R7 or R11
|
||||
map_register(rnum++, SP);
|
||||
#ifdef AARCH64
|
||||
map_register(rnum++, ZR);
|
||||
#else
|
||||
map_register(rnum++, PC);
|
||||
#endif
|
||||
assert(rnum == pd_nof_cpu_regs_frame_map, "should be");
|
||||
|
||||
_init_done = true;
|
||||
@ -155,9 +133,6 @@ void FrameMap::initialize() {
|
||||
R4_opr = as_opr(R4); R4_oop_opr = as_oop_opr(R4); R4_metadata_opr = as_metadata_opr(R4);
|
||||
R5_opr = as_opr(R5); R5_oop_opr = as_oop_opr(R5); R5_metadata_opr = as_metadata_opr(R5);
|
||||
|
||||
#ifdef AARCH64
|
||||
ZR_opr = as_opr(ZR);
|
||||
#endif // AARCH64
|
||||
|
||||
LR_opr = as_opr(LR);
|
||||
LR_oop_opr = as_oop_opr(LR);
|
||||
@ -169,11 +144,6 @@ void FrameMap::initialize() {
|
||||
// LIR operands for result
|
||||
Int_result_opr = R0_opr;
|
||||
Object_result_opr = R0_oop_opr;
|
||||
#ifdef AARCH64
|
||||
Long_result_opr = as_long_opr(R0);
|
||||
Float_result_opr = as_float_opr(S0);
|
||||
Double_result_opr = as_double_opr(D0);
|
||||
#else
|
||||
Long_result_opr = as_long_opr(R0, R1);
|
||||
#ifdef __ABI_HARD__
|
||||
Float_result_opr = as_float_opr(S0);
|
||||
@ -182,7 +152,6 @@ void FrameMap::initialize() {
|
||||
Float_result_opr = LIR_OprFact::single_softfp(0);
|
||||
Double_result_opr = LIR_OprFact::double_softfp(0, 1);
|
||||
#endif // __ABI_HARD__
|
||||
#endif // AARCH64
|
||||
|
||||
Exception_oop_opr = as_oop_opr(Rexception_obj);
|
||||
Exception_pc_opr = as_opr(Rexception_pc);
|
||||
@ -222,7 +191,7 @@ bool FrameMap::validate_frame() {
|
||||
}
|
||||
java_index += type2size[opr->type()];
|
||||
}
|
||||
return max_offset < AARCH64_ONLY(16384) NOT_AARCH64(4096); // TODO-AARCH64 check that LIRAssembler does not generate load/store of byte and half-word with SP as address base
|
||||
return max_offset < 4096;
|
||||
}
|
||||
|
||||
VMReg FrameMap::fpu_regname(int n) {
|
||||
|
@ -54,9 +54,6 @@
|
||||
static LIR_Opr R4_metadata_opr;
|
||||
static LIR_Opr R5_metadata_opr;
|
||||
|
||||
#ifdef AARCH64
|
||||
static LIR_Opr ZR_opr;
|
||||
#endif // AARCH64
|
||||
|
||||
static LIR_Opr LR_opr;
|
||||
static LIR_Opr LR_oop_opr;
|
||||
@ -75,19 +72,6 @@
|
||||
static LIR_Opr Exception_oop_opr;
|
||||
static LIR_Opr Exception_pc_opr;
|
||||
|
||||
#ifdef AARCH64
|
||||
static LIR_Opr as_long_opr(Register r) {
|
||||
return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
|
||||
}
|
||||
|
||||
static LIR_Opr as_pointer_opr(Register r) {
|
||||
return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
|
||||
}
|
||||
|
||||
static LIR_Opr as_double_opr(FloatRegister r) {
|
||||
return LIR_OprFact::double_fpu(r->encoding());
|
||||
}
|
||||
#else
|
||||
static LIR_Opr as_long_opr(Register r, Register r2) {
|
||||
return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r2));
|
||||
}
|
||||
@ -99,7 +83,6 @@
|
||||
static LIR_Opr as_double_opr(FloatRegister r) {
|
||||
return LIR_OprFact::double_fpu(r->encoding(), r->successor()->encoding());
|
||||
}
|
||||
#endif
|
||||
|
||||
static LIR_Opr as_float_opr(FloatRegister r) {
|
||||
return LIR_OprFact::single_fpu(r->encoding());
|
||||
|
File diff suppressed because it is too large
@ -44,9 +44,6 @@
Label* profile_cast_success, Label* profile_cast_failure,
Label* success, Label* failure);

#ifdef AARCH64
void long_compare_helper(LIR_Opr opr1, LIR_Opr opr2);
#endif // AARCH64

// Saves 4 given registers in reserved argument area.
void save_in_reserved_area(Register r1, Register r2, Register r3, Register r4);
@ -55,10 +52,10 @@
void restore_from_reserved_area(Register r1, Register r2, Register r3, Register r4);

enum {
_call_stub_size = AARCH64_ONLY(32) NOT_AARCH64(16),
_call_stub_size = 16,
_call_aot_stub_size = 0,
_exception_handler_size = PRODUCT_ONLY(AARCH64_ONLY(256) NOT_AARCH64(68)) NOT_PRODUCT(AARCH64_ONLY(256+216) NOT_AARCH64(68+60)),
_deopt_handler_size = AARCH64_ONLY(32) NOT_AARCH64(16)
_exception_handler_size = PRODUCT_ONLY(68) NOT_PRODUCT(68+60),
_deopt_handler_size = 16
};

public:
@ -118,19 +118,6 @@ LIR_Opr LIRGenerator::rlock_byte(BasicType type) {


bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
#ifdef AARCH64
if (v->type()->as_IntConstant() != NULL) {
return v->type()->as_IntConstant()->value() == 0;
} else if (v->type()->as_LongConstant() != NULL) {
return v->type()->as_LongConstant()->value() == 0;
} else if (v->type()->as_ObjectConstant() != NULL) {
return v->type()->as_ObjectConstant()->value()->is_null_object();
} else if (v->type()->as_FloatConstant() != NULL) {
return jint_cast(v->type()->as_FloatConstant()->value()) == 0;
} else if (v->type()->as_DoubleConstant() != NULL) {
return jlong_cast(v->type()->as_DoubleConstant()->value()) == 0;
}
#endif // AARCH64
return false;
}
@ -140,15 +127,10 @@ bool LIRGenerator::can_inline_as_constant(Value v) const {
|
||||
return Assembler::is_arith_imm_in_range(v->type()->as_IntConstant()->value());
|
||||
} else if (v->type()->as_ObjectConstant() != NULL) {
|
||||
return v->type()->as_ObjectConstant()->value()->is_null_object();
|
||||
#ifdef AARCH64
|
||||
} else if (v->type()->as_LongConstant() != NULL) {
|
||||
return Assembler::is_arith_imm_in_range(v->type()->as_LongConstant()->value());
|
||||
#else
|
||||
} else if (v->type()->as_FloatConstant() != NULL) {
|
||||
return v->type()->as_FloatConstant()->value() == 0.0f;
|
||||
} else if (v->type()->as_DoubleConstant() != NULL) {
|
||||
return v->type()->as_DoubleConstant()->value() == 0.0;
|
||||
#endif // AARCH64
|
||||
}
|
||||
return false;
|
||||
}
|
||||
@ -160,39 +142,6 @@ bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
|
||||
}
|
||||
|
||||
|
||||
#ifdef AARCH64
|
||||
|
||||
static bool can_inline_as_constant_in_cmp(Value v) {
|
||||
jlong constant;
|
||||
if (v->type()->as_IntConstant() != NULL) {
|
||||
constant = v->type()->as_IntConstant()->value();
|
||||
} else if (v->type()->as_LongConstant() != NULL) {
|
||||
constant = v->type()->as_LongConstant()->value();
|
||||
} else if (v->type()->as_ObjectConstant() != NULL) {
|
||||
return v->type()->as_ObjectConstant()->value()->is_null_object();
|
||||
} else if (v->type()->as_FloatConstant() != NULL) {
|
||||
return v->type()->as_FloatConstant()->value() == 0.0f;
|
||||
} else if (v->type()->as_DoubleConstant() != NULL) {
|
||||
return v->type()->as_DoubleConstant()->value() == 0.0;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
|
||||
return Assembler::is_arith_imm_in_range(constant) || Assembler::is_arith_imm_in_range(-constant);
|
||||
}
|
||||
|
||||
|
||||
static bool can_inline_as_constant_in_logic(Value v) {
|
||||
if (v->type()->as_IntConstant() != NULL) {
|
||||
return Assembler::LogicalImmediate(v->type()->as_IntConstant()->value(), true).is_encoded();
|
||||
} else if (v->type()->as_LongConstant() != NULL) {
|
||||
return Assembler::LogicalImmediate(v->type()->as_LongConstant()->value(), false).is_encoded();
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
#endif // AARCH64
|
||||
|
||||
|
||||
LIR_Opr LIRGenerator::safepoint_poll_register() {
|
||||
@ -211,48 +160,10 @@ static LIR_Opr make_constant(BasicType type, jlong c) {
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef AARCH64
|
||||
|
||||
void LIRGenerator::add_constant(LIR_Opr src, jlong c, LIR_Opr dest) {
|
||||
if (c == 0) {
|
||||
__ move(src, dest);
|
||||
return;
|
||||
}
|
||||
|
||||
BasicType type = src->type();
|
||||
bool is_neg = (c < 0);
|
||||
c = ABS(c);
|
||||
|
||||
if ((c >> 24) == 0) {
|
||||
for (int shift = 0; shift <= 12; shift += 12) {
|
||||
int part = ((int)c) & (right_n_bits(12) << shift);
|
||||
if (part != 0) {
|
||||
if (is_neg) {
|
||||
__ sub(src, make_constant(type, part), dest);
|
||||
} else {
|
||||
__ add(src, make_constant(type, part), dest);
|
||||
}
|
||||
src = dest;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
__ move(make_constant(type, c), dest);
|
||||
if (is_neg) {
|
||||
__ sub(src, dest, dest);
|
||||
} else {
|
||||
__ add(src, dest, dest);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif // AARCH64
|
||||
|
||||
|
||||
void LIRGenerator::add_large_constant(LIR_Opr src, int c, LIR_Opr dest) {
|
||||
assert(c != 0, "must be");
|
||||
#ifdef AARCH64
|
||||
add_constant(src, c, dest);
|
||||
#else
|
||||
// Find first non-zero bit
|
||||
int shift = 0;
|
||||
while ((c & (3 << shift)) == 0) {
|
||||
@ -272,7 +183,6 @@ void LIRGenerator::add_large_constant(LIR_Opr src, int c, LIR_Opr dest) {
|
||||
if (c & (mask << 24)) {
|
||||
__ add(dest, LIR_OprFact::intConst(c & (mask << 24)), dest);
|
||||
}
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
static LIR_Address* make_address(LIR_Opr base, LIR_Opr index, LIR_Address::Scale scale, BasicType type) {
|
||||
@ -288,7 +198,6 @@ LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
|
||||
index = LIR_OprFact::illegalOpr;
|
||||
}
|
||||
|
||||
#ifndef AARCH64
|
||||
if (base->type() == T_LONG) {
|
||||
LIR_Opr tmp = new_register(T_INT);
|
||||
__ convert(Bytecodes::_l2i, base, tmp);
|
||||
@ -302,26 +211,11 @@ LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
|
||||
// At this point base and index should be all ints and not constants
|
||||
assert(base->is_single_cpu() && !base->is_constant(), "base should be an non-constant int");
|
||||
assert(index->is_illegal() || (index->type() == T_INT && !index->is_constant()), "index should be an non-constant int");
|
||||
#endif
|
||||
|
||||
int max_disp;
|
||||
bool disp_is_in_range;
|
||||
bool embedded_shift;
|
||||
|
||||
#ifdef AARCH64
|
||||
int align = exact_log2(type2aelembytes(type, true));
|
||||
assert((disp & right_n_bits(align)) == 0, "displacement is not aligned");
|
||||
assert(shift == 0 || shift == align, "shift should be zero or equal to embedded align");
|
||||
max_disp = (1 << 12) << align;
|
||||
|
||||
if (disp >= 0) {
|
||||
disp_is_in_range = Assembler::is_unsigned_imm_in_range(disp, 12, align);
|
||||
} else {
|
||||
disp_is_in_range = Assembler::is_imm_in_range(disp, 9, 0);
|
||||
}
|
||||
|
||||
embedded_shift = true;
|
||||
#else
|
||||
switch (type) {
|
||||
case T_BYTE:
|
||||
case T_SHORT:
|
||||
@ -344,7 +238,6 @@ LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
|
||||
}
|
||||
|
||||
disp_is_in_range = (-max_disp < disp && disp < max_disp);
|
||||
#endif // !AARCH64
|
||||
|
||||
if (index->is_register()) {
|
||||
LIR_Opr tmp = new_pointer_register();
|
||||
@ -394,11 +287,7 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
|
||||
LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
|
||||
assert(type == T_LONG || type == T_INT, "should be");
|
||||
LIR_Opr r = make_constant(type, x);
|
||||
#ifdef AARCH64
|
||||
bool imm_in_range = Assembler::LogicalImmediate(x, type == T_INT).is_encoded();
|
||||
#else
|
||||
bool imm_in_range = AsmOperand::is_rotated_imm(x);
|
||||
#endif // AARCH64
|
||||
if (!imm_in_range) {
|
||||
LIR_Opr tmp = new_register(type);
|
||||
__ move(r, tmp);
|
||||
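The `AsmOperand::is_rotated_imm(x)` test above reflects how ARM32 data-processing instructions encode immediates: only an 8-bit value rotated right by an even amount can be embedded in the instruction, so anything else has to be materialized into a register first (the `if (!imm_in_range)` path). A minimal standalone sketch of such a predicate, illustrative only and not the HotSpot implementation:

```cpp
#include <cstdint>

// Illustrative: true if 'v' fits an ARM32 "rotated immediate", i.e. an 8-bit
// value rotated right by an even amount (0, 2, ..., 30).
static bool is_arm32_rotated_imm(uint32_t v) {
  for (int rot = 0; rot < 32; rot += 2) {
    // Rotating left by 'rot' undoes a rotate-right-by-'rot' encoding.
    uint32_t undone = (rot == 0) ? v : ((v << rot) | (v >> (32 - rot)));
    if (undone <= 0xffu) {
      return true;
    }
  }
  return false;
}
```

For example, 0xff000000 and 0x000003fc pass the check, while 0x12345678 does not and would be loaded through a temporary register.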
@ -439,14 +328,9 @@ void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr bas
|
||||
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
|
||||
assert(left != result, "should be different registers");
|
||||
if (is_power_of_2(c + 1)) {
|
||||
#ifdef AARCH64
|
||||
__ shift_left(left, log2_intptr(c + 1), result);
|
||||
__ sub(result, left, result);
|
||||
#else
|
||||
LIR_Address::Scale scale = (LIR_Address::Scale) log2_intptr(c + 1);
|
||||
LIR_Address* addr = new LIR_Address(left, left, scale, 0, T_INT);
|
||||
__ sub(LIR_OprFact::address(addr), left, result); // rsb with shifted register
|
||||
#endif // AARCH64
|
||||
return true;
|
||||
} else if (is_power_of_2(c - 1)) {
|
||||
LIR_Address::Scale scale = (LIR_Address::Scale) log2_intptr(c - 1);
|
||||
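The strength reduction above uses a simple identity: when the constant multiplier c sits next to a power of two, the multiply collapses into one shift plus one subtract or add (on ARM32 even a single `rsb`/`add` with a shifted-register operand). A plain C++ sketch of the arithmetic, with the LIR plumbing left out:

```cpp
#include <cstdint>

// c == (1 << k) - 1, e.g. c == 7, k == 3:  x * 7 == (x << 3) - x
static int32_t mul_by_pow2_minus_one(int32_t x, int k) {
  return (x << k) - x;
}

// c == (1 << k) + 1, e.g. c == 9, k == 3:  x * 9 == (x << 3) + x
static int32_t mul_by_pow2_plus_one(int32_t x, int k) {
  return (x << k) + x;
}
```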
@ -465,12 +349,7 @@ void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp)
|
||||
|
||||
void LIRGenerator::set_card(LIR_Opr value, LIR_Address* card_addr) {
|
||||
assert(CardTable::dirty_card_val() == 0,
|
||||
"Cannot use ZR register (aarch64) or the register containing the card table base address directly (aarch32) otherwise");
|
||||
#ifdef AARCH64
|
||||
// AARCH64 has a register that is constant zero. We can use that one to set the
|
||||
// value in the card table to dirty.
|
||||
__ move(FrameMap::ZR_opr, card_addr);
|
||||
#else // AARCH64
|
||||
"Cannot use the register containing the card table base address directly");
|
||||
if((ci_card_table_address_as<intx>() & 0xff) == 0) {
|
||||
// If the card table base address is aligned to 256 bytes, we can use the register
|
||||
// that contains the card_table_base_address.
|
||||
@ -481,7 +360,6 @@ void LIRGenerator::set_card(LIR_Opr value, LIR_Address* card_addr) {
|
||||
__ move(LIR_OprFact::intConst(CardTable::dirty_card_val()), tmp_zero);
|
||||
__ move(tmp_zero, card_addr);
|
||||
}
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
void LIRGenerator::CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base) {
|
||||
@ -492,24 +370,16 @@ void LIRGenerator::CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LI
|
||||
|
||||
LIR_Opr tmp = FrameMap::LR_ptr_opr;
|
||||
|
||||
// TODO-AARCH64: check performance
|
||||
bool load_card_table_base_const = AARCH64_ONLY(false) NOT_AARCH64(VM_Version::supports_movw());
|
||||
bool load_card_table_base_const = VM_Version::supports_movw();
|
||||
if (load_card_table_base_const) {
|
||||
__ move((LIR_Opr)card_table_base, tmp);
|
||||
} else {
|
||||
__ move(new LIR_Address(FrameMap::Rthread_opr, in_bytes(JavaThread::card_table_base_offset()), T_ADDRESS), tmp);
|
||||
}
|
||||
|
||||
#ifdef AARCH64
|
||||
LIR_Address* shifted_reg_operand = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTable::card_shift, 0, T_BYTE);
|
||||
LIR_Opr tmp2 = tmp;
|
||||
__ add(tmp, LIR_OprFact::address(shifted_reg_operand), tmp2); // tmp2 = tmp + (addr >> CardTable::card_shift)
|
||||
LIR_Address* card_addr = new LIR_Address(tmp2, T_BYTE);
|
||||
#else
|
||||
// Use unsigned type T_BOOLEAN here rather than (signed) T_BYTE since signed load
|
||||
// byte instruction does not support the addressing mode we need.
|
||||
LIR_Address* card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTable::card_shift, 0, T_BOOLEAN);
|
||||
#endif
|
||||
if (UseCondCardMark) {
|
||||
if (ct->scanned_concurrently()) {
|
||||
__ membar_storeload();
|
||||
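The card-marking code above locates the card covering the stored-to address and writes the dirty value (zero) into it, which is why a register known to hold zero, or a suitably aligned card-table base register, can be stored directly. Conceptually the post barrier reduces to the following sketch; the parameter names are illustrative, and `card_shift` is log2 of the card size (512 bytes by default in HotSpot):

```cpp
#include <cstdint>

// Illustrative post-write barrier: mark the card covering 'field_addr' dirty.
static void post_write_barrier(uint8_t* card_table_base,
                               uintptr_t field_addr,
                               unsigned card_shift) {
  card_table_base[field_addr >> card_shift] = 0;  // CardTable::dirty_card_val()
}
```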
@ -679,63 +549,6 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
|
||||
info = state_for(x);
|
||||
}
|
||||
|
||||
#ifdef AARCH64
|
||||
LIRItem left(x->x(), this);
|
||||
LIRItem right(x->y(), this);
|
||||
LIRItem* left_arg = &left;
|
||||
LIRItem* right_arg = &right;
|
||||
|
||||
// Test if instr is commutative and if we should swap
|
||||
if (x->is_commutative() && left.is_constant()) {
|
||||
left_arg = &right;
|
||||
right_arg = &left;
|
||||
}
|
||||
|
||||
left_arg->load_item();
|
||||
switch (x->op()) {
|
||||
case Bytecodes::_ldiv:
|
||||
right_arg->load_item();
|
||||
make_div_by_zero_check(right_arg->result(), T_LONG, info);
|
||||
__ idiv(left_arg->result(), right_arg->result(), rlock_result(x), LIR_OprFact::illegalOpr, NULL);
|
||||
break;
|
||||
|
||||
case Bytecodes::_lrem: {
|
||||
right_arg->load_item();
|
||||
make_div_by_zero_check(right_arg->result(), T_LONG, info);
|
||||
// a % b is implemented with 2 instructions:
|
||||
// tmp = a/b (sdiv)
|
||||
// res = a - b*tmp (msub)
|
||||
LIR_Opr tmp = FrameMap::as_long_opr(Rtemp);
|
||||
__ irem(left_arg->result(), right_arg->result(), rlock_result(x), tmp, NULL);
|
||||
break;
|
||||
}
|
||||
|
||||
case Bytecodes::_lmul:
|
||||
if (right_arg->is_constant() && is_power_of_2_long(right_arg->get_jlong_constant())) {
|
||||
right_arg->dont_load_item();
|
||||
__ shift_left(left_arg->result(), exact_log2_long(right_arg->get_jlong_constant()), rlock_result(x));
|
||||
} else {
|
||||
right_arg->load_item();
|
||||
__ mul(left_arg->result(), right_arg->result(), rlock_result(x));
|
||||
}
|
||||
break;
|
||||
|
||||
case Bytecodes::_ladd:
|
||||
case Bytecodes::_lsub:
|
||||
if (right_arg->is_constant()) {
|
||||
jlong c = right_arg->get_jlong_constant();
|
||||
add_constant(left_arg->result(), (x->op() == Bytecodes::_ladd) ? c : -c, rlock_result(x));
|
||||
} else {
|
||||
right_arg->load_item();
|
||||
arithmetic_op_long(x->op(), rlock_result(x), left_arg->result(), right_arg->result(), NULL);
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
return;
|
||||
}
|
||||
#else
|
||||
switch (x->op()) {
|
||||
case Bytecodes::_ldiv:
|
||||
case Bytecodes::_lrem: {
|
||||
@ -777,7 +590,6 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
|
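The `lrem` handling above spells out the AArch64 idiom: there is no hardware remainder instruction, so the remainder is composed from a divide (`sdiv`) and a multiply-subtract (`msub`). A plain C++ sketch of the identity being used, assuming a non-zero divisor (the generated code emits an explicit divide-by-zero check first):

```cpp
#include <cstdint>

// a % b the way the generated AArch64 code computes it:
//   q = a / b      (sdiv)
//   r = a - b * q  (msub)
static int64_t remainder_via_msub(int64_t a, int64_t b) {
  int64_t q = a / b;   // caller guarantees b != 0
  return a - b * q;
}
```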
||||
@ -804,20 +616,6 @@ void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
|
||||
LIR_Opr result = rlock_result(x);
|
||||
__ idiv(left_arg->result(), right_arg->result(), result, tmp, info);
|
||||
} else {
|
||||
#ifdef AARCH64
|
||||
left_arg->load_item();
|
||||
right_arg->load_item();
|
||||
make_div_by_zero_check(right_arg->result(), T_INT, info);
|
||||
if (x->op() == Bytecodes::_idiv) {
|
||||
__ idiv(left_arg->result(), right_arg->result(), rlock_result(x), LIR_OprFact::illegalOpr, NULL);
|
||||
} else {
|
||||
// a % b is implemented with 2 instructions:
|
||||
// tmp = a/b (sdiv)
|
||||
// res = a - b*tmp (msub)
|
||||
LIR_Opr tmp = FrameMap::as_opr(Rtemp);
|
||||
__ irem(left_arg->result(), right_arg->result(), rlock_result(x), tmp, NULL);
|
||||
}
|
||||
#else
|
||||
left_arg->load_item_force(FrameMap::R0_opr);
|
||||
right_arg->load_item_force(FrameMap::R2_opr);
|
||||
LIR_Opr tmp = FrameMap::R1_opr;
|
||||
@ -831,16 +629,8 @@ void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
|
||||
__ idiv(left_arg->result(), right_arg->result(), out_reg, tmp, info);
|
||||
}
|
||||
__ move(out_reg, result);
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
#ifdef AARCH64
|
||||
} else if (((x->op() == Bytecodes::_iadd) || (x->op() == Bytecodes::_isub)) && right_arg->is_constant()) {
|
||||
left_arg->load_item();
|
||||
jint c = right_arg->get_jint_constant();
|
||||
right_arg->dont_load_item();
|
||||
add_constant(left_arg->result(), (x->op() == Bytecodes::_iadd) ? c : -c, rlock_result(x));
|
||||
#endif // AARCH64
|
||||
|
||||
} else {
|
||||
left_arg->load_item();
|
||||
@ -852,7 +642,6 @@ void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
|
||||
right_arg->load_item();
|
||||
}
|
||||
} else {
|
||||
AARCH64_ONLY(assert(!right_arg->is_constant(), "constant right_arg is already handled by this moment");)
|
||||
right_arg->load_nonconstant();
|
||||
}
|
||||
rlock_result(x);
|
||||
@ -880,11 +669,9 @@ void LIRGenerator::do_ShiftOp(ShiftOp* x) {
|
||||
LIRItem value(x->x(), this);
|
||||
LIRItem count(x->y(), this);
|
||||
|
||||
#ifndef AARCH64
|
||||
if (value.type()->is_long()) {
|
||||
count.set_destroys_register();
|
||||
}
|
||||
#endif // !AARCH64
|
||||
|
||||
if (count.is_constant()) {
|
||||
assert(count.type()->as_IntConstant() != NULL, "should be");
|
||||
@ -906,15 +693,7 @@ void LIRGenerator::do_LogicOp(LogicOp* x) {
|
||||
|
||||
left.load_item();
|
||||
|
||||
#ifdef AARCH64
|
||||
if (right.is_constant() && can_inline_as_constant_in_logic(right.value())) {
|
||||
right.dont_load_item();
|
||||
} else {
|
||||
right.load_item();
|
||||
}
|
||||
#else
|
||||
right.load_nonconstant();
|
||||
#endif // AARCH64
|
||||
|
||||
logic_op(x->op(), rlock_result(x), left.result(), right.result());
|
||||
}
|
||||
@ -956,15 +735,7 @@ void LIRGenerator::do_CompareOp(CompareOp* x) {
|
||||
LIRItem right(x->y(), this);
|
||||
left.load_item();
|
||||
|
||||
#ifdef AARCH64
|
||||
if (right.is_constant() && can_inline_as_constant_in_cmp(right.value())) {
|
||||
right.dont_load_item();
|
||||
} else {
|
||||
right.load_item();
|
||||
}
|
||||
#else
|
||||
right.load_nonconstant();
|
||||
#endif // AARCH64
|
||||
|
||||
LIR_Opr reg = rlock_result(x);
|
||||
|
||||
@ -987,19 +758,11 @@ LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_
|
||||
cmp_value.load_item();
|
||||
LIR_Opr result = new_register(T_INT);
|
||||
if (type == T_OBJECT || type == T_ARRAY) {
|
||||
#ifdef AARCH64
|
||||
if (UseCompressedOops) {
|
||||
tmp1 = new_pointer_register();
|
||||
tmp2 = new_pointer_register();
|
||||
}
|
||||
#endif
|
||||
__ cas_obj(addr, cmp_value.result(), new_value.result(), new_register(T_INT), new_register(T_INT), result);
|
||||
} else if (type == T_INT) {
|
||||
__ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), tmp1, tmp1, result);
|
||||
} else if (type == T_LONG) {
|
||||
#ifndef AARCH64
|
||||
tmp1 = new_register(T_LONG);
|
||||
#endif // !AARCH64
|
||||
__ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), tmp1, tmp2, result);
|
||||
} else {
|
||||
ShouldNotReachHere();
|
||||
@ -1135,7 +898,6 @@ void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
|
||||
void LIRGenerator::do_Convert(Convert* x) {
|
||||
address runtime_func;
|
||||
switch (x->op()) {
|
||||
#ifndef AARCH64
|
||||
case Bytecodes::_l2f:
|
||||
runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::l2f);
|
||||
break;
|
||||
@ -1170,7 +932,6 @@ void LIRGenerator::do_Convert(Convert* x) {
|
||||
runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::d2i);
|
||||
break;
|
||||
#endif // __SOFTFP__
|
||||
#endif // !AARCH64
|
||||
default: {
|
||||
LIRItem value(x->value(), this);
|
||||
value.load_item();
|
||||
@ -1488,7 +1249,6 @@ void LIRGenerator::do_If(If* x) {
|
||||
LIRItem* yin = &yitem;
|
||||
If::Condition cond = x->cond();
|
||||
|
||||
#ifndef AARCH64
|
||||
if (tag == longTag) {
|
||||
if (cond == If::gtr || cond == If::leq) {
|
||||
cond = Instruction::mirror(cond);
|
||||
@ -1497,20 +1257,11 @@ void LIRGenerator::do_If(If* x) {
|
||||
}
|
||||
xin->set_destroys_register();
|
||||
}
|
||||
#endif // !AARCH64
|
||||
|
||||
xin->load_item();
|
||||
LIR_Opr left = xin->result();
|
||||
LIR_Opr right;
|
||||
|
||||
#ifdef AARCH64
|
||||
if (yin->is_constant() && can_inline_as_constant_in_cmp(yin->value())) {
|
||||
yin->dont_load_item();
|
||||
} else {
|
||||
yin->load_item();
|
||||
}
|
||||
right = yin->result();
|
||||
#else
|
||||
if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 &&
|
||||
(cond == If::eql || cond == If::neq)) {
|
||||
// inline long zero
|
||||
@ -1519,7 +1270,6 @@ void LIRGenerator::do_If(If* x) {
|
||||
yin->load_nonconstant();
|
||||
right = yin->result();
|
||||
}
|
||||
#endif // AARCH64
|
||||
|
||||
set_no_result(x);
|
||||
|
||||
@ -1558,7 +1308,6 @@ void LIRGenerator::trace_block_entry(BlockBegin* block) {
|
||||
|
||||
void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
|
||||
CodeEmitInfo* info) {
|
||||
#ifndef AARCH64
|
||||
if (value->is_double_cpu()) {
|
||||
assert(address->index()->is_illegal(), "should have a constant displacement");
|
||||
LIR_Opr tmp = new_pointer_register();
|
||||
@ -1566,14 +1315,11 @@ void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
|
||||
__ volatile_store_mem_reg(value, new LIR_Address(tmp, (intx)0, address->type()), info);
|
||||
return;
|
||||
}
|
||||
#endif // !AARCH64
|
||||
// TODO-AARCH64 implement with stlr instruction
|
||||
__ store(value, address, info, lir_patch_none);
|
||||
}
|
||||
|
||||
void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
|
||||
CodeEmitInfo* info) {
|
||||
#ifndef AARCH64
|
||||
if (result->is_double_cpu()) {
|
||||
assert(address->index()->is_illegal(), "should have a constant displacement");
|
||||
LIR_Opr tmp = new_pointer_register();
|
||||
@ -1581,7 +1327,5 @@ void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
|
||||
__ volatile_load_mem_reg(new LIR_Address(tmp, (intx)0, address->type()), result, info);
|
||||
return;
|
||||
}
|
||||
#endif // !AARCH64
|
||||
// TODO-AARCH64 implement with ldar instruction
|
||||
__ load(address, result, info, lir_patch_none);
|
||||
}
|
||||
|
@ -27,7 +27,3 @@

void make_div_by_zero_check(LIR_Opr right_arg, BasicType type, CodeEmitInfo* info);

#ifdef AARCH64
// the helper for arithmetic
void add_constant(LIR_Opr src, jlong c, LIR_Opr dest);
#endif // AARCH64
|
@ -33,17 +33,6 @@ FloatRegister LIR_OprDesc::as_double_reg() const {
return as_FloatRegister(fpu_regnrLo());
}

#ifdef AARCH64
// Reg2 unused.
LIR_Opr LIR_OprFact::double_fpu(int reg1, int reg2) {
assert(as_FloatRegister(reg2) == fnoreg, "Not used on this platform");
return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
(reg1 << LIR_OprDesc::reg2_shift) |
LIR_OprDesc::double_type |
LIR_OprDesc::fpu_register |
LIR_OprDesc::double_size);
}
#else
LIR_Opr LIR_OprFact::double_fpu(int reg1, int reg2) {
assert(as_FloatRegister(reg2) != fnoreg, "Arm32 holds double in two regs.");
return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
@ -52,22 +41,12 @@ LIR_Opr LIR_OprFact::double_fpu(int reg1, int reg2) {
|
||||
LIR_OprDesc::fpu_register |
|
||||
LIR_OprDesc::double_size);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef PRODUCT
|
||||
void LIR_Address::verify() const {
|
||||
#ifdef _LP64
|
||||
assert(base()->is_cpu_register(), "wrong base operand");
|
||||
#endif
|
||||
#ifdef AARCH64
|
||||
if (base()->type() == T_INT) {
|
||||
assert(index()->is_single_cpu() && (index()->type() == T_INT), "wrong index operand");
|
||||
} else {
|
||||
assert(index()->is_illegal() || index()->is_double_cpu() ||
|
||||
(index()->is_single_cpu() && (index()->is_oop_register() || index()->type() == T_INT)), "wrong index operand");
|
||||
assert(base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA, "wrong type for addresses");
|
||||
}
|
||||
#else
|
||||
assert(disp() == 0 || index()->is_illegal(), "can't have both");
|
||||
// Note: offsets higher than 4096 must not be rejected here. They can
|
||||
// be handled by the back-end or will be rejected if not.
|
||||
@ -81,6 +60,5 @@ void LIR_Address::verify() const {
|
||||
assert(base()->type() == T_OBJECT || base()->type() == T_INT || base()->type() == T_METADATA,
|
||||
"wrong type for addresses");
|
||||
#endif
|
||||
#endif // AARCH64
|
||||
}
|
||||
#endif // PRODUCT
|
||||
|
@ -31,24 +31,17 @@ inline bool LinearScan::is_processed_reg_num(int reg_num) {
|
||||
}
|
||||
|
||||
inline int LinearScan::num_physical_regs(BasicType type) {
|
||||
#ifndef AARCH64
|
||||
if (type == T_LONG || type == T_DOUBLE) return 2;
|
||||
#endif // !AARCH64
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
||||
inline bool LinearScan::requires_adjacent_regs(BasicType type) {
|
||||
#ifdef AARCH64
|
||||
return false;
|
||||
#else
|
||||
return type == T_DOUBLE || type == T_LONG;
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
inline bool LinearScan::is_caller_save(int assigned_reg) {
|
||||
assert(assigned_reg >= 0 && assigned_reg < nof_regs, "should call this only for registers");
|
||||
// TODO-AARCH64 try to add callee-saved registers
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -46,11 +46,7 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
|
||||
load_klass(Rtemp, receiver);
|
||||
cmp(Rtemp, iCache);
|
||||
b(verified, eq); // jump over alignment no-ops
|
||||
#ifdef AARCH64
|
||||
jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type, Rtemp);
|
||||
#else
|
||||
jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);
|
||||
#endif
|
||||
align(CodeEntryAlignment);
|
||||
bind(verified);
|
||||
}
|
||||
@ -59,10 +55,6 @@ void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_by
|
||||
assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
|
||||
assert((frame_size_in_bytes % StackAlignmentInBytes) == 0, "frame size should be aligned");
|
||||
|
||||
#ifdef AARCH64
|
||||
// Extra nop for MT-safe patching in NativeJump::patch_verified_entry
|
||||
nop();
|
||||
#endif // AARCH64
|
||||
|
||||
arm_stack_overflow_check(bang_size_in_bytes, Rtemp);
|
||||
|
||||
@ -103,28 +95,12 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
|
||||
mov(tmp, (intptr_t)markOopDesc::prototype());
|
||||
}
|
||||
|
||||
#ifdef AARCH64
|
||||
if (UseCompressedClassPointers) {
|
||||
str(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
|
||||
encode_klass_not_null(tmp, klass); // Take care not to kill klass
|
||||
str_w(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
|
||||
} else {
|
||||
assert(oopDesc::mark_offset_in_bytes() + wordSize == oopDesc::klass_offset_in_bytes(), "adjust this code");
|
||||
stp(tmp, klass, Address(obj, oopDesc::mark_offset_in_bytes()));
|
||||
}
|
||||
#else
|
||||
str(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
|
||||
str(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
|
||||
#endif // AARCH64
|
||||
|
||||
if (len->is_valid()) {
|
||||
str_32(len, Address(obj, arrayOopDesc::length_offset_in_bytes()));
|
||||
}
|
||||
#ifdef AARCH64
|
||||
else if (UseCompressedClassPointers) {
|
||||
store_klass_gap(obj);
|
||||
}
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
|
||||
@ -145,40 +121,6 @@ void C1_MacroAssembler::initialize_object(Register obj, Register obj_end, Regist
|
||||
const Register ptr = tmp2;
|
||||
|
||||
if (!(UseTLAB && ZeroTLAB && is_tlab_allocated)) {
|
||||
#ifdef AARCH64
|
||||
if (obj_size_in_bytes < 0) {
|
||||
add_rc(ptr, obj, header_size);
|
||||
initialize_body(ptr, obj_end, tmp1);
|
||||
|
||||
} else {
|
||||
int base = instanceOopDesc::header_size() * HeapWordSize;
|
||||
assert(obj_size_in_bytes >= base, "should be");
|
||||
|
||||
const int zero_bytes = obj_size_in_bytes - base;
|
||||
assert((zero_bytes % wordSize) == 0, "should be");
|
||||
|
||||
if ((zero_bytes % (2*wordSize)) != 0) {
|
||||
str(ZR, Address(obj, base));
|
||||
base += wordSize;
|
||||
}
|
||||
|
||||
const int stp_count = zero_bytes / (2*wordSize);
|
||||
|
||||
if (zero_bytes > 8 * wordSize) {
|
||||
Label loop;
|
||||
add(ptr, obj, base);
|
||||
mov(tmp1, stp_count);
|
||||
bind(loop);
|
||||
subs(tmp1, tmp1, 1);
|
||||
stp(ZR, ZR, Address(ptr, 2*wordSize, post_indexed));
|
||||
b(loop, gt);
|
||||
} else {
|
||||
for (int i = 0; i < stp_count; i++) {
|
||||
stp(ZR, ZR, Address(obj, base + i * 2 * wordSize));
|
||||
}
|
||||
}
|
||||
}
|
||||
#else
|
||||
if (obj_size_in_bytes >= 0 && obj_size_in_bytes <= 8 * BytesPerWord) {
|
||||
mov(tmp1, 0);
|
||||
const int base = instanceOopDesc::header_size() * HeapWordSize;
|
||||
@ -190,7 +132,6 @@ void C1_MacroAssembler::initialize_object(Register obj, Register obj_end, Regist
|
||||
add(ptr, obj, header_size);
|
||||
initialize_body(ptr, obj_end, tmp1);
|
||||
}
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
// StoreStore barrier required after complete initialization
|
||||
@ -227,12 +168,7 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len,
|
||||
const int scale_shift = exact_log2(element_size);
|
||||
const Register obj_size = Rtemp; // Rtemp should be free at c1 LIR level
|
||||
|
||||
#ifdef AARCH64
|
||||
mov_slow(Rtemp, max_array_allocation_length);
|
||||
cmp_32(len, Rtemp);
|
||||
#else
|
||||
cmp_32(len, max_array_allocation_length);
|
||||
#endif // AARCH64
|
||||
b(slow_case, hs);
|
||||
|
||||
bool align_header = ((header_size_in_bytes | element_size) & MinObjAlignmentInBytesMask) != 0;
|
||||
@ -271,34 +207,6 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj,
|
||||
|
||||
assert(oopDesc::mark_offset_in_bytes() == 0, "Required by atomic instructions");
|
||||
|
||||
#ifdef AARCH64
|
||||
|
||||
str(obj, Address(disp_hdr, obj_offset));
|
||||
|
||||
if (!UseBiasedLocking) {
|
||||
null_check_offset = offset();
|
||||
}
|
||||
ldr(hdr, obj);
|
||||
|
||||
// Test if object is already locked
|
||||
assert(markOopDesc::unlocked_value == 1, "adjust this code");
|
||||
tbnz(hdr, exact_log2(markOopDesc::unlocked_value), fast_lock);
|
||||
|
||||
// Check for recursive locking
|
||||
// See comments in InterpreterMacroAssembler::lock_object for
|
||||
// explanations on the fast recursive locking check.
|
||||
intptr_t mask = ((intptr_t)3) - ((intptr_t)os::vm_page_size());
|
||||
Assembler::LogicalImmediate imm(mask, false);
|
||||
mov(tmp2, SP);
|
||||
sub(tmp2, hdr, tmp2);
|
||||
ands(tmp2, tmp2, imm);
|
||||
b(slow_case, ne);
|
||||
|
||||
// Recursive locking: store 0 into a lock record
|
||||
str(ZR, Address(disp_hdr, mark_offset));
|
||||
b(fast_lock_done);
|
||||
|
||||
#else // AARCH64
|
||||
|
||||
if (!UseBiasedLocking) {
|
||||
null_check_offset = offset();
|
||||
@ -328,7 +236,6 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj,
|
||||
// else need slow case
|
||||
b(slow_case);
|
||||
|
||||
#endif // AARCH64
|
||||
|
||||
bind(fast_lock);
|
||||
// Save previous object header in BasicLock structure and update the header
|
||||
|
@ -80,15 +80,8 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
|
||||
// Runtime1::exception_handler_for_pc
|
||||
if (_stub_id != Runtime1::forward_exception_id) {
|
||||
assert(frame_size() != no_frame_size, "cannot directly call forward_exception_id");
|
||||
#ifdef AARCH64
|
||||
Label skip;
|
||||
cbz(R3, skip);
|
||||
jump(Runtime1::entry_for(Runtime1::forward_exception_id), relocInfo::runtime_call_type, Rtemp);
|
||||
bind(skip);
|
||||
#else
|
||||
cmp(R3, 0);
|
||||
jump(Runtime1::entry_for(Runtime1::forward_exception_id), relocInfo::runtime_call_type, Rtemp, ne);
|
||||
#endif // AARCH64
|
||||
} else {
|
||||
#ifdef ASSERT
|
||||
// Should not have pending exception in forward_exception stub
|
||||
@ -124,43 +117,6 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
|
||||
#define __ sasm->
|
||||
|
||||
// TODO: ARM - does this duplicate RegisterSaver in SharedRuntime?
|
||||
#ifdef AARCH64
|
||||
|
||||
//
|
||||
// On AArch64 registers save area has the following layout:
|
||||
//
|
||||
// |---------------------|
|
||||
// | return address (LR) |
|
||||
// | FP |
|
||||
// |---------------------|
|
||||
// | D31 |
|
||||
// | ... |
|
||||
// | D0 |
|
||||
// |---------------------|
|
||||
// | padding |
|
||||
// |---------------------|
|
||||
// | R28 |
|
||||
// | ... |
|
||||
// | R0 |
|
||||
// |---------------------| <-- SP
|
||||
//
|
||||
|
||||
enum RegisterLayout {
|
||||
number_of_saved_gprs = 29,
|
||||
number_of_saved_fprs = FloatRegisterImpl::number_of_registers,
|
||||
|
||||
R0_offset = 0,
|
||||
D0_offset = R0_offset + number_of_saved_gprs + 1,
|
||||
FP_offset = D0_offset + number_of_saved_fprs,
|
||||
LR_offset = FP_offset + 1,
|
||||
|
||||
reg_save_size = LR_offset + 1,
|
||||
|
||||
arg1_offset = reg_save_size * wordSize,
|
||||
arg2_offset = (reg_save_size + 1) * wordSize
|
||||
};
|
||||
|
||||
#else
|
||||
|
||||
enum RegisterLayout {
|
||||
fpu_save_size = pd_nof_fpu_regs_reg_alloc,
|
||||
@ -191,7 +147,6 @@ enum RegisterLayout {
|
||||
arg2_offset = (reg_save_size + 1) * wordSize
|
||||
};
|
||||
|
||||
#endif // AARCH64
|
||||
|
||||
static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers = HaveVFP) {
|
||||
sasm->set_frame_size(reg_save_size /* in words */);
|
||||
@ -200,19 +155,6 @@ static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers = H
|
||||
// Locations are offsets from sp after runtime call.
|
||||
OopMap* map = new OopMap(VMRegImpl::slots_per_word * reg_save_size, 0);
|
||||
|
||||
#ifdef AARCH64
|
||||
for (int i = 0; i < number_of_saved_gprs; i++) {
|
||||
map->set_callee_saved(VMRegImpl::stack2reg((R0_offset + i) * VMRegImpl::slots_per_word), as_Register(i)->as_VMReg());
|
||||
}
|
||||
map->set_callee_saved(VMRegImpl::stack2reg(FP_offset * VMRegImpl::slots_per_word), FP->as_VMReg());
|
||||
map->set_callee_saved(VMRegImpl::stack2reg(LR_offset * VMRegImpl::slots_per_word), LR->as_VMReg());
|
||||
|
||||
if (save_fpu_registers) {
|
||||
for (int i = 0; i < number_of_saved_fprs; i++) {
|
||||
map->set_callee_saved(VMRegImpl::stack2reg((D0_offset + i) * VMRegImpl::slots_per_word), as_FloatRegister(i)->as_VMReg());
|
||||
}
|
||||
}
|
||||
#else
|
||||
int j=0;
|
||||
for (int i = R0_offset; i < R10_offset; i++) {
|
||||
if (j == FP_REG_NUM) {
|
||||
@ -235,7 +177,6 @@ static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers = H
|
||||
map->set_callee_saved(VMRegImpl::stack2reg(i), as_FloatRegister(i)->as_VMReg());
|
||||
}
|
||||
}
|
||||
#endif // AARCH64
|
||||
|
||||
return map;
|
||||
}
|
||||
@ -244,29 +185,6 @@ static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers
|
||||
__ block_comment("save_live_registers");
|
||||
sasm->set_frame_size(reg_save_size /* in words */);
|
||||
|
||||
#ifdef AARCH64
|
||||
assert((reg_save_size * wordSize) % StackAlignmentInBytes == 0, "SP should be aligned");
|
||||
|
||||
__ raw_push(FP, LR);
|
||||
|
||||
__ sub(SP, SP, (reg_save_size - 2) * wordSize);
|
||||
|
||||
for (int i = 0; i < align_down((int)number_of_saved_gprs, 2); i += 2) {
|
||||
__ stp(as_Register(i), as_Register(i+1), Address(SP, (R0_offset + i) * wordSize));
|
||||
}
|
||||
|
||||
if (is_odd(number_of_saved_gprs)) {
|
||||
int i = number_of_saved_gprs - 1;
|
||||
__ str(as_Register(i), Address(SP, (R0_offset + i) * wordSize));
|
||||
}
|
||||
|
||||
if (save_fpu_registers) {
|
||||
assert (is_even(number_of_saved_fprs), "adjust this code");
|
||||
for (int i = 0; i < number_of_saved_fprs; i += 2) {
|
||||
__ stp_d(as_FloatRegister(i), as_FloatRegister(i+1), Address(SP, (D0_offset + i) * wordSize));
|
||||
}
|
||||
}
|
||||
#else
|
||||
__ push(RegisterSet(FP) | RegisterSet(LR));
|
||||
__ push(RegisterSet(R0, R6) | RegisterSet(R8, R10) | R12 | altFP_7_11);
|
||||
if (save_fpu_registers) {
|
||||
@ -274,7 +192,6 @@ static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers
|
||||
} else {
|
||||
__ sub(SP, SP, fpu_save_size * wordSize);
|
||||
}
|
||||
#endif // AARCH64
|
||||
|
||||
return generate_oop_map(sasm, save_fpu_registers);
|
||||
}
|
||||
@ -287,34 +204,6 @@ static void restore_live_registers(StubAssembler* sasm,
|
||||
bool restore_fpu_registers = HaveVFP) {
|
||||
__ block_comment("restore_live_registers");
|
||||
|
||||
#ifdef AARCH64
|
||||
if (restore_R0) {
|
||||
__ ldr(R0, Address(SP, R0_offset * wordSize));
|
||||
}
|
||||
|
||||
assert(is_odd(number_of_saved_gprs), "adjust this code");
|
||||
for (int i = 1; i < number_of_saved_gprs; i += 2) {
|
||||
__ ldp(as_Register(i), as_Register(i+1), Address(SP, (R0_offset + i) * wordSize));
|
||||
}
|
||||
|
||||
if (restore_fpu_registers) {
|
||||
assert (is_even(number_of_saved_fprs), "adjust this code");
|
||||
for (int i = 0; i < number_of_saved_fprs; i += 2) {
|
||||
__ ldp_d(as_FloatRegister(i), as_FloatRegister(i+1), Address(SP, (D0_offset + i) * wordSize));
|
||||
}
|
||||
}
|
||||
|
||||
__ add(SP, SP, (reg_save_size - 2) * wordSize);
|
||||
|
||||
if (restore_FP_LR) {
|
||||
__ raw_pop(FP, LR);
|
||||
if (do_return) {
|
||||
__ ret();
|
||||
}
|
||||
} else {
|
||||
assert (!do_return, "return without restoring FP/LR");
|
||||
}
|
||||
#else
|
||||
if (restore_fpu_registers) {
|
||||
__ fldmiad(SP, FloatRegisterSet(D0, fpu_save_size / 2), writeback);
|
||||
if (!restore_R0) {
|
||||
@ -329,7 +218,6 @@ static void restore_live_registers(StubAssembler* sasm,
|
||||
} else {
|
||||
assert (!do_return, "return without restoring FP/LR");
|
||||
}
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
|
||||
@ -341,11 +229,9 @@ static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registe
|
||||
restore_live_registers(sasm, true, true, true, restore_fpu_registers);
|
||||
}
|
||||
|
||||
#ifndef AARCH64
|
||||
static void restore_live_registers_except_FP_LR(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
|
||||
restore_live_registers(sasm, true, false, false, restore_fpu_registers);
|
||||
}
|
||||
#endif // !AARCH64
|
||||
|
||||
static void restore_live_registers_without_return(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
|
||||
restore_live_registers(sasm, true, true, false, restore_fpu_registers);
|
||||
@ -386,15 +272,8 @@ OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address targe
|
||||
static void restore_sp_for_method_handle(StubAssembler* sasm) {
|
||||
// Restore SP from its saved reg (FP) if the exception PC is a MethodHandle call site.
|
||||
__ ldr_s32(Rtemp, Address(Rthread, JavaThread::is_method_handle_return_offset()));
|
||||
#ifdef AARCH64
|
||||
Label skip;
|
||||
__ cbz(Rtemp, skip);
|
||||
__ mov(SP, Rmh_SP_save);
|
||||
__ bind(skip);
|
||||
#else
|
||||
__ cmp(Rtemp, 0);
|
||||
__ mov(SP, Rmh_SP_save, ne);
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
|
||||
@ -500,22 +379,12 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
|
||||
|
||||
__ cmp_32(R0, 0);
|
||||
|
||||
#ifdef AARCH64
|
||||
Label call_deopt;
|
||||
|
||||
restore_live_registers_without_return(sasm);
|
||||
__ b(call_deopt, ne);
|
||||
__ ret();
|
||||
|
||||
__ bind(call_deopt);
|
||||
#else
|
||||
restore_live_registers_except_FP_LR(sasm);
|
||||
__ pop(RegisterSet(FP) | RegisterSet(PC), eq);
|
||||
|
||||
// Deoptimization needed
|
||||
// TODO: ARM - no need to restore FP & LR because unpack_with_reexecution() stores them back
|
||||
__ pop(RegisterSet(FP) | RegisterSet(LR));
|
||||
#endif // AARCH64
|
||||
|
||||
__ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, Rtemp);
|
||||
|
||||
@ -622,12 +491,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
if (!UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
|
||||
Label slow_case, slow_case_no_pop;
|
||||
|
||||
#ifdef AARCH64
|
||||
__ mov_slow(Rtemp, C1_MacroAssembler::max_array_allocation_length);
|
||||
__ cmp_32(length, Rtemp);
|
||||
#else
|
||||
__ cmp_32(length, C1_MacroAssembler::max_array_allocation_length);
|
||||
#endif // AARCH64
|
||||
__ b(slow_case_no_pop, hs);
|
||||
|
||||
// Free some temporary registers
|
||||
@ -644,12 +508,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
__ mov(arr_size, MinObjAlignmentInBytesMask);
|
||||
__ and_32(tmp2, tmp1, (unsigned int)(Klass::_lh_header_size_mask << Klass::_lh_header_size_shift));
|
||||
|
||||
#ifdef AARCH64
|
||||
__ lslv_w(tmp3, length, tmp1);
|
||||
__ add(arr_size, arr_size, tmp3);
|
||||
#else
|
||||
__ add(arr_size, arr_size, AsmOperand(length, lsl, tmp1));
|
||||
#endif // AARCH64
|
||||
|
||||
__ add(arr_size, arr_size, AsmOperand(tmp2, lsr, Klass::_lh_header_size_shift));
|
||||
__ align_reg(arr_size, arr_size, MinObjAlignmentInBytes);
|
||||
@ -714,15 +573,8 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
__ load_klass(Rtemp, R0);
|
||||
__ ldr_u32(Rtemp, Address(Rtemp, Klass::access_flags_offset()));
|
||||
|
||||
#ifdef AARCH64
|
||||
Label L;
|
||||
__ tbnz(Rtemp, exact_log2(JVM_ACC_HAS_FINALIZER), L);
|
||||
__ ret();
|
||||
__ bind(L);
|
||||
#else
|
||||
__ tst(Rtemp, JVM_ACC_HAS_FINALIZER);
|
||||
__ bx(LR, eq);
|
||||
#endif // AARCH64
|
||||
|
||||
// Call VM
|
||||
OopMap* map = save_live_registers(sasm);
|
||||
@ -744,9 +596,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
case throw_index_exception_id:
|
||||
{
|
||||
__ set_info("index_range_check_failed", dont_gc_arguments);
|
||||
#ifdef AARCH64
|
||||
__ NOT_TESTED();
|
||||
#endif
|
||||
oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
|
||||
}
|
||||
break;
|
||||
@ -804,9 +653,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
case throw_incompatible_class_change_error_id:
|
||||
{
|
||||
__ set_info("throw_incompatible_class_cast_exception", dont_gc_arguments);
|
||||
#ifdef AARCH64
|
||||
__ NOT_TESTED();
|
||||
#endif
|
||||
oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
|
||||
}
|
||||
break;
|
||||
@ -890,7 +736,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
restore_live_registers_without_return(sasm);
|
||||
DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
|
||||
assert(deopt_blob != NULL, "deoptimization blob must have been created");
|
||||
__ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, AARCH64_ONLY(Rtemp) NOT_AARCH64(noreg));
|
||||
__ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, noreg);
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -39,27 +39,15 @@ define_pd_global(bool, PreferInterpreterNativeStubs, false);
|
||||
define_pd_global(bool, ProfileTraps, true);
|
||||
define_pd_global(bool, UseOnStackReplacement, true);
|
||||
define_pd_global(bool, ProfileInterpreter, true);
|
||||
#ifdef AARCH64
|
||||
define_pd_global(bool, TieredCompilation, trueInTiered);
|
||||
#else
|
||||
define_pd_global(bool, TieredCompilation, false);
|
||||
#endif
|
||||
define_pd_global(intx, CompileThreshold, 10000);
|
||||
|
||||
define_pd_global(intx, OnStackReplacePercentage, 140);
|
||||
define_pd_global(intx, ConditionalMoveLimit, 4);
|
||||
// C2 gets to use all the float/double registers
|
||||
#ifdef AARCH64
|
||||
define_pd_global(intx, FLOATPRESSURE, 31);
|
||||
#else
|
||||
define_pd_global(intx, FLOATPRESSURE, 30);
|
||||
#endif
|
||||
define_pd_global(intx, FreqInlineSize, 175);
|
||||
#ifdef AARCH64
|
||||
define_pd_global(intx, INTPRESSURE, 27);
|
||||
#else
|
||||
define_pd_global(intx, INTPRESSURE, 12);
|
||||
#endif
|
||||
define_pd_global(intx, InteriorEntryAlignment, 16); // = CodeEntryAlignment
|
||||
define_pd_global(size_t, NewSizeThreadIncrease, ScaleForWordSize(4*K));
|
||||
// The default setting 16/16 seems to work best.
|
||||
|
@ -304,26 +304,12 @@ void frame::interpreter_frame_set_monitor_end(BasicObjectLock* value) {
|
||||
*((BasicObjectLock**)addr_at(interpreter_frame_monitor_block_top_offset)) = value;
|
||||
}
|
||||
|
||||
#ifdef AARCH64
|
||||
|
||||
// Used by template based interpreter deoptimization
|
||||
void frame::interpreter_frame_set_stack_top(intptr_t* stack_top) {
|
||||
*((intptr_t**)addr_at(interpreter_frame_stack_top_offset)) = stack_top;
|
||||
}
|
||||
|
||||
// Used by template based interpreter deoptimization
|
||||
void frame::interpreter_frame_set_extended_sp(intptr_t* sp) {
|
||||
*((intptr_t**)addr_at(interpreter_frame_extended_sp_offset)) = sp;
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
// Used by template based interpreter deoptimization
|
||||
void frame::interpreter_frame_set_last_sp(intptr_t* sp) {
|
||||
*((intptr_t**)addr_at(interpreter_frame_last_sp_offset)) = sp;
|
||||
}
|
||||
|
||||
#endif // AARCH64
|
||||
|
||||
frame frame::sender_for_entry_frame(RegisterMap* map) const {
|
||||
assert(map != NULL, "map must be set");
|
||||
@ -334,18 +320,12 @@ frame frame::sender_for_entry_frame(RegisterMap* map) const {
|
||||
assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack");
|
||||
map->clear();
|
||||
assert(map->include_argument_oops(), "should be set by clear");
|
||||
#ifdef AARCH64
|
||||
assert (jfa->last_Java_pc() != NULL, "pc should be stored");
|
||||
frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
|
||||
return fr;
|
||||
#else
|
||||
if (jfa->last_Java_pc() != NULL) {
|
||||
frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
|
||||
return fr;
|
||||
}
|
||||
frame fr(jfa->last_Java_sp(), jfa->last_Java_fp());
|
||||
return fr;
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
@ -403,10 +383,6 @@ void frame::adjust_unextended_sp() {
|
||||
void frame::update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr) {
|
||||
// see x86 for comments
|
||||
map->set_location(FP->as_VMReg(), (address) link_addr);
|
||||
#ifdef AARCH64
|
||||
// also adjust a high part of register
|
||||
map->set_location(FP->as_VMReg()->next(), (address) link_addr);
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
|
||||
@ -539,14 +515,6 @@ BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result)
|
||||
if (method->is_native()) {
|
||||
// Prior to calling into the runtime to report the method_exit both of
|
||||
// the possible return value registers are saved.
|
||||
#ifdef AARCH64
|
||||
// Return value registers are saved into the frame
|
||||
if (type == T_FLOAT || type == T_DOUBLE) {
|
||||
res_addr = addr_at(interpreter_frame_fp_saved_result_offset);
|
||||
} else {
|
||||
res_addr = addr_at(interpreter_frame_gp_saved_result_offset);
|
||||
}
|
||||
#else
|
||||
// Return value registers are pushed to the native stack
|
||||
res_addr = (intptr_t*)sp();
|
||||
#ifdef __ABI_HARD__
|
||||
@ -555,7 +523,6 @@ BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result)
|
||||
res_addr += 2;
|
||||
}
|
||||
#endif // __ABI_HARD__
|
||||
#endif // AARCH64
|
||||
} else {
|
||||
res_addr = (intptr_t*)interpreter_frame_tos_address();
|
||||
}
|
||||
@ -602,12 +569,7 @@ intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
|
||||
void frame::describe_pd(FrameValues& values, int frame_no) {
|
||||
if (is_interpreted_frame()) {
|
||||
DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
|
||||
#ifdef AARCH64
|
||||
DESCRIBE_FP_OFFSET(interpreter_frame_stack_top);
|
||||
DESCRIBE_FP_OFFSET(interpreter_frame_extended_sp);
|
||||
#else
|
||||
DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
|
||||
#endif // AARCH64
|
||||
DESCRIBE_FP_OFFSET(interpreter_frame_method);
|
||||
DESCRIBE_FP_OFFSET(interpreter_frame_mdp);
|
||||
DESCRIBE_FP_OFFSET(interpreter_frame_cache);
|
||||
@ -631,7 +593,6 @@ intptr_t *frame::initial_deoptimization_info() {
|
||||
}
|
||||
|
||||
intptr_t* frame::real_fp() const {
|
||||
#ifndef AARCH64
|
||||
if (is_entry_frame()) {
|
||||
// Work-around: FP (currently) does not conform to the ABI for entry
|
||||
// frames (see generate_call_stub). Might be worth fixing as another CR.
|
||||
@ -644,7 +605,6 @@ intptr_t* frame::real_fp() const {
|
||||
#endif
|
||||
return new_fp;
|
||||
}
|
||||
#endif // !AARCH64
|
||||
if (_cb != NULL) {
|
||||
// use the frame size if valid
|
||||
int size = _cb->frame_size();
|
||||
|
@ -37,22 +37,12 @@
|
||||
sender_sp_offset = 2,
|
||||
|
||||
// Interpreter frames
|
||||
#ifdef AARCH64
|
||||
interpreter_frame_gp_saved_result_offset = 4, // for native calls only
|
||||
interpreter_frame_fp_saved_result_offset = 3, // for native calls only
|
||||
#endif
|
||||
interpreter_frame_oop_temp_offset = 2, // for native calls only
|
||||
|
||||
interpreter_frame_sender_sp_offset = -1,
|
||||
#ifdef AARCH64
|
||||
interpreter_frame_stack_top_offset = interpreter_frame_sender_sp_offset - 1,
|
||||
interpreter_frame_extended_sp_offset = interpreter_frame_stack_top_offset - 1,
|
||||
interpreter_frame_method_offset = interpreter_frame_extended_sp_offset - 1,
|
||||
#else
|
||||
// outgoing sp before a call to an invoked method
|
||||
interpreter_frame_last_sp_offset = interpreter_frame_sender_sp_offset - 1,
|
||||
interpreter_frame_method_offset = interpreter_frame_last_sp_offset - 1,
|
||||
#endif // AARCH64
|
||||
interpreter_frame_mirror_offset = interpreter_frame_method_offset - 1,
|
||||
interpreter_frame_mdp_offset = interpreter_frame_mirror_offset - 1,
|
||||
interpreter_frame_cache_offset = interpreter_frame_mdp_offset - 1,
|
||||
@ -64,7 +54,7 @@
|
||||
interpreter_frame_monitor_block_bottom_offset = interpreter_frame_initial_sp_offset,
|
||||
|
||||
// Entry frames
|
||||
entry_frame_call_wrapper_offset = AARCH64_ONLY(2) NOT_AARCH64(0)
|
||||
entry_frame_call_wrapper_offset = 0
|
||||
};
|
||||
|
||||
intptr_t ptr_at(int offset) const {
|
||||
@ -107,9 +97,7 @@
|
||||
|
||||
frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc);
|
||||
|
||||
#ifndef AARCH64
|
||||
frame(intptr_t* sp, intptr_t* fp);
|
||||
#endif // !AARCH64
|
||||
|
||||
void init(intptr_t* sp, intptr_t* fp, address pc);
|
||||
|
||||
@ -119,18 +107,11 @@
|
||||
|
||||
inline address* sender_pc_addr() const;
|
||||
|
||||
#ifdef AARCH64
|
||||
// Used by template based interpreter deoptimization
|
||||
void interpreter_frame_set_stack_top(intptr_t* stack_top);
|
||||
void interpreter_frame_set_extended_sp(intptr_t* sp);
|
||||
|
||||
#else
|
||||
// expression stack tos if we are nested in a java call
|
||||
intptr_t* interpreter_frame_last_sp() const;
|
||||
|
||||
// deoptimization support
|
||||
void interpreter_frame_set_last_sp(intptr_t* sp);
|
||||
#endif // AARCH64
|
||||
|
||||
// helper to update a map with callee-saved FP
|
||||
static void update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr);
|
||||
|
@ -83,7 +83,6 @@ inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address
|
||||
}
|
||||
}
|
||||
|
||||
#ifndef AARCH64
|
||||
|
||||
inline frame::frame(intptr_t* sp, intptr_t* fp) {
|
||||
_sp = sp;
|
||||
@ -104,7 +103,6 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) {
|
||||
}
|
||||
}
|
||||
|
||||
#endif // !AARCH64
|
||||
|
||||
// Accessors
|
||||
|
||||
@ -148,11 +146,9 @@ inline intptr_t** frame::interpreter_frame_locals_addr() const {
|
||||
return (intptr_t**)addr_at(interpreter_frame_locals_offset);
|
||||
}
|
||||
|
||||
#ifndef AARCH64
|
||||
inline intptr_t* frame::interpreter_frame_last_sp() const {
|
||||
return *(intptr_t**)addr_at(interpreter_frame_last_sp_offset);
|
||||
}
|
||||
#endif // !AARCH64
|
||||
|
||||
inline intptr_t* frame::interpreter_frame_bcp_addr() const {
|
||||
return (intptr_t*)addr_at(interpreter_frame_bcp_offset);
|
||||
@ -181,12 +177,6 @@ inline oop* frame::interpreter_frame_mirror_addr() const {
|
||||
|
||||
// top of expression stack
|
||||
inline intptr_t* frame::interpreter_frame_tos_address() const {
|
||||
#ifdef AARCH64
|
||||
intptr_t* stack_top = (intptr_t*)*addr_at(interpreter_frame_stack_top_offset);
|
||||
assert(stack_top != NULL, "should be stored before call");
|
||||
assert(stack_top <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
|
||||
return stack_top;
|
||||
#else
|
||||
intptr_t* last_sp = interpreter_frame_last_sp();
|
||||
if (last_sp == NULL ) {
|
||||
return sp();
|
||||
@ -197,7 +187,6 @@ inline intptr_t* frame::interpreter_frame_tos_address() const {
|
||||
assert(last_sp <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
|
||||
return last_sp;
|
||||
}
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
inline oop* frame::interpreter_frame_temp_oop_addr() const {
|
||||
|
@ -60,27 +60,16 @@ void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm
|
||||
|
||||
BLOCK_COMMENT("PreBarrier");
|
||||
|
||||
#ifdef AARCH64
|
||||
callee_saved_regs = align_up(callee_saved_regs, 2);
|
||||
for (int i = 0; i < callee_saved_regs; i += 2) {
|
||||
__ raw_push(as_Register(i), as_Register(i+1));
|
||||
}
|
||||
#else
|
||||
RegisterSet saved_regs = RegisterSet(R0, as_Register(callee_saved_regs-1));
|
||||
__ push(saved_regs | R9ifScratched);
|
||||
#endif // AARCH64
|
||||
|
||||
if (addr != R0) {
|
||||
assert_different_registers(count, R0);
|
||||
__ mov(R0, addr);
|
||||
}
|
||||
#ifdef AARCH64
|
||||
__ zero_extend(R1, count, 32); // G1BarrierSetRuntime::write_ref_array_pre_*_entry takes size_t
|
||||
#else
|
||||
if (count != R1) {
|
||||
__ mov(R1, count);
|
||||
}
|
||||
#endif // AARCH64
|
||||
|
||||
if (UseCompressedOops) {
|
||||
__ call(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry));
|
||||
@ -88,13 +77,7 @@ void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm
|
||||
__ call(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry));
|
||||
}
|
||||
|
||||
#ifdef AARCH64
|
||||
for (int i = callee_saved_regs - 2; i >= 0; i -= 2) {
|
||||
__ raw_pop(as_Register(i), as_Register(i+1));
|
||||
}
|
||||
#else
|
||||
__ pop(saved_regs | R9ifScratched);
|
||||
#endif // AARCH64
|
||||
}
|
||||
}
|
||||
|
||||
@ -106,9 +89,6 @@ void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* mas
|
||||
assert_different_registers(count, R0);
|
||||
__ mov(R0, addr);
|
||||
}
|
||||
#ifdef AARCH64
|
||||
__ zero_extend(R1, count, 32); // G1BarrierSetRuntime::write_ref_array_post_entry takes size_t
|
||||
#else
|
||||
if (count != R1) {
|
||||
__ mov(R1, count);
|
||||
}
|
||||
@ -120,17 +100,14 @@ void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* mas
|
||||
// difficult for this particular call site.
|
||||
__ push(R9);
|
||||
#endif // !R9_IS_SCRATCHED
|
||||
#endif // !AARCH64
|
||||
__ call(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry));
|
||||
#ifndef AARCH64
|
||||
#if R9_IS_SCRATCHED
|
||||
__ pop(R9);
|
||||
#endif // !R9_IS_SCRATCHED
|
||||
#endif // !AARCH64
|
||||
}
|
||||
|
||||
// G1 pre-barrier.
|
||||
// Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
|
||||
// Blows all volatile registers R0-R3, Rtemp, LR).
|
||||
// If store_addr != noreg, then previous value is loaded from [store_addr];
|
||||
// in such case store_addr and new_val registers are preserved;
|
||||
// otherwise pre_val register is preserved.
|
||||
@ -186,20 +163,12 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
|
||||
__ bind(runtime);
|
||||
|
||||
// save the live input values
|
||||
#ifdef AARCH64
|
||||
if (store_addr != noreg) {
|
||||
__ raw_push(store_addr, new_val);
|
||||
} else {
|
||||
__ raw_push(pre_val, ZR);
|
||||
}
|
||||
#else
|
||||
if (store_addr != noreg) {
|
||||
// avoid raw_push to support any ordering of store_addr and new_val
|
||||
__ push(RegisterSet(store_addr) | RegisterSet(new_val));
|
||||
} else {
|
||||
__ push(pre_val);
|
||||
}
|
||||
#endif // AARCH64
|
||||
|
||||
if (pre_val != R0) {
|
||||
__ mov(R0, pre_val);
|
||||
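The hunk above is the slow path of G1's SATB pre-barrier: the live inputs are saved, the value about to be overwritten is moved into R0, and the runtime entry is called before the registers are restored. As a rough, illustrative sketch only (the helper declarations below are hypothetical, not HotSpot APIs), the runtime side of `write_ref_field_pre_entry` behaves like:

```cpp
// Illustrative sketch only -- helper declarations here are hypothetical, not HotSpot APIs.
typedef class oopDesc* oop;
struct SATBQueue { void enqueue(oop obj); };

bool       concurrent_marking_active();        // assumed query of the collector state
SATBQueue* satb_queue_for_current_thread();    // assumed thread-local queue accessor

void g1_write_ref_field_pre(oop pre_val) {
  // SATB (snapshot-at-the-beginning): while concurrent marking runs, the value about
  // to be overwritten must stay visible to the marker, so it is recorded in a
  // thread-local queue before the new value is stored.
  if (pre_val == nullptr) return;               // overwriting NULL: nothing to record
  if (!concurrent_marking_active()) return;     // barrier is a no-op outside marking
  satb_queue_for_current_thread()->enqueue(pre_val);
}
```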
@ -208,25 +177,17 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
|
||||
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), R0, R1);
|
||||
|
||||
#ifdef AARCH64
|
||||
if (store_addr != noreg) {
|
||||
__ raw_pop(store_addr, new_val);
|
||||
} else {
|
||||
__ raw_pop(pre_val, ZR);
|
||||
}
|
||||
#else
|
||||
if (store_addr != noreg) {
|
||||
__ pop(RegisterSet(store_addr) | RegisterSet(new_val));
|
||||
} else {
|
||||
__ pop(pre_val);
|
||||
}
|
||||
#endif // AARCH64
|
||||
|
||||
__ bind(done);
|
||||
}
|
||||
|
||||
// G1 post-barrier.
|
||||
// Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
|
||||
// Blows all volatile registers R0-R3, Rtemp, LR).
|
||||
void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
|
||||
Register store_addr,
|
||||
Register new_val,
|
||||
@ -246,13 +207,8 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
|
||||
// Does store cross heap regions?
|
||||
|
||||
__ eor(tmp1, store_addr, new_val);
|
||||
#ifdef AARCH64
|
||||
__ logical_shift_right(tmp1, tmp1, HeapRegion::LogOfHRGrainBytes);
|
||||
__ cbz(tmp1, done);
|
||||
#else
|
||||
__ movs(tmp1, AsmOperand(tmp1, lsr, HeapRegion::LogOfHRGrainBytes));
|
||||
__ b(done, eq);
|
||||
#endif
|
||||
|
||||
// crosses regions, storing NULL?
|
||||
|
||||
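The `eor` / `movs ... lsr LogOfHRGrainBytes` pair above is the usual G1 post-barrier filter: if the store stays inside one heap region, no remembered-set work is needed and the barrier exits early. A minimal sketch of the same test (illustrative only):

```cpp
#include <cstdint>

// Illustrative: LogOfHRGrainBytes is log2 of the G1 heap-region size.
bool crosses_regions(const void* store_addr, const void* new_val, int LogOfHRGrainBytes) {
  uintptr_t x = (uintptr_t)store_addr ^ (uintptr_t)new_val;
  // XOR keeps only the differing address bits; shifting out the in-region bits
  // leaves zero exactly when both addresses fall in the same region.
  return (x >> LogOfHRGrainBytes) != 0;
}
```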
@ -333,12 +289,8 @@ void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet deco
|
||||
const Register store_addr = obj.base();
|
||||
if (obj.index() != noreg) {
|
||||
assert (obj.disp() == 0, "index or displacement, not both");
|
||||
#ifdef AARCH64
|
||||
__ add(store_addr, obj.base(), obj.index(), obj.extend(), obj.shift_imm());
|
||||
#else
|
||||
assert(obj.offset_op() == add_offset, "addition is expected");
|
||||
__ add(store_addr, obj.base(), AsmOperand(obj.index(), obj.shift(), obj.shift_imm()));
|
||||
#endif // AARCH64
|
||||
} else if (obj.disp() != 0) {
|
||||
__ add(store_addr, obj.base(), obj.disp());
|
||||
}
|
||||
@ -415,16 +367,10 @@ void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler*
|
||||
__ set_info("g1_pre_barrier_slow_id", false);
|
||||
|
||||
// save at least the registers that need saving if the runtime is called
|
||||
#ifdef AARCH64
|
||||
__ raw_push(R0, R1);
|
||||
__ raw_push(R2, R3);
|
||||
const int nb_saved_regs = 4;
|
||||
#else // AARCH64
|
||||
const RegisterSet saved_regs = RegisterSet(R0,R3) | RegisterSet(R12) | RegisterSet(LR);
|
||||
const int nb_saved_regs = 6;
|
||||
assert(nb_saved_regs == saved_regs.size(), "fix nb_saved_regs");
|
||||
__ push(saved_regs);
|
||||
#endif // AARCH64
|
||||
|
||||
const Register r_pre_val_0 = R0; // must be R0, to be ready for the runtime call
|
||||
const Register r_index_1 = R1;
|
||||
@ -454,12 +400,7 @@ void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler*
|
||||
|
||||
__ bind(done);
|
||||
|
||||
#ifdef AARCH64
|
||||
__ raw_pop(R2, R3);
|
||||
__ raw_pop(R0, R1);
|
||||
#else // AARCH64
|
||||
__ pop(saved_regs);
|
||||
#endif // AARCH64
|
||||
|
||||
__ ret();
|
||||
|
||||
@ -492,16 +433,10 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
|
||||
AddressLiteral cardtable(ci_card_table_address_as<address>(), relocInfo::none);
|
||||
|
||||
// save at least the registers that need saving if the runtime is called
|
||||
#ifdef AARCH64
|
||||
__ raw_push(R0, R1);
|
||||
__ raw_push(R2, R3);
|
||||
const int nb_saved_regs = 4;
|
||||
#else // AARCH64
|
||||
const RegisterSet saved_regs = RegisterSet(R0,R3) | RegisterSet(R12) | RegisterSet(LR);
|
||||
const int nb_saved_regs = 6;
|
||||
assert(nb_saved_regs == saved_regs.size(), "fix nb_saved_regs");
|
||||
__ push(saved_regs);
|
||||
#endif // AARCH64
|
||||
|
||||
const Register r_card_addr_0 = R0; // must be R0 for the slow case
|
||||
const Register r_obj_0 = R0;
|
||||
@ -528,12 +463,7 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
|
||||
|
||||
__ bind(done);
|
||||
|
||||
#ifdef AARCH64
|
||||
__ raw_pop(R2, R3);
|
||||
__ raw_pop(R0, R1);
|
||||
#else // AARCH64
|
||||
__ pop(saved_regs);
|
||||
#endif // AARCH64
|
||||
|
||||
__ ret();
|
||||
|
||||
|
@ -37,12 +37,6 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators,
|
||||
case T_OBJECT:
|
||||
case T_ARRAY: {
|
||||
if (in_heap) {
|
||||
#ifdef AARCH64
|
||||
if (UseCompressedOops) {
|
||||
__ ldr_w(dst, src);
|
||||
__ decode_heap_oop(dst);
|
||||
} else
|
||||
#endif // AARCH64
|
||||
{
|
||||
__ ldr(dst, src);
|
||||
}
|
||||
@ -59,13 +53,9 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators,
|
||||
case T_INT: __ ldr_s32 (dst, src); break;
|
||||
case T_ADDRESS: __ ldr (dst, src); break;
|
||||
case T_LONG:
|
||||
#ifdef AARCH64
|
||||
__ ldr (dst, src); break;
|
||||
#else
|
||||
assert(dst == noreg, "only to ltos");
|
||||
__ add (src.index(), src.index(), src.base());
|
||||
__ ldmia (src.index(), RegisterSet(R0_tos_lo) | RegisterSet(R1_tos_hi));
|
||||
#endif // AARCH64
|
||||
break;
|
||||
#ifdef __SOFTFP__
|
||||
case T_FLOAT:
|
||||
@ -102,15 +92,6 @@ void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators
|
||||
case T_OBJECT:
|
||||
case T_ARRAY: {
|
||||
if (in_heap) {
|
||||
#ifdef AARCH64
|
||||
if (UseCompressedOops) {
|
||||
assert(!dst.uses(src), "not enough registers");
|
||||
if (!is_null) {
|
||||
__ encode_heap_oop(src);
|
||||
}
|
||||
__ str_w(val, obj);
|
||||
} else
|
||||
#endif // AARCH64
|
||||
{
|
||||
__ str(val, obj);
|
||||
}
|
||||
@ -130,13 +111,9 @@ void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators
|
||||
case T_INT: __ str (val, obj); break;
|
||||
case T_ADDRESS: __ str (val, obj); break;
|
||||
case T_LONG:
|
||||
#ifdef AARCH64
|
||||
__ str (val, obj); break;
|
||||
#else // AARCH64
|
||||
assert(val == noreg, "only tos");
|
||||
__ add (obj.index(), obj.index(), obj.base());
|
||||
__ stmia (obj.index(), RegisterSet(R0_tos_lo) | RegisterSet(R1_tos_hi));
|
||||
#endif // AARCH64
|
||||
break;
|
||||
#ifdef __SOFTFP__
|
||||
case T_FLOAT:
|
||||
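In the surviving 32-bit path above a `T_LONG` cannot live in a single register, so the value is moved with `ldmia`/`stmia` through the pair `R0_tos_lo`:`R1_tos_hi`. Roughly, the split is (illustrative only):

```cpp
#include <cstdint>

// How a 64-bit tos value maps onto two 32-bit registers on little-endian ARM (sketch).
struct TosPair { uint32_t lo; uint32_t hi; };   // R0_tos_lo, R1_tos_hi

TosPair split(int64_t v) {
  TosPair p;
  p.lo = (uint32_t)(v & 0xFFFFFFFF);             // low word  -> R0
  p.hi = (uint32_t)((uint64_t)v >> 32);          // high word -> R1
  return p;
}
```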
@ -188,7 +165,7 @@ void BarrierSetAssembler::eden_allocate(MacroAssembler* masm, Register obj, Regi
|
||||
assert_different_registers(obj, obj_end, top_addr, heap_end);
|
||||
}
|
||||
|
||||
bool load_const = AARCH64_ONLY(false) NOT_AARCH64(VM_Version::supports_movw() ); // TODO-AARCH64 check performance
|
||||
bool load_const = VM_Version::supports_movw();
|
||||
if (load_const) {
|
||||
__ mov_address(top_addr, (address)Universe::heap()->top_addr(), symbolic_Relocation::eden_top_reference);
|
||||
} else {
|
||||
@ -197,13 +174,7 @@ void BarrierSetAssembler::eden_allocate(MacroAssembler* masm, Register obj, Regi
|
||||
// Calculate new heap_top by adding the size of the object
|
||||
Label retry;
|
||||
__ bind(retry);
|
||||
|
||||
#ifdef AARCH64
|
||||
__ ldxr(obj, top_addr);
|
||||
#else
|
||||
__ ldr(obj, Address(top_addr));
|
||||
#endif // AARCH64
|
||||
|
||||
__ ldr(heap_end, Address(top_addr, (intptr_t)ch->end_addr() - (intptr_t)ch->top_addr()));
|
||||
__ add_rc(obj_end, obj, size_expression);
|
||||
// Check if obj_end wrapped around, i.e., obj_end < obj. If yes, jump to the slow case.
|
||||
@ -213,13 +184,8 @@ void BarrierSetAssembler::eden_allocate(MacroAssembler* masm, Register obj, Regi
|
||||
__ cmp(obj_end, heap_end);
|
||||
__ b(slow_case, hi);
|
||||
|
||||
#ifdef AARCH64
|
||||
__ stxr(heap_end/*scratched*/, obj_end, top_addr);
|
||||
__ cbnz_w(heap_end, retry);
|
||||
#else
|
||||
__ atomic_cas_bool(obj, obj_end, top_addr, 0, heap_end/*scratched*/);
|
||||
__ b(retry, ne);
|
||||
#endif // AARCH64
|
||||
|
||||
incr_allocated_bytes(masm, size_expression, tmp1);
|
||||
}
|
||||
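The two hunks above implement the classic bump-pointer fast path for eden allocation: load the current top, compute the new top, bail to the slow case on wrap-around or on reaching the heap end, then publish the new top with a CAS and retry on contention. A simplified sketch (the `std::atomic` below stands in for HotSpot's `Atomic::cmpxchg`):

```cpp
#include <atomic>
#include <cstddef>

typedef char* HeapWord;   // simplified; HotSpot's HeapWord* is word-sized

// Illustrative bump-pointer allocator over a shared [top, end) range.
HeapWord eden_allocate(std::atomic<HeapWord>& top, HeapWord end, size_t size_in_bytes) {
  for (;;) {
    HeapWord obj     = top.load(std::memory_order_relaxed);
    HeapWord obj_end = obj + size_in_bytes;
    if (obj_end < obj || obj_end > end) {
      return nullptr;                         // wrap-around or eden exhausted -> slow case
    }
    // Publish the new top only if no other thread allocated in the meantime.
    if (top.compare_exchange_weak(obj, obj_end)) {
      return obj;                             // [obj, obj_end) now belongs to this thread
    }
    // CAS failed: another thread moved top; retry with the fresh value.
  }
}
```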
@ -239,11 +205,6 @@ void BarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj, Regi
|
||||
}
|
||||
|
||||
void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm, RegisterOrConstant size_in_bytes, Register tmp) {
|
||||
#ifdef AARCH64
|
||||
__ ldr(tmp, Address(Rthread, in_bytes(JavaThread::allocated_bytes_offset())));
|
||||
__ add_rc(tmp, tmp, size_in_bytes);
|
||||
__ str(tmp, Address(Rthread, in_bytes(JavaThread::allocated_bytes_offset())));
|
||||
#else
|
||||
// Bump total bytes allocated by this thread
|
||||
Label done;
|
||||
|
||||
@ -281,5 +242,4 @@ void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm, RegisterOrC
|
||||
|
||||
// Unborrow the Rthread
|
||||
__ sub(Rthread, Ralloc, in_bytes(JavaThread::allocated_bytes_offset()));
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
@ -119,7 +119,6 @@ void CardTableBarrierSetAssembler::store_check_part1(MacroAssembler* masm, Regis
|
||||
Possible cause is a cache miss (card table base address resides in a
|
||||
rarely accessed area of thread descriptor).
|
||||
*/
|
||||
// TODO-AARCH64 Investigate if mov_slow is faster than ldr from Rthread on AArch64
|
||||
__ mov_address(card_table_base, (address)ct->byte_map_base(), symbolic_Relocation::card_table_reference);
|
||||
}
|
||||
|
||||
@ -136,12 +135,7 @@ void CardTableBarrierSetAssembler::store_check_part2(MacroAssembler* masm, Regis
|
||||
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "Adjust store check code");
|
||||
|
||||
assert(CardTable::dirty_card_val() == 0, "Dirty card value must be 0 due to optimizations.");
|
||||
#ifdef AARCH64
|
||||
add(card_table_base, card_table_base, AsmOperand(obj, lsr, CardTable::card_shift));
|
||||
Address card_table_addr(card_table_base);
|
||||
#else
|
||||
Address card_table_addr(card_table_base, obj, lsr, CardTable::card_shift);
|
||||
#endif
|
||||
|
||||
if (UseCondCardMark) {
|
||||
if (ct->scanned_concurrently()) {
|
||||
@ -164,9 +158,6 @@ void CardTableBarrierSetAssembler::store_check_part2(MacroAssembler* masm, Regis
|
||||
}
|
||||
|
||||
void CardTableBarrierSetAssembler::set_card(MacroAssembler* masm, Register card_table_base, Address card_table_addr, Register tmp) {
|
||||
#ifdef AARCH64
|
||||
strb(ZR, card_table_addr);
|
||||
#else
|
||||
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
|
||||
CardTable* ct = ctbs->card_table();
|
||||
if ((((uintptr_t)ct->byte_map_base() & 0xff) == 0)) {
|
||||
@ -178,5 +169,4 @@ void CardTableBarrierSetAssembler::set_card(MacroAssembler* masm, Register card_
|
||||
__ mov(tmp, 0);
|
||||
__ strb(tmp, card_table_addr);
|
||||
}
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
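The two store-check halves above compute the card address as `byte_map_base + (obj >> card_shift)` and then dirty it with a zero byte (the dirty value is 0, which is why storing a zeroed register with `strb` suffices, and why a base register whose low byte happens to be 0 can double as the source). Conceptually (a sketch, not the generated code):

```cpp
#include <cstdint>

typedef volatile int8_t CardValue;

// Illustrative post-write card mark; byte_map_base/card_shift mirror CardTable's fields.
void post_store_card_mark(CardValue* byte_map_base, int card_shift,
                          const void* store_addr, bool use_cond_card_mark) {
  CardValue* card = byte_map_base + ((uintptr_t)store_addr >> card_shift);
  const int8_t dirty = 0;                     // CardTable::dirty_card_val() == 0
  if (use_cond_card_mark && *card == dirty) {
    return;                                   // already dirty: skip the extra store
  }
  *card = dirty;
}
```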
@ -25,19 +25,7 @@
#ifndef CPU_ARM_VM_GLOBALDEFINITIONS_ARM_HPP
#define CPU_ARM_VM_GLOBALDEFINITIONS_ARM_HPP

#ifdef AARCH64
#define AARCH64_ONLY(code) code
#define AARCH64_ONLY_ARG(arg) , arg
#define NOT_AARCH64(code)
#define NOT_AARCH64_ARG(arg)
#else
#define AARCH64_ONLY(code)
#define AARCH64_ONLY_ARG(arg)
#define NOT_AARCH64(code) code
#define NOT_AARCH64_ARG(arg) , arg
#endif

const int StackAlignmentInBytes = AARCH64_ONLY(16) NOT_AARCH64(8);
const int StackAlignmentInBytes = 8;

// Indicates whether the C calling conventions require that
// 32-bit integer argument values are extended to 64 bits.
@ -49,24 +37,19 @@ const bool HaveVFP = false;
|
||||
const bool HaveVFP = true;
|
||||
#endif
|
||||
|
||||
#if defined(__ARM_PCS_VFP) || defined(AARCH64)
|
||||
#if defined(__ARM_PCS_VFP)
|
||||
#define __ABI_HARD__
|
||||
#endif
|
||||
|
||||
#if defined(__ARM_ARCH_7A__) || defined(AARCH64)
|
||||
#if defined(__ARM_ARCH_7A__)
|
||||
#define SUPPORTS_NATIVE_CX8
|
||||
#endif
|
||||
|
||||
#define STUBROUTINES_MD_HPP "stubRoutines_arm.hpp"
|
||||
#define INTERP_MASM_MD_HPP "interp_masm_arm.hpp"
|
||||
#define TEMPLATETABLE_MD_HPP "templateTable_arm.hpp"
|
||||
#ifdef AARCH64
|
||||
#define ADGLOBALS_MD_HPP "adfiles/adGlobals_arm_64.hpp"
|
||||
#define AD_MD_HPP "adfiles/ad_arm_64.hpp"
|
||||
#else
|
||||
#define ADGLOBALS_MD_HPP "adfiles/adGlobals_arm_32.hpp"
|
||||
#define AD_MD_HPP "adfiles/ad_arm_32.hpp"
|
||||
#endif
|
||||
#define C1_LIRGENERATOR_MD_HPP "c1_LIRGenerator_arm.hpp"
|
||||
|
||||
#ifdef TARGET_COMPILER_gcc
|
||||
|
@ -88,13 +88,5 @@ define_pd_global(bool, ThreadLocalHandshakes, false);
notproduct, \
range, \
constraint, \
writeable) \
\
develop(bool, VerifyInterpreterStackTop, false, \
"Verify interpreter stack top at every stack expansion (AArch64 only)") \
\
develop(bool, ZapHighNonSignificantBits, false, \
"Zap high non-significant bits of values (AArch64 only)") \
\

writeable)
#endif // CPU_ARM_VM_GLOBALS_ARM_HPP
|
@ -35,7 +35,7 @@
#define __ masm->

int InlineCacheBuffer::ic_stub_code_size() {
return (AARCH64_ONLY(8) NOT_AARCH64(4)) * Assembler::InstructionSize;
return (4 * Assembler::InstructionSize);
}

void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
|
@ -29,49 +29,12 @@
|
||||
|
||||
#define __ _masm->
|
||||
|
||||
#ifdef AARCH64
|
||||
|
||||
static int icache_flush(address addr, int lines, int magic) {
|
||||
// TODO-AARCH64 Figure out actual cache line size (mrs Xt, CTR_EL0)
|
||||
|
||||
address p = addr;
|
||||
for (int i = 0; i < lines; i++, p += ICache::line_size) {
|
||||
__asm__ volatile(
|
||||
" dc cvau, %[p]"
|
||||
:
|
||||
: [p] "r" (p)
|
||||
: "memory");
|
||||
}
|
||||
|
||||
__asm__ volatile(
|
||||
" dsb ish"
|
||||
: : : "memory");
|
||||
|
||||
p = addr;
|
||||
for (int i = 0; i < lines; i++, p += ICache::line_size) {
|
||||
__asm__ volatile(
|
||||
" ic ivau, %[p]"
|
||||
:
|
||||
: [p] "r" (p)
|
||||
: "memory");
|
||||
}
|
||||
|
||||
__asm__ volatile(
|
||||
" dsb ish\n\t"
|
||||
" isb\n\t"
|
||||
: : : "memory");
|
||||
|
||||
return magic;
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
static int icache_flush(address addr, int lines, int magic) {
|
||||
__builtin___clear_cache(addr, addr + (lines << ICache::log2_line_size));
|
||||
return magic;
|
||||
}
|
||||
|
||||
#endif // AARCH64
|
||||
|
||||
void ICacheStubGenerator::generate_icache_flush(ICache::flush_icache_stub_t* flush_icache_stub) {
|
||||
address start = (address)icache_flush;
|
||||
|
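After this change only the 32-bit flush survives, and it leans on the compiler builtin rather than hand-written cache maintenance. For reference, a minimal standalone use of the same builtin after patching code looks like this (illustrative; GCC/Clang only):

```cpp
#include <cstddef>

// Illustrative: make freshly written instructions visible to the instruction fetcher.
void flush_code_range(void* begin, size_t bytes) {
  char* p = static_cast<char*>(begin);
  // GCC/Clang builtin: performs the D-cache clean / I-cache invalidate (plus any
  // required barriers) for [p, p + bytes) on targets that need it.
  __builtin___clear_cache(p, p + bytes);
}
```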
@ -54,7 +54,7 @@ InterpreterMacroAssembler::InterpreterMacroAssembler(CodeBuffer* code) : MacroAs
}

void InterpreterMacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
#if defined(ASSERT) && !defined(AARCH64)
#ifdef ASSERT
// Ensure that last_sp is not filled.
{ Label L;
ldr(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
@ -62,27 +62,15 @@ void InterpreterMacroAssembler::call_VM_helper(Register oop_result, address entr
|
||||
stop("InterpreterMacroAssembler::call_VM_helper: last_sp != NULL");
|
||||
bind(L);
|
||||
}
|
||||
#endif // ASSERT && !AARCH64
|
||||
#endif // ASSERT
|
||||
|
||||
// Rbcp must be saved/restored since it may change due to GC.
|
||||
save_bcp();
|
||||
|
||||
#ifdef AARCH64
|
||||
check_no_cached_stack_top(Rtemp);
|
||||
save_stack_top();
|
||||
check_extended_sp(Rtemp);
|
||||
cut_sp_before_call();
|
||||
#endif // AARCH64
|
||||
|
||||
// super call
|
||||
MacroAssembler::call_VM_helper(oop_result, entry_point, number_of_arguments, check_exceptions);
|
||||
|
||||
#ifdef AARCH64
|
||||
// Restore SP to extended SP
|
||||
restore_sp_after_call(Rtemp);
|
||||
check_stack_top();
|
||||
clear_cached_stack_top();
|
||||
#endif // AARCH64
|
||||
|
||||
// Restore interpreter specific registers.
|
||||
restore_bcp();
|
||||
@ -128,10 +116,8 @@ void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
|
||||
const Address tos_addr(thread_state, JvmtiThreadState::earlyret_tos_offset());
|
||||
const Address oop_addr(thread_state, JvmtiThreadState::earlyret_oop_offset());
|
||||
const Address val_addr(thread_state, JvmtiThreadState::earlyret_value_offset());
|
||||
#ifndef AARCH64
|
||||
const Address val_addr_hi(thread_state, JvmtiThreadState::earlyret_value_offset()
|
||||
+ in_ByteSize(wordSize));
|
||||
#endif // !AARCH64
|
||||
|
||||
Register zero = zero_register(Rtemp);
|
||||
|
||||
@ -141,11 +127,7 @@ void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
|
||||
interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
|
||||
break;
|
||||
|
||||
#ifdef AARCH64
|
||||
case ltos: ldr(R0_tos, val_addr); break;
|
||||
#else
|
||||
case ltos: ldr(R1_tos_hi, val_addr_hi); // fall through
|
||||
#endif // AARCH64
|
||||
case btos: // fall through
|
||||
case ztos: // fall through
|
||||
case ctos: // fall through
|
||||
@ -163,9 +145,7 @@ void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
|
||||
}
|
||||
// Clean up tos value in the thread object
|
||||
str(zero, val_addr);
|
||||
#ifndef AARCH64
|
||||
str(zero, val_addr_hi);
|
||||
#endif // !AARCH64
|
||||
|
||||
mov(Rtemp, (int) ilgl);
|
||||
str_32(Rtemp, tos_addr);
|
||||
@ -220,7 +200,6 @@ void InterpreterMacroAssembler::get_index_at_bcp(Register index, int bcp_offset,
|
||||
ldrb(tmp_reg, Address(Rbcp, bcp_offset));
|
||||
orr(index, tmp_reg, AsmOperand(index, lsl, BitsPerByte));
|
||||
} else if (index_size == sizeof(u4)) {
|
||||
// TODO-AARCH64: consider using unaligned access here
|
||||
ldrb(index, Address(Rbcp, bcp_offset+3));
|
||||
ldrb(tmp_reg, Address(Rbcp, bcp_offset+2));
|
||||
orr(index, tmp_reg, AsmOperand(index, lsl, BitsPerByte));
|
||||
@ -252,7 +231,6 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Regis
|
||||
|
||||
// convert from field index to ConstantPoolCacheEntry index
|
||||
assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
|
||||
// TODO-AARCH64 merge this shift with shift "add(..., Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord))" after this method is called
|
||||
logical_shift_left(index, index, 2);
|
||||
}
|
||||
|
||||
@ -261,13 +239,8 @@ void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register
|
||||
get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
|
||||
// caution index and bytecode can be the same
|
||||
add(bytecode, cache, AsmOperand(index, lsl, LogBytesPerWord));
|
||||
#ifdef AARCH64
|
||||
add(bytecode, bytecode, (1 + byte_no) + in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
|
||||
ldarb(bytecode, bytecode);
|
||||
#else
|
||||
ldrb(bytecode, Address(bytecode, (1 + byte_no) + in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset())));
|
||||
TemplateTable::volatile_barrier(MacroAssembler::LoadLoad, noreg, true);
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
// Sets cache. Blows reg_tmp.
|
||||
@ -365,31 +338,21 @@ void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
|
||||
ldr(supers_arr, Address(Rsub_klass, Klass::secondary_supers_offset()));
|
||||
|
||||
ldr_u32(supers_cnt, Address(supers_arr, Array<Klass*>::length_offset_in_bytes())); // Load the array length
|
||||
#ifdef AARCH64
|
||||
cbz(supers_cnt, not_subtype);
|
||||
add(supers_arr, supers_arr, Array<Klass*>::base_offset_in_bytes());
|
||||
#else
|
||||
cmp(supers_cnt, 0);
|
||||
|
||||
// Skip to the start of array elements and prefetch the first super-klass.
|
||||
ldr(cur_super, Address(supers_arr, Array<Klass*>::base_offset_in_bytes(), pre_indexed), ne);
|
||||
b(not_subtype, eq);
|
||||
#endif // AARCH64
|
||||
|
||||
bind(loop);
|
||||
|
||||
#ifdef AARCH64
|
||||
ldr(cur_super, Address(supers_arr, wordSize, post_indexed));
|
||||
#endif // AARCH64
|
||||
|
||||
cmp(cur_super, Rsuper_klass);
|
||||
b(update_cache, eq);
|
||||
|
||||
subs(supers_cnt, supers_cnt, 1);
|
||||
|
||||
#ifndef AARCH64
|
||||
ldr(cur_super, Address(supers_arr, wordSize, pre_indexed), ne);
|
||||
#endif // !AARCH64
|
||||
|
||||
b(loop, ne);
|
||||
|
||||
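The loop above is the slow part of the interpreter's subtype check: a linear scan of the candidate's secondary-supers array, with the one-element secondary-super cache updated on a hit. Stripped of the register juggling, the logic is roughly (sketch; the `Klass` below is a simplified stand-in, not the HotSpot class):

```cpp
// Illustrative model of the scan emitted above.
struct Klass {
  Klass** secondary_supers;       // indirect supertypes (interfaces, etc.)
  int     secondary_supers_len;
  Klass*  secondary_super_cache;  // remembers the last successful lookup
};

bool is_subtype_of(Klass* sub, Klass* super) {
  for (int i = 0; i < sub->secondary_supers_len; i++) {
    if (sub->secondary_supers[i] == super) {
      sub->secondary_super_cache = super;   // next check against 'super' hits the cache
      return true;
    }
  }
  return false;                             // fell off the end: not a subtype
}
```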
@ -419,33 +382,18 @@ void InterpreterMacroAssembler::pop_i(Register r) {
|
||||
zap_high_non_significant_bits(r);
|
||||
}
|
||||
|
||||
#ifdef AARCH64
|
||||
void InterpreterMacroAssembler::pop_l(Register r) {
|
||||
assert(r != Rstack_top, "unpredictable instruction");
|
||||
ldr(r, Address(Rstack_top, 2*wordSize, post_indexed));
|
||||
}
|
||||
#else
|
||||
void InterpreterMacroAssembler::pop_l(Register lo, Register hi) {
|
||||
assert_different_registers(lo, hi);
|
||||
assert(lo < hi, "lo must be < hi");
|
||||
pop(RegisterSet(lo) | RegisterSet(hi));
|
||||
}
|
||||
#endif // AARCH64
|
||||
|
||||
void InterpreterMacroAssembler::pop_f(FloatRegister fd) {
|
||||
#ifdef AARCH64
|
||||
ldr_s(fd, Address(Rstack_top, wordSize, post_indexed));
|
||||
#else
|
||||
fpops(fd);
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::pop_d(FloatRegister fd) {
|
||||
#ifdef AARCH64
|
||||
ldr_d(fd, Address(Rstack_top, 2*wordSize, post_indexed));
|
||||
#else
|
||||
fpopd(fd);
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
|
||||
@ -458,11 +406,7 @@ void InterpreterMacroAssembler::pop(TosState state) {
|
||||
case ctos: // fall through
|
||||
case stos: // fall through
|
||||
case itos: pop_i(R0_tos); break;
|
||||
#ifdef AARCH64
|
||||
case ltos: pop_l(R0_tos); break;
|
||||
#else
|
||||
case ltos: pop_l(R0_tos_lo, R1_tos_hi); break;
|
||||
#endif // AARCH64
|
||||
#ifdef __SOFTFP__
|
||||
case ftos: pop_i(R0_tos); break;
|
||||
case dtos: pop_l(R0_tos_lo, R1_tos_hi); break;
|
||||
@ -488,36 +432,18 @@ void InterpreterMacroAssembler::push_i(Register r) {
|
||||
check_stack_top_on_expansion();
|
||||
}
|
||||
|
||||
#ifdef AARCH64
|
||||
void InterpreterMacroAssembler::push_l(Register r) {
|
||||
assert(r != Rstack_top, "unpredictable instruction");
|
||||
stp(r, ZR, Address(Rstack_top, -2*wordSize, pre_indexed));
|
||||
check_stack_top_on_expansion();
|
||||
}
|
||||
#else
|
||||
void InterpreterMacroAssembler::push_l(Register lo, Register hi) {
|
||||
assert_different_registers(lo, hi);
|
||||
assert(lo < hi, "lo must be < hi");
|
||||
push(RegisterSet(lo) | RegisterSet(hi));
|
||||
}
|
||||
#endif // AARCH64
|
||||
|
||||
void InterpreterMacroAssembler::push_f() {
|
||||
#ifdef AARCH64
|
||||
str_s(S0_tos, Address(Rstack_top, -wordSize, pre_indexed));
|
||||
check_stack_top_on_expansion();
|
||||
#else
|
||||
fpushs(S0_tos);
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::push_d() {
|
||||
#ifdef AARCH64
|
||||
str_d(D0_tos, Address(Rstack_top, -2*wordSize, pre_indexed));
|
||||
check_stack_top_on_expansion();
|
||||
#else
|
||||
fpushd(D0_tos);
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
// Transition state -> vtos. Blows Rtemp.
|
||||
@ -530,11 +456,7 @@ void InterpreterMacroAssembler::push(TosState state) {
|
||||
case ctos: // fall through
|
||||
case stos: // fall through
|
||||
case itos: push_i(R0_tos); break;
|
||||
#ifdef AARCH64
|
||||
case ltos: push_l(R0_tos); break;
|
||||
#else
|
||||
case ltos: push_l(R0_tos_lo, R1_tos_hi); break;
|
||||
#endif // AARCH64
|
||||
#ifdef __SOFTFP__
|
||||
case ftos: push_i(R0_tos); break;
|
||||
case dtos: push_l(R0_tos_lo, R1_tos_hi); break;
|
||||
@ -548,7 +470,6 @@ void InterpreterMacroAssembler::push(TosState state) {
|
||||
}
|
||||
|
||||
|
||||
#ifndef AARCH64
|
||||
|
||||
// Converts return value in R0/R1 (interpreter calling conventions) to TOS cached value.
|
||||
void InterpreterMacroAssembler::convert_retval_to_tos(TosState state) {
|
||||
@ -576,7 +497,6 @@ void InterpreterMacroAssembler::convert_tos_to_retval(TosState state) {
|
||||
#endif // !__SOFTFP__ && !__ABI_HARD__
|
||||
}
|
||||
|
||||
#endif // !AARCH64
|
||||
|
||||
|
||||
// Helpers for swap and dup
|
||||
@ -590,20 +510,12 @@ void InterpreterMacroAssembler::store_ptr(int n, Register val) {
|
||||
|
||||
|
||||
void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {
|
||||
#ifdef AARCH64
|
||||
check_no_cached_stack_top(Rtemp);
|
||||
save_stack_top();
|
||||
cut_sp_before_call();
|
||||
mov(Rparams, Rstack_top);
|
||||
#endif // AARCH64
|
||||
|
||||
// set sender sp
|
||||
mov(Rsender_sp, SP);
|
||||
|
||||
#ifndef AARCH64
|
||||
// record last_sp
|
||||
str(Rsender_sp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
|
||||
#endif // !AARCH64
|
||||
}
|
||||
|
||||
// Jump to from_interpreted entry of a call unless single stepping is possible
|
||||
@ -619,19 +531,8 @@ void InterpreterMacroAssembler::jump_from_interpreted(Register method) {
|
||||
// interp_only_mode if these events CAN be enabled.
|
||||
|
||||
ldr_s32(Rtemp, Address(Rthread, JavaThread::interp_only_mode_offset()));
|
||||
#ifdef AARCH64
|
||||
{
|
||||
Label not_interp_only_mode;
|
||||
|
||||
cbz(Rtemp, not_interp_only_mode);
|
||||
indirect_jump(Address(method, Method::interpreter_entry_offset()), Rtemp);
|
||||
|
||||
bind(not_interp_only_mode);
|
||||
}
|
||||
#else
|
||||
cmp(Rtemp, 0);
|
||||
ldr(PC, Address(method, Method::interpreter_entry_offset()), ne);
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
indirect_jump(Address(method, Method::from_interpreted_offset()), Rtemp);
|
||||
@ -658,12 +559,7 @@ void InterpreterMacroAssembler::dispatch_base(TosState state,
|
||||
bool verifyoop) {
|
||||
if (VerifyActivationFrameSize) {
|
||||
Label L;
|
||||
#ifdef AARCH64
|
||||
mov(Rtemp, SP);
|
||||
sub(Rtemp, FP, Rtemp);
|
||||
#else
|
||||
sub(Rtemp, FP, SP);
|
||||
#endif // AARCH64
|
||||
int min_frame_size = (frame::link_offset - frame::interpreter_frame_initial_sp_offset) * wordSize;
|
||||
cmp(Rtemp, min_frame_size);
|
||||
b(L, ge);
|
||||
@ -692,16 +588,10 @@ void InterpreterMacroAssembler::dispatch_base(TosState state,
|
||||
if (state == vtos) {
|
||||
indirect_jump(Address::indexed_ptr(RdispatchTable, R3_bytecode), Rtemp);
|
||||
} else {
|
||||
#ifdef AARCH64
|
||||
sub(Rtemp, R3_bytecode, (Interpreter::distance_from_dispatch_table(vtos) -
|
||||
Interpreter::distance_from_dispatch_table(state)));
|
||||
indirect_jump(Address::indexed_ptr(RdispatchTable, Rtemp), Rtemp);
|
||||
#else
|
||||
// on 32-bit ARM this method is faster than the one above.
|
||||
sub(Rtemp, RdispatchTable, (Interpreter::distance_from_dispatch_table(vtos) -
|
||||
Interpreter::distance_from_dispatch_table(state)) * wordSize);
|
||||
indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp);
|
||||
#endif
|
||||
}
|
||||
} else {
|
||||
assert(table_mode == DispatchNormal, "invalid dispatch table mode");
|
||||
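The 32-bit branch above folds the per-TosState table selection into the base register instead of the index: rather than adjusting `R3_bytecode`, it moves `RdispatchTable` back by the word-scaled distance between the vtos table and the wanted table, so the final indexed load still uses the untouched bytecode. A small worked model of that equivalence (illustrative only, not the interpreter's real table layout):

```cpp
// vtos_table points at the vtos dispatch table; the table for another TosState sits at a
// fixed entry distance from it, here expressed via the two "distance" values.
void* dispatch_target(void** vtos_table, int distance_vtos, int distance_state, int bytecode) {
  // Equivalent forms of the same lookup:
  //   vtos_table[bytecode - (distance_vtos - distance_state)]     (adjust the index)
  //   (vtos_table - (distance_vtos - distance_state))[bytecode]   (adjust the base)
  void** state_table = vtos_table - (distance_vtos - distance_state);
  return state_table[bytecode];
}
```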
@ -897,25 +787,18 @@ void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_a
|
||||
// points to word before bottom of monitor block
|
||||
|
||||
cmp(Rcur, Rbottom); // check if there are no monitors
|
||||
#ifndef AARCH64
|
||||
ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
|
||||
// prefetch monitor's object
|
||||
#endif // !AARCH64
|
||||
b(no_unlock, eq);
|
||||
|
||||
bind(loop);
|
||||
#ifdef AARCH64
|
||||
ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()));
|
||||
#endif // AARCH64
|
||||
// check if current entry is used
|
||||
cbnz(Rcur_obj, exception_monitor_is_still_locked);
|
||||
|
||||
add(Rcur, Rcur, entry_size); // otherwise advance to next entry
|
||||
cmp(Rcur, Rbottom); // check if bottom reached
|
||||
#ifndef AARCH64
|
||||
ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
|
||||
// prefetch monitor's object
|
||||
#endif // !AARCH64
|
||||
b(loop, ne); // if not at bottom then check this entry
|
||||
}
|
||||
|
||||
@ -929,15 +812,9 @@ void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_a
|
||||
}
|
||||
|
||||
// remove activation
|
||||
#ifdef AARCH64
|
||||
ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
|
||||
ldp(FP, LR, Address(FP));
|
||||
mov(SP, Rtemp);
|
||||
#else
|
||||
mov(Rtemp, FP);
|
||||
ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
|
||||
ldr(SP, Address(Rtemp, frame::interpreter_frame_sender_sp_offset * wordSize));
|
||||
#endif
|
||||
|
||||
if (ret_addr != LR) {
|
||||
mov(ret_addr, LR);
|
||||
@ -965,7 +842,7 @@ void InterpreterMacroAssembler::set_do_not_unlock_if_synchronized(bool flag, Reg
|
||||
//
|
||||
// Argument: R1 : Points to BasicObjectLock to be used for locking.
|
||||
// Must be initialized with object to lock.
|
||||
// Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR. Calls VM.
|
||||
// Blows volatile registers R0-R3, Rtemp, LR. Calls VM.
|
||||
void InterpreterMacroAssembler::lock_object(Register Rlock) {
|
||||
assert(Rlock == R1, "the second argument");
|
||||
|
||||
@ -991,15 +868,6 @@ void InterpreterMacroAssembler::lock_object(Register Rlock) {
|
||||
biased_locking_enter(Robj, Rmark/*scratched*/, R0, false, Rtemp, done, slow_case);
|
||||
}
|
||||
|
||||
#ifdef AARCH64
|
||||
assert(oopDesc::mark_offset_in_bytes() == 0, "must be");
|
||||
ldr(Rmark, Robj);
|
||||
|
||||
// Test if object is already locked
|
||||
assert(markOopDesc::unlocked_value == 1, "adjust this code");
|
||||
tbz(Rmark, exact_log2(markOopDesc::unlocked_value), already_locked);
|
||||
|
||||
#else // AARCH64
|
||||
|
||||
// On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
|
||||
// That would be acceptable as ether CAS or slow case path is taken in that case.
|
||||
@ -1013,7 +881,6 @@ void InterpreterMacroAssembler::lock_object(Register Rlock) {
|
||||
tst(Rmark, markOopDesc::unlocked_value);
|
||||
b(already_locked, eq);
|
||||
|
||||
#endif // !AARCH64
|
||||
// Save old object->mark() into BasicLock's displaced header
|
||||
str(Rmark, Address(Rlock, mark_offset));
|
||||
|
||||
@ -1059,19 +926,6 @@ void InterpreterMacroAssembler::lock_object(Register Rlock) {
|
||||
// conditions into a single test:
|
||||
// => ((mark - SP) & (3 - os::pagesize())) == 0
|
||||
|
||||
#ifdef AARCH64
|
||||
// Use the single check since the immediate is OK for AARCH64
|
||||
sub(R0, Rmark, Rstack_top);
|
||||
intptr_t mask = ((intptr_t)3) - ((intptr_t)os::vm_page_size());
|
||||
Assembler::LogicalImmediate imm(mask, false);
|
||||
ands(R0, R0, imm);
|
||||
|
||||
// For recursive case store 0 into lock record.
|
||||
// It is harmless to store it unconditionally as lock record contains some garbage
|
||||
// value in its _displaced_header field by this moment.
|
||||
str(ZR, Address(Rlock, mark_offset));
|
||||
|
||||
#else // AARCH64
|
||||
// (3 - os::pagesize()) cannot be encoded as an ARM immediate operand.
|
||||
// Check independently the low bits and the distance to SP.
|
||||
// -1- test low 2 bits
|
||||
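The comment above describes folding the two conditions for a recursive stack lock (the displaced header is word-aligned, and it points into the current thread's stack within one page above SP) into a single mask test. Spelled out as a sketch (with a 4 KiB page the mask `3 - page_size` is 0xFFFFF003 on 32-bit ARM):

```cpp
#include <cstdint>

// Illustrative: decide whether a displaced header value is really a pointer into the
// current thread's stack, i.e. a recursive lock already held by this thread.
bool is_recursive_stack_lock(uintptr_t mark, uintptr_t sp, uintptr_t page_size /* e.g. 4096 */) {
  // Single test equivalent to:  (mark & 3) == 0  &&  sp <= mark  &&  mark < sp + page_size
  return ((mark - sp) & (3 - page_size)) == 0;
}
```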
@ -1082,7 +936,6 @@ void InterpreterMacroAssembler::lock_object(Register Rlock) {
|
||||
// If still 'eq' then recursive locking OK: store 0 into lock record
|
||||
str(R0, Address(Rlock, mark_offset), eq);
|
||||
|
||||
#endif // AARCH64
|
||||
|
||||
#ifndef PRODUCT
|
||||
if (PrintBiasedLockingStatistics) {
|
||||
@ -1106,7 +959,7 @@ void InterpreterMacroAssembler::lock_object(Register Rlock) {
|
||||
//
|
||||
// Argument: R1: Points to BasicObjectLock structure for lock
|
||||
// Throw an IllegalMonitorException if object is not locked by current thread
|
||||
// Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR. Calls VM.
|
||||
// Blows volatile registers R0-R3, Rtemp, LR. Calls VM.
|
||||
void InterpreterMacroAssembler::unlock_object(Register Rlock) {
|
||||
assert(Rlock == R1, "the second argument");
|
||||
|
||||
@ -1168,7 +1021,7 @@ void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, Label& ze
|
||||
|
||||
|
||||
// Set the method data pointer for the current bcp.
|
||||
// Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR.
|
||||
// Blows volatile registers R0-R3, Rtemp, LR.
|
||||
void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
|
||||
assert(ProfileInterpreter, "must be profiling interpreter");
|
||||
Label set_mdp;
|
||||
@ -1265,22 +1118,12 @@ void InterpreterMacroAssembler::increment_mdp_data_at(Address data,
|
||||
// Decrement the register. Set condition codes.
|
||||
subs(bumped_count, bumped_count, DataLayout::counter_increment);
|
||||
// Avoid overflow.
|
||||
#ifdef AARCH64
|
||||
assert(DataLayout::counter_increment == 1, "required for cinc");
|
||||
cinc(bumped_count, bumped_count, pl);
|
||||
#else
|
||||
add(bumped_count, bumped_count, DataLayout::counter_increment, pl);
|
||||
#endif // AARCH64
|
||||
} else {
|
||||
// Increment the register. Set condition codes.
|
||||
adds(bumped_count, bumped_count, DataLayout::counter_increment);
|
||||
// Avoid overflow.
|
||||
#ifdef AARCH64
|
||||
assert(DataLayout::counter_increment == 1, "required for cinv");
|
||||
cinv(bumped_count, bumped_count, mi); // inverts 0x80..00 back to 0x7f..ff
|
||||
#else
|
||||
sub(bumped_count, bumped_count, DataLayout::counter_increment, mi);
|
||||
#endif // AARCH64
|
||||
}
|
||||
str(bumped_count, data);
|
||||
}
|
||||
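The increment branch above guards the profile cell against wrapping: after `adds`, a negative result means the count crossed the most-positive value, so the 32-bit path subtracts the increment back (the removed AArch64 path used `cinv` to the same effect). A sketch of that intent (illustrative; `counter_increment` is normally 1):

```cpp
#include <cstdint>

// Illustrative saturating bump for an MDO counter cell.
intptr_t saturating_increment(intptr_t count, intptr_t counter_increment) {
  count += counter_increment;
  if (count < 0) {                 // wrapped past the most positive value
    count -= counter_increment;    // undo: the counter sticks at its maximum
  }
  return count;
}
```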
@ -1328,7 +1171,7 @@ void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in, int cons
|
||||
}
|
||||
|
||||
|
||||
// Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
|
||||
// Blows volatile registers R0-R3, Rtemp, LR).
|
||||
void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
|
||||
assert(ProfileInterpreter, "must be profiling interpreter");
|
||||
assert_different_registers(return_bci, R0, R1, R2, R3, Rtemp);
|
||||
@ -1542,7 +1385,7 @@ void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
|
||||
bind (done);
|
||||
}
|
||||
|
||||
// Sets mdp, blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
|
||||
// Sets mdp, blows volatile registers R0-R3, Rtemp, LR).
|
||||
void InterpreterMacroAssembler::profile_ret(Register mdp, Register return_bci) {
|
||||
assert_different_registers(mdp, return_bci, Rtemp, R0, R1, R2, R3);
|
||||
|
||||
@ -1704,9 +1547,6 @@ void InterpreterMacroAssembler::profile_switch_case(Register mdp, Register index
|
||||
|
||||
|
||||
void InterpreterMacroAssembler::byteswap_u32(Register r, Register rtmp1, Register rtmp2) {
|
||||
#ifdef AARCH64
|
||||
rev_w(r, r);
|
||||
#else
|
||||
if (VM_Version::supports_rev()) {
|
||||
rev(r, r);
|
||||
} else {
|
||||
@ -1715,7 +1555,6 @@ void InterpreterMacroAssembler::byteswap_u32(Register r, Register rtmp1, Registe
|
||||
andr(rtmp1, rtmp2, AsmOperand(rtmp1, lsr, 8));
|
||||
eor(r, rtmp1, AsmOperand(r, ror, 8));
|
||||
}
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
|
||||
@ -1723,7 +1562,7 @@ void InterpreterMacroAssembler::inc_global_counter(address address_of_counter, i
|
||||
const intx addr = (intx) (address_of_counter + offset);
|
||||
|
||||
assert ((addr & 0x3) == 0, "address of counter should be aligned");
|
||||
const intx offset_mask = right_n_bits(AARCH64_ONLY(12 + 2) NOT_AARCH64(12));
|
||||
const intx offset_mask = right_n_bits(12);
|
||||
|
||||
const address base = (address) (addr & ~offset_mask);
|
||||
const int offs = (int) (addr & offset_mask);
|
||||
@ -1736,14 +1575,7 @@ void InterpreterMacroAssembler::inc_global_counter(address address_of_counter, i
|
||||
|
||||
if (avoid_overflow) {
|
||||
adds_32(val, val, 1);
|
||||
#ifdef AARCH64
|
||||
Label L;
|
||||
b(L, mi);
|
||||
str_32(val, Address(addr_base, offs));
|
||||
bind(L);
|
||||
#else
|
||||
str(val, Address(addr_base, offs), pl);
|
||||
#endif // AARCH64
|
||||
} else {
|
||||
add_32(val, val, 1);
|
||||
str_32(val, Address(addr_base, offs));
|
||||
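The mask arithmetic above exists because an ARM `ldr`/`str` immediate offset holds only 12 bits, so the absolute counter address is split into a base that is materialized once and a small offset that fits the addressing mode. A worked example (illustrative numbers):

```cpp
#include <cstdint>

// Split an absolute address into (base, offset) with a 12-bit offset field.
void split_for_12bit_offset(uintptr_t addr, uintptr_t* base, int* offs) {
  const uintptr_t offset_mask = (1u << 12) - 1;   // right_n_bits(12) == 0xFFF
  *base = addr & ~offset_mask;
  *offs = (int)(addr & offset_mask);
  // e.g. addr 0x08123ABC -> base 0x08123000, offs 0xABC; the store then becomes
  // str val, [base_reg, #0xABC] with base_reg loaded once via mov_slow.
}
```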
@ -1823,17 +1655,9 @@ void InterpreterMacroAssembler::notify_method_exit(
|
||||
if (native) {
|
||||
// For c++ and template interpreter push both result registers on the
|
||||
// stack in native, we don't know the state.
|
||||
// On AArch64 result registers are stored into the frame at known locations.
|
||||
// See frame::interpreter_frame_result for code that gets the result values from here.
|
||||
assert(result_lo != noreg, "result registers should be defined");
|
||||
|
||||
#ifdef AARCH64
|
||||
assert(result_hi == noreg, "result_hi is not used on AArch64");
|
||||
assert(result_fp != fnoreg, "FP result register must be defined");
|
||||
|
||||
str_d(result_fp, Address(FP, frame::interpreter_frame_fp_saved_result_offset * wordSize));
|
||||
str(result_lo, Address(FP, frame::interpreter_frame_gp_saved_result_offset * wordSize));
|
||||
#else
|
||||
assert(result_hi != noreg, "result registers should be defined");
|
||||
|
||||
#ifdef __ABI_HARD__
|
||||
@ -1843,20 +1667,14 @@ void InterpreterMacroAssembler::notify_method_exit(
|
||||
#endif // __ABI_HARD__
|
||||
|
||||
push(RegisterSet(result_lo) | RegisterSet(result_hi));
|
||||
#endif // AARCH64
|
||||
|
||||
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
|
||||
|
||||
#ifdef AARCH64
|
||||
ldr_d(result_fp, Address(FP, frame::interpreter_frame_fp_saved_result_offset * wordSize));
|
||||
ldr(result_lo, Address(FP, frame::interpreter_frame_gp_saved_result_offset * wordSize));
|
||||
#else
|
||||
pop(RegisterSet(result_lo) | RegisterSet(result_hi));
|
||||
#ifdef __ABI_HARD__
|
||||
fldd(result_fp, Address(SP));
|
||||
add(SP, SP, 2 * wordSize);
|
||||
#endif // __ABI_HARD__
|
||||
#endif // AARCH64
|
||||
|
||||
} else {
|
||||
// For the template interpreter, the value on tos is the size of the
|
||||
@ -1932,13 +1750,8 @@ void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
|
||||
add(scratch, scratch, increment);
|
||||
str_32(scratch, counter_addr);
|
||||
|
||||
#ifdef AARCH64
|
||||
ldr_u32(scratch2, mask_addr);
|
||||
ands_w(ZR, scratch, scratch2);
|
||||
#else
|
||||
ldr(scratch2, mask_addr);
|
||||
andrs(scratch, scratch, scratch2);
|
||||
#endif // AARCH64
|
||||
b(*where, cond);
|
||||
}
|
||||
|
||||
@ -1959,26 +1772,15 @@ void InterpreterMacroAssembler::get_method_counters(Register method,
|
||||
// Save and restore in use caller-saved registers since they will be trashed by call_VM
|
||||
assert(reg1 != noreg, "must specify reg1");
|
||||
assert(reg2 != noreg, "must specify reg2");
|
||||
#ifdef AARCH64
|
||||
assert(reg3 != noreg, "must specify reg3");
|
||||
stp(reg1, reg2, Address(Rstack_top, -2*wordSize, pre_indexed));
|
||||
stp(reg3, ZR, Address(Rstack_top, -2*wordSize, pre_indexed));
|
||||
#else
|
||||
assert(reg3 == noreg, "must not specify reg3");
|
||||
push(RegisterSet(reg1) | RegisterSet(reg2));
|
||||
#endif
|
||||
}
|
||||
|
||||
mov(R1, method);
|
||||
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters), R1);
|
||||
|
||||
if (saveRegs) {
|
||||
#ifdef AARCH64
|
||||
ldp(reg3, ZR, Address(Rstack_top, 2*wordSize, post_indexed));
|
||||
ldp(reg1, reg2, Address(Rstack_top, 2*wordSize, post_indexed));
|
||||
#else
|
||||
pop(RegisterSet(reg1) | RegisterSet(reg2));
|
||||
#endif
|
||||
}
|
||||
|
||||
ldr(Rcounters, method_counters);
|
||||
|
@ -63,48 +63,12 @@ class InterpreterMacroAssembler: public MacroAssembler {
|
||||
virtual void check_and_handle_earlyret();
|
||||
|
||||
// Interpreter-specific registers
|
||||
#if defined(AARCH64) && defined(ASSERT)
|
||||
|
||||
#define check_stack_top() _check_stack_top("invalid Rstack_top at " __FILE__ ":" XSTR(__LINE__))
|
||||
#define check_stack_top_on_expansion() _check_stack_top("invalid Rstack_top at " __FILE__ ":" XSTR(__LINE__), VerifyInterpreterStackTop)
|
||||
#define check_extended_sp(tmp) _check_extended_sp(tmp, "SP does not match extended SP in frame at " __FILE__ ":" XSTR(__LINE__))
|
||||
#define check_no_cached_stack_top(tmp) _check_no_cached_stack_top(tmp, "stack_top is already cached in frame at " __FILE__ ":" XSTR(__LINE__))
|
||||
|
||||
void _check_stack_top(const char* msg, bool enabled = true) {
|
||||
if (enabled) {
|
||||
Label L;
|
||||
cmp(SP, Rstack_top);
|
||||
b(L, ls);
|
||||
stop(msg);
|
||||
bind(L);
|
||||
}
|
||||
}
|
||||
|
||||
void _check_extended_sp(Register tmp, const char* msg) {
|
||||
Label L;
|
||||
ldr(tmp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
|
||||
cmp(SP, tmp);
|
||||
b(L, eq);
|
||||
stop(msg);
|
||||
bind(L);
|
||||
}
|
||||
|
||||
void _check_no_cached_stack_top(Register tmp, const char* msg) {
|
||||
Label L;
|
||||
ldr(tmp, Address(FP, frame::interpreter_frame_stack_top_offset * wordSize));
|
||||
cbz(tmp, L);
|
||||
stop(msg);
|
||||
bind(L);
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
inline void check_stack_top() {}
|
||||
inline void check_stack_top_on_expansion() {}
|
||||
inline void check_extended_sp(Register tmp) {}
|
||||
inline void check_no_cached_stack_top(Register tmp) {}
|
||||
|
||||
#endif // AARCH64 && ASSERT
|
||||
|
||||
void save_bcp() { str(Rbcp, Address(FP, frame::interpreter_frame_bcp_offset * wordSize)); }
|
||||
void restore_bcp() { ldr(Rbcp, Address(FP, frame::interpreter_frame_bcp_offset * wordSize)); }
|
||||
@ -112,13 +76,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
|
||||
void restore_method() { ldr(Rmethod, Address(FP, frame::interpreter_frame_method_offset * wordSize)); }
|
||||
void restore_dispatch();
|
||||
|
||||
#ifdef AARCH64
|
||||
void save_stack_top() { check_stack_top(); str(Rstack_top, Address(FP, frame::interpreter_frame_stack_top_offset * wordSize)); }
|
||||
void clear_cached_stack_top() { str(ZR, Address(FP, frame::interpreter_frame_stack_top_offset * wordSize)); }
|
||||
void restore_stack_top() { ldr(Rstack_top, Address(FP, frame::interpreter_frame_stack_top_offset * wordSize)); clear_cached_stack_top(); check_stack_top(); }
|
||||
void cut_sp_before_call() { align_reg(SP, Rstack_top, StackAlignmentInBytes); }
|
||||
void restore_sp_after_call(Register tmp) { ldr(tmp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize)); mov(SP, tmp); }
|
||||
#endif
|
||||
|
||||
// Helpers for runtime call arguments/results
|
||||
void get_const(Register reg) { ldr(reg, Address(Rmethod, Method::const_offset())); }
|
||||
@ -145,21 +102,13 @@ class InterpreterMacroAssembler: public MacroAssembler {
|
||||
|
||||
void pop_ptr(Register r);
|
||||
void pop_i(Register r = R0_tos);
|
||||
#ifdef AARCH64
|
||||
void pop_l(Register r = R0_tos);
|
||||
#else
|
||||
void pop_l(Register lo = R0_tos_lo, Register hi = R1_tos_hi);
|
||||
#endif
|
||||
void pop_f(FloatRegister fd);
|
||||
void pop_d(FloatRegister fd);
|
||||
|
||||
void push_ptr(Register r);
|
||||
void push_i(Register r = R0_tos);
|
||||
#ifdef AARCH64
|
||||
void push_l(Register r = R0_tos);
|
||||
#else
|
||||
void push_l(Register lo = R0_tos_lo, Register hi = R1_tos_hi);
|
||||
#endif
|
||||
void push_f();
|
||||
void push_d();
|
||||
|
||||
@ -168,7 +117,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
|
||||
// Transition state -> vtos. Blows Rtemp.
|
||||
void push(TosState state);
|
||||
|
||||
#ifndef AARCH64
|
||||
// The following methods are overridden to allow overloaded calls to
|
||||
// MacroAssembler::push/pop(Register)
|
||||
// MacroAssembler::push/pop(RegisterSet)
|
||||
@ -183,7 +131,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
|
||||
void convert_retval_to_tos(TosState state);
|
||||
// Converts TOS cached value to return value in R0/R1 (according to interpreter calling conventions).
|
||||
void convert_tos_to_retval(TosState state);
|
||||
#endif
|
||||
|
||||
// JVMTI ForceEarlyReturn support
|
||||
void load_earlyret_value(TosState state);
|
||||
@ -194,12 +141,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
|
||||
void empty_expression_stack() {
|
||||
ldr(Rstack_top, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
|
||||
check_stack_top();
|
||||
#ifdef AARCH64
|
||||
clear_cached_stack_top();
|
||||
#else
|
||||
// NULL last_sp until next java call
|
||||
str(zero_register(Rtemp), Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
// Helpers for swap and dup
|
||||
|
@ -44,13 +44,9 @@ InterpreterRuntime::SignatureHandlerGenerator::SignatureHandlerGenerator(
|
||||
_abi_offset = 0;
|
||||
_ireg = is_static() ? 2 : 1;
|
||||
#ifdef __ABI_HARD__
|
||||
#ifdef AARCH64
|
||||
_freg = 0;
|
||||
#else
|
||||
_fp_slot = 0;
|
||||
_single_fpr_slot = 0;
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef SHARING_FAST_NATIVE_FINGERPRINTS
|
||||
@ -126,17 +122,6 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_int() {
|
||||
}
|
||||
|
||||
void InterpreterRuntime::SignatureHandlerGenerator::pass_long() {
|
||||
#ifdef AARCH64
|
||||
if (_ireg < GPR_PARAMS) {
|
||||
Register dst = as_Register(_ireg);
|
||||
__ ldr(dst, Address(Rlocals, Interpreter::local_offset_in_bytes(offset() + 1)));
|
||||
_ireg++;
|
||||
} else {
|
||||
__ ldr(Rtemp, Address(Rlocals, Interpreter::local_offset_in_bytes(offset() + 1)));
|
||||
__ str(Rtemp, Address(SP, _abi_offset * wordSize));
|
||||
_abi_offset++;
|
||||
}
|
||||
#else
|
||||
if (_ireg <= 2) {
|
||||
#if (ALIGN_WIDE_ARGUMENTS == 1)
|
||||
if ((_ireg & 1) != 0) {
|
||||
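The `(_ireg & 1)` test above enforces the 32-bit ARM EABI rule (what `ALIGN_WIDE_ARGUMENTS` models) that a 64-bit integer argument must start in an even-numbered core register, i.e. in R0:R1 or R2:R3. Illustrative prototypes showing where arguments land under that rule:

```cpp
// Illustrative only: 32-bit ARM EABI with aligned wide arguments.
extern "C" void f(int a, long long b);
//   a -> R0, R1 is skipped (alignment padding), b -> R2:R3
extern "C" void g(int a, int b, int c, long long d);
//   a,b,c -> R0,R1,R2; d cannot start in R3, so it is passed on the stack (8-byte aligned)
```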
@ -170,24 +155,9 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_long() {
|
||||
_abi_offset += 2;
|
||||
_ireg = 4;
|
||||
}
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
|
||||
#ifdef AARCH64
|
||||
__ ldr(Rtemp, Address(Rlocals, Interpreter::local_offset_in_bytes(offset())));
|
||||
__ cmp(Rtemp, 0);
|
||||
__ sub(Rtemp, Rlocals, -Interpreter::local_offset_in_bytes(offset()));
|
||||
if (_ireg < GPR_PARAMS) {
|
||||
Register dst = as_Register(_ireg);
|
||||
__ csel(dst, ZR, Rtemp, eq);
|
||||
_ireg++;
|
||||
} else {
|
||||
__ csel(Rtemp, ZR, Rtemp, eq);
|
||||
__ str(Rtemp, Address(SP, _abi_offset * wordSize));
|
||||
_abi_offset++;
|
||||
}
|
||||
#else
|
||||
if (_ireg < 4) {
|
||||
Register dst = as_Register(_ireg);
|
||||
__ ldr(dst, Address(Rlocals, Interpreter::local_offset_in_bytes(offset())));
|
||||
@ -201,7 +171,6 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
|
||||
__ str(Rtemp, Address(SP, _abi_offset * wordSize));
|
||||
_abi_offset++;
|
||||
}
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
#ifndef __ABI_HARD__
|
||||
@ -220,17 +189,6 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_float() {
|
||||
#else
|
||||
#ifndef __SOFTFP__
|
||||
void InterpreterRuntime::SignatureHandlerGenerator::pass_float() {
|
||||
#ifdef AARCH64
|
||||
if (_freg < FPR_PARAMS) {
|
||||
FloatRegister dst = as_FloatRegister(_freg);
|
||||
__ ldr_s(dst, Address(Rlocals, Interpreter::local_offset_in_bytes(offset())));
|
||||
_freg++;
|
||||
} else {
|
||||
__ ldr_u32(Rtemp, Address(Rlocals, Interpreter::local_offset_in_bytes(offset())));
|
||||
__ str_32(Rtemp, Address(SP, _abi_offset * wordSize));
|
||||
_abi_offset++;
|
||||
}
|
||||
#else
|
||||
if((_fp_slot < 16) || (_single_fpr_slot & 1)) {
|
||||
if ((_single_fpr_slot & 1) == 0) {
|
||||
_single_fpr_slot = _fp_slot;
|
||||
@ -243,21 +201,9 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_float() {
|
||||
__ str(Rtemp, Address(SP, _abi_offset * wordSize));
|
||||
_abi_offset++;
|
||||
}
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
void InterpreterRuntime::SignatureHandlerGenerator::pass_double() {
|
||||
#ifdef AARCH64
|
||||
if (_freg < FPR_PARAMS) {
|
||||
FloatRegister dst = as_FloatRegister(_freg);
|
||||
__ ldr_d(dst, Address(Rlocals, Interpreter::local_offset_in_bytes(offset() + 1)));
|
||||
_freg++;
|
||||
} else {
|
||||
__ ldr(Rtemp, Address(Rlocals, Interpreter::local_offset_in_bytes(offset() + 1)));
|
||||
__ str(Rtemp, Address(SP, _abi_offset * wordSize));
|
||||
_abi_offset++;
|
||||
}
|
||||
#else
|
||||
if(_fp_slot <= 14) {
|
||||
__ fldd(as_FloatRegister(_fp_slot), Address(Rlocals, Interpreter::local_offset_in_bytes(offset()+1)));
|
||||
_fp_slot += 2;
|
||||
@ -269,7 +215,6 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_double() {
|
||||
_abi_offset += 2;
|
||||
_single_fpr_slot = 16;
|
||||
}
|
||||
#endif // AARCH64
|
||||
}
|
||||
#endif // __SOFTFP__
|
||||
#endif // __ABI_HARD__
|
||||
@ -281,14 +226,10 @@ void InterpreterRuntime::SignatureHandlerGenerator::generate(uint64_t fingerprin
|
||||
|
||||
address result_handler = Interpreter::result_handler(result_type);
|
||||
|
||||
#ifdef AARCH64
|
||||
__ mov_slow(R0, (address)result_handler);
|
||||
#else
|
||||
// Check that result handlers are not real handler on ARM (0 or -1).
|
||||
// This ensures the signature handlers do not need symbolic information.
|
||||
assert((result_handler == NULL)||(result_handler==(address)0xffffffff),"");
|
||||
__ mov_slow(R0, (intptr_t)result_handler);
|
||||
#endif
|
||||
|
||||
__ ret();
|
||||
}
|
||||
@ -339,9 +280,7 @@ class SlowSignatureHandler: public NativeSignatureIterator {
|
||||
intptr_t* _toGP;
|
||||
int _last_gp;
|
||||
int _last_fp;
|
||||
#ifndef AARCH64
|
||||
int _last_single_fp;
|
||||
#endif // !AARCH64
|
||||
|
||||
virtual void pass_int() {
|
||||
if(_last_gp < GPR_PARAMS) {
|
||||
@ -353,13 +292,6 @@ class SlowSignatureHandler: public NativeSignatureIterator {
|
||||
}
|
||||
|
||||
virtual void pass_long() {
|
||||
#ifdef AARCH64
|
||||
if(_last_gp < GPR_PARAMS) {
|
||||
_toGP[_last_gp++] = *(jlong *)(_from+Interpreter::local_offset_in_bytes(1));
|
||||
} else {
|
||||
*_to++ = *(jlong *)(_from+Interpreter::local_offset_in_bytes(1));
|
||||
}
|
||||
#else
|
||||
assert(ALIGN_WIDE_ARGUMENTS == 1, "ABI_HARD not supported with unaligned wide arguments");
|
||||
if (_last_gp <= 2) {
|
||||
if(_last_gp & 1) _last_gp++;
|
||||
@ -375,7 +307,6 @@ class SlowSignatureHandler: public NativeSignatureIterator {
|
||||
_to += 2;
|
||||
_last_gp = 4;
|
||||
}
|
||||
#endif // AARCH64
|
||||
_from -= 2*Interpreter::stackElementSize;
|
||||
}
|
||||
|
||||
@ -390,13 +321,6 @@ class SlowSignatureHandler: public NativeSignatureIterator {
|
||||
}
|
||||
|
||||
virtual void pass_float() {
|
||||
#ifdef AARCH64
|
||||
if(_last_fp < FPR_PARAMS) {
|
||||
_toFP[_last_fp++] = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
|
||||
} else {
|
||||
*_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
|
||||
}
|
||||
#else
|
||||
if((_last_fp < 16) || (_last_single_fp & 1)) {
|
||||
if ((_last_single_fp & 1) == 0) {
|
||||
_last_single_fp = _last_fp;
|
||||
@ -407,18 +331,10 @@ class SlowSignatureHandler: public NativeSignatureIterator {
|
||||
} else {
|
||||
*_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
|
||||
}
|
||||
#endif // AARCH64
|
||||
_from -= Interpreter::stackElementSize;
|
||||
}
|
||||
|
||||
virtual void pass_double() {
|
||||
#ifdef AARCH64
|
||||
if(_last_fp < FPR_PARAMS) {
|
||||
_toFP[_last_fp++] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
|
||||
} else {
|
||||
*_to++ = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
|
||||
}
|
||||
#else
|
||||
assert(ALIGN_WIDE_ARGUMENTS == 1, "ABI_HARD not supported with unaligned wide arguments");
|
||||
if(_last_fp <= 14) {
|
||||
_toFP[_last_fp++] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
|
||||
@ -432,7 +348,6 @@ class SlowSignatureHandler: public NativeSignatureIterator {
|
||||
_to += 2;
|
||||
_last_single_fp = 16;
|
||||
}
|
||||
#endif // AARCH64
|
||||
_from -= 2*Interpreter::stackElementSize;
|
||||
}
|
||||
|
||||
@ -446,12 +361,10 @@ class SlowSignatureHandler: public NativeSignatureIterator {
|
||||
#ifdef __ABI_HARD__
|
||||
_toGP = to;
|
||||
_toFP = _toGP + GPR_PARAMS;
|
||||
_to = _toFP + AARCH64_ONLY(FPR_PARAMS) NOT_AARCH64(8*2);
|
||||
_to = _toFP + (8*2);
|
||||
_last_gp = (is_static() ? 2 : 1);
|
||||
_last_fp = 0;
|
||||
#ifndef AARCH64
|
||||
_last_single_fp = 0;
|
||||
#endif // !AARCH64
|
||||
#else
|
||||
_to = to + (is_static() ? 2 : 1);
|
||||
#endif // __ABI_HARD__
|
||||
|
@ -34,12 +34,8 @@ class SignatureHandlerGenerator: public NativeSignatureIterator {
|
||||
int _ireg;
|
||||
|
||||
#ifdef __ABI_HARD__
|
||||
#ifdef AARCH64
|
||||
int _freg;
|
||||
#else
|
||||
int _fp_slot; // number of FPR's with arguments loaded
|
||||
int _single_fpr_slot;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
void move(int from_offset, int to_offset);
|
||||
@ -60,10 +56,8 @@ class SignatureHandlerGenerator: public NativeSignatureIterator {
|
||||
void generate(uint64_t fingerprint);
|
||||
};
|
||||
|
||||
#ifndef AARCH64
|
||||
// ARM provides a normalized fingerprint for native calls (to increase
|
||||
// sharing). See normalize_fast_native_fingerprint
|
||||
#define SHARING_FAST_NATIVE_FINGERPRINTS
|
||||
#endif
|
||||
|
||||
#endif // CPU_ARM_VM_INTERPRETERRT_ARM_HPP
|
||||
|
@ -78,26 +78,19 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
|
||||
// R1 - object handle
|
||||
// R2 - jfieldID
|
||||
|
||||
const Register Rsafepoint_counter_addr = AARCH64_ONLY(R4) NOT_AARCH64(R3);
|
||||
const Register Robj = AARCH64_ONLY(R5) NOT_AARCH64(R1);
|
||||
const Register Rres = AARCH64_ONLY(R6) NOT_AARCH64(R0);
|
||||
#ifndef AARCH64
|
||||
const Register Rsafepoint_counter_addr = R3;
|
||||
const Register Robj = R1;
|
||||
const Register Rres = R0;
|
||||
const Register Rres_hi = R1;
|
||||
#endif // !AARCH64
|
||||
const Register Rsafept_cnt = Rtemp;
|
||||
const Register Rsafept_cnt2 = Rsafepoint_counter_addr;
|
||||
const Register Rtmp1 = AARCH64_ONLY(R7) NOT_AARCH64(R3); // same as Rsafepoint_counter_addr on 32-bit ARM
|
||||
const Register Rtmp2 = AARCH64_ONLY(R8) NOT_AARCH64(R2); // same as jfieldID on 32-bit ARM
|
||||
const Register Rtmp1 = R3; // same as Rsafepoint_counter_addr
|
||||
const Register Rtmp2 = R2; // same as jfieldID
|
||||
|
||||
#ifdef AARCH64
|
||||
assert_different_registers(Rsafepoint_counter_addr, Rsafept_cnt, Robj, Rres, Rtmp1, Rtmp2, R0, R1, R2, LR);
|
||||
assert_different_registers(Rsafept_cnt2, Rsafept_cnt, Rres, R0, R1, R2, LR);
|
||||
#else
|
||||
assert_different_registers(Rsafepoint_counter_addr, Rsafept_cnt, Robj, Rres, LR);
|
||||
assert_different_registers(Rsafept_cnt, R1, R2, Rtmp1, LR);
|
||||
assert_different_registers(Rsafepoint_counter_addr, Rsafept_cnt, Rres, Rres_hi, Rtmp2, LR);
|
||||
assert_different_registers(Rsafept_cnt2, Rsafept_cnt, Rres, Rres_hi, LR);
|
||||
#endif // AARCH64
|
||||
|
||||
address fast_entry;
|
||||
|
||||
@ -112,29 +105,17 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
|
||||
Label slow_case;
|
||||
__ ldr_literal(Rsafepoint_counter_addr, safepoint_counter_addr);
|
||||
|
||||
#ifndef AARCH64
|
||||
__ push(RegisterSet(R0, R3)); // save incoming arguments for slow case
|
||||
#endif // !AARCH64
|
||||
|
||||
__ ldr_s32(Rsafept_cnt, Address(Rsafepoint_counter_addr));
|
||||
__ tbnz(Rsafept_cnt, 0, slow_case);
|
||||
|
||||
#ifdef AARCH64
|
||||
// If mask changes we need to ensure that the inverse is still encodable as an immediate
|
||||
STATIC_ASSERT(JNIHandles::weak_tag_mask == 1);
|
||||
__ andr(R1, R1, ~JNIHandles::weak_tag_mask);
|
||||
#else
|
||||
__ bic(R1, R1, JNIHandles::weak_tag_mask);
|
||||
#endif
|
||||
|
||||
// Address dependency restricts memory access ordering. It's cheaper than explicit LoadLoad barrier
|
||||
__ andr(Rtmp1, Rsafept_cnt, (unsigned)1);
|
||||
__ ldr(Robj, Address(R1, Rtmp1));
|
||||
|
||||
#ifdef AARCH64
|
||||
__ add(Robj, Robj, AsmOperand(R2, lsr, 2));
|
||||
Address field_addr = Address(Robj);
|
||||
#else
|
||||
Address field_addr;
|
||||
if (type != T_BOOLEAN
|
||||
&& type != T_INT
|
||||
@ -148,7 +129,6 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
|
||||
} else {
|
||||
field_addr = Address(Robj, R2, lsr, 2);
|
||||
}
|
||||
#endif // AARCH64
|
||||
assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
|
||||
speculative_load_pclist[count] = __ pc();
|
||||
|
||||
@ -175,12 +155,8 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
|
||||
#ifndef __ABI_HARD__
|
||||
case T_DOUBLE:
|
||||
#endif
|
||||
#ifdef AARCH64
|
||||
__ ldr(Rres, field_addr);
|
||||
#else
|
||||
// Safe to use ldrd since long and double fields are 8-byte aligned
|
||||
__ ldrd(Rres, field_addr);
|
||||
#endif // AARCH64
|
||||
break;
|
||||
#ifdef __ABI_HARD__
|
||||
case T_FLOAT:
|
||||
@ -195,38 +171,28 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
|
||||
}
|
||||
|
||||
// Address dependency restricts memory access ordering. It's cheaper than explicit LoadLoad barrier
|
||||
#if defined(__ABI_HARD__) && !defined(AARCH64)
|
||||
#ifdef __ABI_HARD__
|
||||
if (type == T_FLOAT || type == T_DOUBLE) {
|
||||
__ ldr_literal(Rsafepoint_counter_addr, safepoint_counter_addr);
|
||||
__ fmrrd(Rres, Rres_hi, D0);
|
||||
__ eor(Rtmp2, Rres, Rres);
|
||||
__ ldr_s32(Rsafept_cnt2, Address(Rsafepoint_counter_addr, Rtmp2));
|
||||
} else
|
||||
#endif // __ABI_HARD__ && !AARCH64
|
||||
#endif // __ABI_HARD__
|
||||
{
|
||||
#ifndef AARCH64
|
||||
__ ldr_literal(Rsafepoint_counter_addr, safepoint_counter_addr);
|
||||
#endif // !AARCH64
|
||||
__ eor(Rtmp2, Rres, Rres);
|
||||
__ ldr_s32(Rsafept_cnt2, Address(Rsafepoint_counter_addr, Rtmp2));
|
||||
}
|
||||
__ cmp(Rsafept_cnt2, Rsafept_cnt);
#ifdef AARCH64
__ b(slow_case, ne);
__ mov(R0, Rres);
__ ret();
#else
// discards saved R0 R1 R2 R3
__ add(SP, SP, 4 * wordSize, eq);
__ bx(LR, eq);
#endif // AARCH64

slowcase_entry_pclist[count++] = __ pc();

__ bind(slow_case);
#ifndef AARCH64
__ pop(RegisterSet(R0, R3));
#endif // !AARCH64
// thumb mode switch handled by MacroAssembler::jump if needed
__ jump(slow_case_addr, relocInfo::none, Rtemp);
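Illustration (not part of the patch): the retained 32-bit path above follows the usual speculative fast-field-get protocol — read the safepoint counter, fall back to the slow path if its low bit is set, load the field, then re-read the counter and fall back again if it changed; the generated code orders the two counter reads around the field load with an address dependency instead of an explicit LoadLoad barrier. A minimal, self-contained C++ model of that control flow (all names are illustrative, and an acquire fence merely stands in for the address dependency):

    #include <atomic>
    #include <cstdio>

    static std::atomic<unsigned> safepoint_counter{0};  // even value = no safepoint in progress
    static int the_field = 42;                          // stand-in for the Java field being read

    static int slow_get_int_field() { return the_field; }  // placeholder slow path

    static int fast_get_int_field() {
      unsigned cnt = safepoint_counter.load(std::memory_order_acquire);
      if (cnt & 1) return slow_get_int_field();           // safepoint in progress: go slow
      int value = the_field;                              // speculative load of the field
      // The real stub orders this re-read via an address dependency on cnt;
      // the fence only approximates that ordering in this sketch.
      std::atomic_thread_fence(std::memory_order_acquire);
      unsigned cnt2 = safepoint_counter.load(std::memory_order_relaxed);
      if (cnt2 != cnt) return slow_get_int_field();       // counter moved: result may be stale
      return value;
    }

    int main() { printf("%d\n", fast_get_int_field()); return 0; }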
|
||||
|
||||
|
@ -44,12 +44,10 @@ class JNITypes : AllStatic {
|
||||
|
||||
private:
|
||||
|
||||
#ifndef AARCH64
|
||||
// 32bit Helper routines.
|
||||
static inline void put_int2r(jint *from, intptr_t *to) { *(jint *)(to++) = from[1];
|
||||
*(jint *)(to ) = from[0]; }
|
||||
static inline void put_int2r(jint *from, intptr_t *to, int& pos) { put_int2r(from, to + pos); pos += 2; }
|
||||
#endif
|
||||
|
||||
public:
|
||||
// Ints are stored in native format in one JavaCallArgument slot at *to.
|
||||
@ -57,18 +55,11 @@ public:
|
||||
static inline void put_int(jint from, intptr_t *to, int& pos) { *(jint *)(to + pos++) = from; }
|
||||
static inline void put_int(jint *from, intptr_t *to, int& pos) { *(jint *)(to + pos++) = *from; }
|
||||
|
||||
#ifdef AARCH64
// Longs are stored in native format in one JavaCallArgument slot at *(to+1).
static inline void put_long(jlong from, intptr_t *to) { *(jlong *)(to + 1 + 0) = from; }
static inline void put_long(jlong from, intptr_t *to, int& pos) { *(jlong *)(to + 1 + pos) = from; pos += 2; }
static inline void put_long(jlong *from, intptr_t *to, int& pos) { *(jlong *)(to + 1 + pos) = *from; pos += 2; }
#else
// Longs are stored in big-endian word format in two JavaCallArgument slots at *to.
// The high half is in *to and the low half in *(to+1).
static inline void put_long(jlong from, intptr_t *to) { put_int2r((jint *)&from, to); }
static inline void put_long(jlong from, intptr_t *to, int& pos) { put_int2r((jint *)&from, to, pos); }
static inline void put_long(jlong *from, intptr_t *to, int& pos) { put_int2r((jint *) from, to, pos); }
#endif
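Illustration (not part of the patch): on the retained 32-bit path a jlong occupies two 32-bit JavaCallArgument slots with the high word first, which is what put_int2r implements. A minimal stand-alone C++ sketch of that layout, assuming a little-endian host; put_int2r_model is a hypothetical stand-in, not HotSpot code:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Copies the two 32-bit halves of 'from' into consecutive slots in reversed order,
    // so the high half (on a little-endian host) lands in the first slot.
    static void put_int2r_model(const int32_t* from, intptr_t* to) {
      *(int32_t*)(to + 0) = from[1];   // high half -> *to
      *(int32_t*)(to + 1) = from[0];   // low half  -> *(to+1)
    }

    int main() {
      int64_t value = 0x1122334455667788LL;
      intptr_t slots[2] = {0, 0};
      put_int2r_model(reinterpret_cast<const int32_t*>(&value), slots);
      assert(*(int32_t*)&slots[0] == 0x11223344);  // high half in the first slot
      assert(*(int32_t*)&slots[1] == 0x55667788);  // low half in the second slot
      printf("slot0=0x%08x slot1=0x%08x\n",
             (unsigned)*(int32_t*)&slots[0], (unsigned)*(int32_t*)&slots[1]);
      return 0;
    }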
|
||||
|
||||
// Oops are stored in native format in one JavaCallArgument slot at *to.
|
||||
static inline void put_obj(oop from, intptr_t *to) { *(oop *)(to + 0 ) = from; }
|
||||
@ -80,18 +71,11 @@ public:
|
||||
static inline void put_float(jfloat from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = from; }
|
||||
static inline void put_float(jfloat *from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = *from; }
|
||||
|
||||
#ifdef AARCH64
|
||||
// Doubles are stored in native word format in one JavaCallArgument slot at *(to+1).
|
||||
static inline void put_double(jdouble from, intptr_t *to) { *(jdouble *)(to + 1 + 0) = from; }
|
||||
static inline void put_double(jdouble from, intptr_t *to, int& pos) { *(jdouble *)(to + 1 + pos) = from; pos += 2; }
|
||||
static inline void put_double(jdouble *from, intptr_t *to, int& pos) { *(jdouble *)(to + 1 + pos) = *from; pos += 2; }
|
||||
#else
|
||||
// Doubles are stored in big-endian word format in two JavaCallArgument slots at *to.
|
||||
// The high half is in *to and the low half in *(to+1).
|
||||
static inline void put_double(jdouble from, intptr_t *to) { put_int2r((jint *)&from, to); }
|
||||
static inline void put_double(jdouble from, intptr_t *to, int& pos) { put_int2r((jint *)&from, to, pos); }
|
||||
static inline void put_double(jdouble *from, intptr_t *to, int& pos) { put_int2r((jint *) from, to, pos); }
|
||||
#endif
|
||||
|
||||
};
|
||||
|
||||
|
File diff suppressed because it is too large
@ -229,10 +229,6 @@ public:
|
||||
// this was subsequently modified to its present name and return type
|
||||
virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset);
|
||||
|
||||
#ifdef AARCH64
|
||||
# define NOT_IMPLEMENTED() unimplemented("NYI at " __FILE__ ":" XSTR(__LINE__))
|
||||
# define NOT_TESTED() warn("Not tested at " __FILE__ ":" XSTR(__LINE__))
|
||||
#endif
|
||||
|
||||
void align(int modulus);
|
||||
|
||||
@ -275,7 +271,7 @@ public:
|
||||
|
||||
// Always sets/resets sp, which default to SP if (last_sp == noreg)
|
||||
// Optionally sets/resets fp (use noreg to avoid setting it)
|
||||
// Always sets/resets pc on AArch64; optionally sets/resets pc on 32-bit ARM depending on save_last_java_pc flag
|
||||
// Optionally sets/resets pc depending on save_last_java_pc flag
|
||||
// Note: when saving PC, set_last_Java_frame returns PC's offset in the code section
|
||||
// (for oop_maps offset computation)
|
||||
int set_last_Java_frame(Register last_sp, Register last_fp, bool save_last_java_pc, Register tmp);
|
||||
@ -399,7 +395,6 @@ public:
|
||||
|
||||
void resolve_jobject(Register value, Register tmp1, Register tmp2);
|
||||
|
||||
#ifndef AARCH64
|
||||
void nop() {
|
||||
mov(R0, R0);
|
||||
}
|
||||
@ -439,7 +434,6 @@ public:
|
||||
void fpops(FloatRegister fd, AsmCondition cond = al) {
|
||||
fldmias(SP, FloatRegisterSet(fd), writeback, cond);
|
||||
}
|
||||
#endif // !AARCH64
|
||||
|
||||
// Order access primitives
|
||||
enum Membar_mask_bits {
|
||||
@ -449,15 +443,10 @@ public:
|
||||
LoadLoad = 1 << 0
|
||||
};
|
||||
|
||||
#ifdef AARCH64
|
||||
// tmp register is not used on AArch64, this parameter is provided solely for better compatibility with 32-bit ARM
|
||||
void membar(Membar_mask_bits order_constraint, Register tmp = noreg);
|
||||
#else
|
||||
void membar(Membar_mask_bits mask,
|
||||
Register tmp,
|
||||
bool preserve_flags = true,
|
||||
Register load_tgt = noreg);
|
||||
#endif
|
||||
|
||||
void breakpoint(AsmCondition cond = al);
|
||||
void stop(const char* msg);
|
||||
@ -489,47 +478,28 @@ public:
|
||||
void add_slow(Register rd, Register rn, int c);
|
||||
void sub_slow(Register rd, Register rn, int c);
|
||||
|
||||
#ifdef AARCH64
|
||||
static int mov_slow_helper(Register rd, intptr_t c, MacroAssembler* masm /* optional */);
|
||||
#endif
|
||||
|
||||
void mov_slow(Register rd, intptr_t c NOT_AARCH64_ARG(AsmCondition cond = al));
|
||||
void mov_slow(Register rd, intptr_t c, AsmCondition cond = al);
|
||||
void mov_slow(Register rd, const char *string);
|
||||
void mov_slow(Register rd, address addr);
|
||||
|
||||
void patchable_mov_oop(Register rd, jobject o, int oop_index) {
|
||||
mov_oop(rd, o, oop_index AARCH64_ONLY_ARG(true));
|
||||
mov_oop(rd, o, oop_index);
|
||||
}
|
||||
void mov_oop(Register rd, jobject o, int index = 0
|
||||
AARCH64_ONLY_ARG(bool patchable = false)
|
||||
NOT_AARCH64_ARG(AsmCondition cond = al));
|
||||
|
||||
void mov_oop(Register rd, jobject o, int index = 0, AsmCondition cond = al);
|
||||
|
||||
void patchable_mov_metadata(Register rd, Metadata* o, int index) {
|
||||
mov_metadata(rd, o, index AARCH64_ONLY_ARG(true));
|
||||
mov_metadata(rd, o, index);
|
||||
}
|
||||
void mov_metadata(Register rd, Metadata* o, int index = 0 AARCH64_ONLY_ARG(bool patchable = false));
|
||||
void mov_metadata(Register rd, Metadata* o, int index = 0);
|
||||
|
||||
void mov_float(FloatRegister fd, jfloat c NOT_AARCH64_ARG(AsmCondition cond = al));
|
||||
void mov_double(FloatRegister fd, jdouble c NOT_AARCH64_ARG(AsmCondition cond = al));
|
||||
void mov_float(FloatRegister fd, jfloat c, AsmCondition cond = al);
|
||||
void mov_double(FloatRegister fd, jdouble c, AsmCondition cond = al);
|
||||
|
||||
#ifdef AARCH64
|
||||
int mov_pc_to(Register rd) {
|
||||
Label L;
|
||||
adr(rd, L);
|
||||
bind(L);
|
||||
return offset();
|
||||
}
|
||||
#endif
|
||||
|
||||
// Note: this variant of mov_address assumes the address moves with
// the code. Do *not* implement it with non-relocated instructions,
// unless PC-relative.
#ifdef AARCH64
void mov_relative_address(Register rd, address addr) {
adr(rd, addr);
}
#else
void mov_relative_address(Register rd, address addr, AsmCondition cond = al) {
int offset = addr - pc() - 8;
assert((offset & 3) == 0, "bad alignment");
@ -541,7 +511,6 @@ public:
sub(rd, PC, -offset, cond);
}
}
#endif // AARCH64
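Illustration (not part of the patch): the `- 8` above reflects that reading PC on 32-bit ARM yields the address of the current instruction plus 8, so the displacement folded into the ADD/SUB from PC is target - pc - 8. A small stand-alone C++ sketch of that arithmetic, with made-up addresses:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int main() {
      uintptr_t pc     = 0x1000;   // hypothetical address of the emitted instruction
      uintptr_t target = 0x1040;   // hypothetical address to materialize into rd
      intptr_t offset = (intptr_t)(target - pc - 8);  // PC reads as instruction + 8
      assert((offset & 3) == 0);   // word aligned, as the assert above requires
      if (offset >= 0) {
        printf("add rd, pc, #%ld\n", (long)offset);
      } else {
        printf("sub rd, pc, #%ld\n", (long)-offset);
      }
      return 0;
    }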
|
||||
|
||||
// Runtime address that may vary from one execution to another. The
|
||||
// symbolic_reference describes what the address is, allowing
|
||||
@ -562,7 +531,6 @@ public:
|
||||
mov_slow(rd, (intptr_t)addr);
|
||||
return;
|
||||
}
|
||||
#ifndef AARCH64
|
||||
if (VM_Version::supports_movw()) {
|
||||
relocate(rspec);
|
||||
int c = (int)addr;
|
||||
@ -572,15 +540,11 @@ public:
|
||||
}
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
Label skip_literal;
|
||||
InlinedAddress addr_literal(addr, rspec);
|
||||
ldr_literal(rd, addr_literal);
|
||||
b(skip_literal);
|
||||
bind_literal(addr_literal);
|
||||
// AARCH64 WARNING: because of alignment padding, extra padding
|
||||
// may be required to get a consistent size for C2, or rules must
|
||||
// overestimate size see MachEpilogNode::size
|
||||
bind(skip_literal);
|
||||
}
|
||||
|
||||
@ -594,45 +558,28 @@ public:
|
||||
assert(L.rspec().type() != relocInfo::runtime_call_type, "avoid ldr_literal for calls");
|
||||
assert(L.rspec().type() != relocInfo::static_call_type, "avoid ldr_literal for calls");
|
||||
relocate(L.rspec());
|
||||
#ifdef AARCH64
|
||||
ldr(rd, target(L.label));
|
||||
#else
|
||||
ldr(rd, Address(PC, target(L.label) - pc() - 8));
|
||||
#endif
|
||||
}
|
||||
|
||||
void ldr_literal(Register rd, InlinedString& L) {
|
||||
const char* msg = L.msg();
|
||||
if (code()->consts()->contains((address)msg)) {
|
||||
// string address moves with the code
|
||||
#ifdef AARCH64
|
||||
ldr(rd, (address)msg);
|
||||
#else
|
||||
ldr(rd, Address(PC, ((address)msg) - pc() - 8));
|
||||
#endif
|
||||
return;
|
||||
}
|
||||
// Warning: use external strings with care. They are not relocated
|
||||
// if the code moves. If needed, use code_string to move them
|
||||
// to the consts section.
|
||||
#ifdef AARCH64
|
||||
ldr(rd, target(L.label));
|
||||
#else
|
||||
ldr(rd, Address(PC, target(L.label) - pc() - 8));
|
||||
#endif
|
||||
}
|
||||
|
||||
void ldr_literal(Register rd, InlinedMetadata& L) {
|
||||
// relocation done in the bind_literal for metadatas
|
||||
#ifdef AARCH64
|
||||
ldr(rd, target(L.label));
|
||||
#else
|
||||
ldr(rd, Address(PC, target(L.label) - pc() - 8));
|
||||
#endif
|
||||
}
|
||||
|
||||
void bind_literal(InlinedAddress& L) {
|
||||
AARCH64_ONLY(align(wordSize));
|
||||
bind(L.label);
|
||||
assert(L.rspec().type() != relocInfo::metadata_type, "Must use InlinedMetadata");
|
||||
// We currently do not use oop 'bound' literals.
|
||||
@ -650,13 +597,11 @@ public:
|
||||
// to detect errors.
|
||||
return;
|
||||
}
|
||||
AARCH64_ONLY(align(wordSize));
|
||||
bind(L.label);
|
||||
AbstractAssembler::emit_address((address)L.msg());
|
||||
}
|
||||
|
||||
void bind_literal(InlinedMetadata& L) {
|
||||
AARCH64_ONLY(align(wordSize));
|
||||
bind(L.label);
|
||||
relocate(metadata_Relocation::spec_for_immediate());
|
||||
AbstractAssembler::emit_address((address)L.data());
|
||||
@ -665,138 +610,106 @@ public:
|
||||
void resolve_oop_handle(Register result);
|
||||
void load_mirror(Register mirror, Register method, Register tmp);
|
||||
|
||||
// Porting layer between 32-bit ARM and AArch64
|
||||
|
||||
#define COMMON_INSTR_1(common_mnemonic, aarch64_mnemonic, arm32_mnemonic, arg_type) \
|
||||
#define ARM_INSTR_1(common_mnemonic, arm32_mnemonic, arg_type) \
|
||||
void common_mnemonic(arg_type arg) { \
|
||||
AARCH64_ONLY(aarch64_mnemonic) NOT_AARCH64(arm32_mnemonic) (arg); \
|
||||
arm32_mnemonic(arg); \
|
||||
}
|
||||
|
||||
#define COMMON_INSTR_2(common_mnemonic, aarch64_mnemonic, arm32_mnemonic, arg1_type, arg2_type) \
|
||||
#define ARM_INSTR_2(common_mnemonic, arm32_mnemonic, arg1_type, arg2_type) \
|
||||
void common_mnemonic(arg1_type arg1, arg2_type arg2) { \
|
||||
AARCH64_ONLY(aarch64_mnemonic) NOT_AARCH64(arm32_mnemonic) (arg1, arg2); \
|
||||
arm32_mnemonic(arg1, arg2); \
|
||||
}
|
||||
|
||||
#define COMMON_INSTR_3(common_mnemonic, aarch64_mnemonic, arm32_mnemonic, arg1_type, arg2_type, arg3_type) \
|
||||
#define ARM_INSTR_3(common_mnemonic, arm32_mnemonic, arg1_type, arg2_type, arg3_type) \
|
||||
void common_mnemonic(arg1_type arg1, arg2_type arg2, arg3_type arg3) { \
|
||||
AARCH64_ONLY(aarch64_mnemonic) NOT_AARCH64(arm32_mnemonic) (arg1, arg2, arg3); \
|
||||
arm32_mnemonic(arg1, arg2, arg3); \
|
||||
}
|
||||
|
||||
COMMON_INSTR_1(jump, br, bx, Register)
|
||||
COMMON_INSTR_1(call, blr, blx, Register)
|
||||
ARM_INSTR_1(jump, bx, Register)
|
||||
ARM_INSTR_1(call, blx, Register)
|
||||
|
||||
COMMON_INSTR_2(cbz_32, cbz_w, cbz, Register, Label&)
|
||||
COMMON_INSTR_2(cbnz_32, cbnz_w, cbnz, Register, Label&)
|
||||
ARM_INSTR_2(cbz_32, cbz, Register, Label&)
|
||||
ARM_INSTR_2(cbnz_32, cbnz, Register, Label&)
|
||||
|
||||
COMMON_INSTR_2(ldr_u32, ldr_w, ldr, Register, Address)
|
||||
COMMON_INSTR_2(ldr_s32, ldrsw, ldr, Register, Address)
|
||||
COMMON_INSTR_2(str_32, str_w, str, Register, Address)
|
||||
ARM_INSTR_2(ldr_u32, ldr, Register, Address)
|
||||
ARM_INSTR_2(ldr_s32, ldr, Register, Address)
|
||||
ARM_INSTR_2(str_32, str, Register, Address)
|
||||
|
||||
COMMON_INSTR_2(mvn_32, mvn_w, mvn, Register, Register)
|
||||
COMMON_INSTR_2(cmp_32, cmp_w, cmp, Register, Register)
|
||||
COMMON_INSTR_2(neg_32, neg_w, neg, Register, Register)
|
||||
COMMON_INSTR_2(clz_32, clz_w, clz, Register, Register)
|
||||
COMMON_INSTR_2(rbit_32, rbit_w, rbit, Register, Register)
|
||||
ARM_INSTR_2(mvn_32, mvn, Register, Register)
|
||||
ARM_INSTR_2(cmp_32, cmp, Register, Register)
|
||||
ARM_INSTR_2(neg_32, neg, Register, Register)
|
||||
ARM_INSTR_2(clz_32, clz, Register, Register)
|
||||
ARM_INSTR_2(rbit_32, rbit, Register, Register)
|
||||
|
||||
COMMON_INSTR_2(cmp_32, cmp_w, cmp, Register, int)
|
||||
COMMON_INSTR_2(cmn_32, cmn_w, cmn, Register, int)
|
||||
ARM_INSTR_2(cmp_32, cmp, Register, int)
|
||||
ARM_INSTR_2(cmn_32, cmn, Register, int)
|
||||
|
||||
COMMON_INSTR_3(add_32, add_w, add, Register, Register, Register)
|
||||
COMMON_INSTR_3(sub_32, sub_w, sub, Register, Register, Register)
|
||||
COMMON_INSTR_3(subs_32, subs_w, subs, Register, Register, Register)
|
||||
COMMON_INSTR_3(mul_32, mul_w, mul, Register, Register, Register)
|
||||
COMMON_INSTR_3(and_32, andr_w, andr, Register, Register, Register)
|
||||
COMMON_INSTR_3(orr_32, orr_w, orr, Register, Register, Register)
|
||||
COMMON_INSTR_3(eor_32, eor_w, eor, Register, Register, Register)
|
||||
ARM_INSTR_3(add_32, add, Register, Register, Register)
|
||||
ARM_INSTR_3(sub_32, sub, Register, Register, Register)
|
||||
ARM_INSTR_3(subs_32, subs, Register, Register, Register)
|
||||
ARM_INSTR_3(mul_32, mul, Register, Register, Register)
|
||||
ARM_INSTR_3(and_32, andr, Register, Register, Register)
|
||||
ARM_INSTR_3(orr_32, orr, Register, Register, Register)
|
||||
ARM_INSTR_3(eor_32, eor, Register, Register, Register)
|
||||
|
||||
COMMON_INSTR_3(add_32, add_w, add, Register, Register, AsmOperand)
|
||||
COMMON_INSTR_3(sub_32, sub_w, sub, Register, Register, AsmOperand)
|
||||
COMMON_INSTR_3(orr_32, orr_w, orr, Register, Register, AsmOperand)
|
||||
COMMON_INSTR_3(eor_32, eor_w, eor, Register, Register, AsmOperand)
|
||||
COMMON_INSTR_3(and_32, andr_w, andr, Register, Register, AsmOperand)
|
||||
ARM_INSTR_3(add_32, add, Register, Register, AsmOperand)
|
||||
ARM_INSTR_3(sub_32, sub, Register, Register, AsmOperand)
|
||||
ARM_INSTR_3(orr_32, orr, Register, Register, AsmOperand)
|
||||
ARM_INSTR_3(eor_32, eor, Register, Register, AsmOperand)
|
||||
ARM_INSTR_3(and_32, andr, Register, Register, AsmOperand)
|
||||
|
||||
|
||||
COMMON_INSTR_3(add_32, add_w, add, Register, Register, int)
|
||||
COMMON_INSTR_3(adds_32, adds_w, adds, Register, Register, int)
|
||||
COMMON_INSTR_3(sub_32, sub_w, sub, Register, Register, int)
|
||||
COMMON_INSTR_3(subs_32, subs_w, subs, Register, Register, int)
|
||||
ARM_INSTR_3(add_32, add, Register, Register, int)
|
||||
ARM_INSTR_3(adds_32, adds, Register, Register, int)
|
||||
ARM_INSTR_3(sub_32, sub, Register, Register, int)
|
||||
ARM_INSTR_3(subs_32, subs, Register, Register, int)
|
||||
|
||||
COMMON_INSTR_2(tst_32, tst_w, tst, Register, unsigned int)
|
||||
COMMON_INSTR_2(tst_32, tst_w, tst, Register, AsmOperand)
|
||||
ARM_INSTR_2(tst_32, tst, Register, unsigned int)
|
||||
ARM_INSTR_2(tst_32, tst, Register, AsmOperand)
|
||||
|
||||
COMMON_INSTR_3(and_32, andr_w, andr, Register, Register, uint)
|
||||
COMMON_INSTR_3(orr_32, orr_w, orr, Register, Register, uint)
|
||||
COMMON_INSTR_3(eor_32, eor_w, eor, Register, Register, uint)
|
||||
ARM_INSTR_3(and_32, andr, Register, Register, uint)
|
||||
ARM_INSTR_3(orr_32, orr, Register, Register, uint)
|
||||
ARM_INSTR_3(eor_32, eor, Register, Register, uint)
|
||||
|
||||
COMMON_INSTR_1(cmp_zero_float, fcmp0_s, fcmpzs, FloatRegister)
|
||||
COMMON_INSTR_1(cmp_zero_double, fcmp0_d, fcmpzd, FloatRegister)
|
||||
ARM_INSTR_1(cmp_zero_float, fcmpzs, FloatRegister)
|
||||
ARM_INSTR_1(cmp_zero_double, fcmpzd, FloatRegister)
|
||||
|
||||
COMMON_INSTR_2(ldr_float, ldr_s, flds, FloatRegister, Address)
|
||||
COMMON_INSTR_2(str_float, str_s, fsts, FloatRegister, Address)
|
||||
COMMON_INSTR_2(mov_float, fmov_s, fcpys, FloatRegister, FloatRegister)
|
||||
COMMON_INSTR_2(neg_float, fneg_s, fnegs, FloatRegister, FloatRegister)
|
||||
COMMON_INSTR_2(abs_float, fabs_s, fabss, FloatRegister, FloatRegister)
|
||||
COMMON_INSTR_2(sqrt_float, fsqrt_s, fsqrts, FloatRegister, FloatRegister)
|
||||
COMMON_INSTR_2(cmp_float, fcmp_s, fcmps, FloatRegister, FloatRegister)
|
||||
ARM_INSTR_2(ldr_float, flds, FloatRegister, Address)
|
||||
ARM_INSTR_2(str_float, fsts, FloatRegister, Address)
|
||||
ARM_INSTR_2(mov_float, fcpys, FloatRegister, FloatRegister)
|
||||
ARM_INSTR_2(neg_float, fnegs, FloatRegister, FloatRegister)
|
||||
ARM_INSTR_2(abs_float, fabss, FloatRegister, FloatRegister)
|
||||
ARM_INSTR_2(sqrt_float, fsqrts, FloatRegister, FloatRegister)
|
||||
ARM_INSTR_2(cmp_float, fcmps, FloatRegister, FloatRegister)
|
||||
|
||||
COMMON_INSTR_3(add_float, fadd_s, fadds, FloatRegister, FloatRegister, FloatRegister)
|
||||
COMMON_INSTR_3(sub_float, fsub_s, fsubs, FloatRegister, FloatRegister, FloatRegister)
|
||||
COMMON_INSTR_3(mul_float, fmul_s, fmuls, FloatRegister, FloatRegister, FloatRegister)
|
||||
COMMON_INSTR_3(div_float, fdiv_s, fdivs, FloatRegister, FloatRegister, FloatRegister)
|
||||
ARM_INSTR_3(add_float, fadds, FloatRegister, FloatRegister, FloatRegister)
|
||||
ARM_INSTR_3(sub_float, fsubs, FloatRegister, FloatRegister, FloatRegister)
|
||||
ARM_INSTR_3(mul_float, fmuls, FloatRegister, FloatRegister, FloatRegister)
|
||||
ARM_INSTR_3(div_float, fdivs, FloatRegister, FloatRegister, FloatRegister)
|
||||
|
||||
COMMON_INSTR_2(ldr_double, ldr_d, fldd, FloatRegister, Address)
|
||||
COMMON_INSTR_2(str_double, str_d, fstd, FloatRegister, Address)
|
||||
COMMON_INSTR_2(mov_double, fmov_d, fcpyd, FloatRegister, FloatRegister)
|
||||
COMMON_INSTR_2(neg_double, fneg_d, fnegd, FloatRegister, FloatRegister)
|
||||
COMMON_INSTR_2(cmp_double, fcmp_d, fcmpd, FloatRegister, FloatRegister)
|
||||
COMMON_INSTR_2(abs_double, fabs_d, fabsd, FloatRegister, FloatRegister)
|
||||
COMMON_INSTR_2(sqrt_double, fsqrt_d, fsqrtd, FloatRegister, FloatRegister)
|
||||
ARM_INSTR_2(ldr_double, fldd, FloatRegister, Address)
|
||||
ARM_INSTR_2(str_double, fstd, FloatRegister, Address)
|
||||
ARM_INSTR_2(mov_double, fcpyd, FloatRegister, FloatRegister)
|
||||
ARM_INSTR_2(neg_double, fnegd, FloatRegister, FloatRegister)
|
||||
ARM_INSTR_2(cmp_double, fcmpd, FloatRegister, FloatRegister)
|
||||
ARM_INSTR_2(abs_double, fabsd, FloatRegister, FloatRegister)
|
||||
ARM_INSTR_2(sqrt_double, fsqrtd, FloatRegister, FloatRegister)
|
||||
|
||||
COMMON_INSTR_3(add_double, fadd_d, faddd, FloatRegister, FloatRegister, FloatRegister)
|
||||
COMMON_INSTR_3(sub_double, fsub_d, fsubd, FloatRegister, FloatRegister, FloatRegister)
|
||||
COMMON_INSTR_3(mul_double, fmul_d, fmuld, FloatRegister, FloatRegister, FloatRegister)
|
||||
COMMON_INSTR_3(div_double, fdiv_d, fdivd, FloatRegister, FloatRegister, FloatRegister)
|
||||
ARM_INSTR_3(add_double, faddd, FloatRegister, FloatRegister, FloatRegister)
|
||||
ARM_INSTR_3(sub_double, fsubd, FloatRegister, FloatRegister, FloatRegister)
|
||||
ARM_INSTR_3(mul_double, fmuld, FloatRegister, FloatRegister, FloatRegister)
|
||||
ARM_INSTR_3(div_double, fdivd, FloatRegister, FloatRegister, FloatRegister)
|
||||
|
||||
COMMON_INSTR_2(convert_f2d, fcvt_ds, fcvtds, FloatRegister, FloatRegister)
|
||||
COMMON_INSTR_2(convert_d2f, fcvt_sd, fcvtsd, FloatRegister, FloatRegister)
|
||||
ARM_INSTR_2(convert_f2d, fcvtds, FloatRegister, FloatRegister)
|
||||
ARM_INSTR_2(convert_d2f, fcvtsd, FloatRegister, FloatRegister)
|
||||
|
||||
COMMON_INSTR_2(mov_fpr2gpr_float, fmov_ws, fmrs, Register, FloatRegister)
|
||||
ARM_INSTR_2(mov_fpr2gpr_float, fmrs, Register, FloatRegister)
|
||||
|
||||
#undef COMMON_INSTR_1
|
||||
#undef COMMON_INSTR_2
|
||||
#undef COMMON_INSTR_3
|
||||
#undef ARM_INSTR_1
|
||||
#undef ARM_INSTR_2
|
||||
#undef ARM_INSTR_3
|
||||
|
||||
|
||||
#ifdef AARCH64
|
||||
|
||||
void mov(Register dst, Register src, AsmCondition cond) {
|
||||
if (cond == al) {
|
||||
mov(dst, src);
|
||||
} else {
|
||||
csel(dst, src, dst, cond);
|
||||
}
|
||||
}
|
||||
|
||||
// Propagate other overloaded "mov" methods from Assembler.
|
||||
void mov(Register dst, Register src) { Assembler::mov(dst, src); }
|
||||
void mov(Register rd, int imm) { Assembler::mov(rd, imm); }
|
||||
|
||||
void mov(Register dst, int imm, AsmCondition cond) {
|
||||
assert(imm == 0 || imm == 1, "");
|
||||
if (imm == 0) {
|
||||
mov(dst, ZR, cond);
|
||||
} else if (imm == 1) {
|
||||
csinc(dst, dst, ZR, inverse(cond));
|
||||
} else if (imm == -1) {
|
||||
csinv(dst, dst, ZR, inverse(cond));
|
||||
} else {
|
||||
fatal("illegal mov(R%d,%d,cond)", dst->encoding(), imm);
|
||||
}
|
||||
}
|
||||
|
||||
void movs(Register dst, Register src) { adds(dst, src, 0); }
|
||||
|
||||
#else // AARCH64
|
||||
|
||||
void tbz(Register rt, int bit, Label& L) {
|
||||
assert(0 <= bit && bit < BitsPerWord, "bit number is out of range");
|
||||
@ -829,166 +742,91 @@ public:
|
||||
bx(dst);
|
||||
}
|
||||
|
||||
#endif // AARCH64
|
||||
|
||||
Register zero_register(Register tmp) {
|
||||
#ifdef AARCH64
|
||||
return ZR;
|
||||
#else
|
||||
mov(tmp, 0);
|
||||
return tmp;
|
||||
#endif
|
||||
}
|
||||
|
||||
void logical_shift_left(Register dst, Register src, int shift) {
|
||||
#ifdef AARCH64
|
||||
_lsl(dst, src, shift);
|
||||
#else
|
||||
mov(dst, AsmOperand(src, lsl, shift));
|
||||
#endif
|
||||
}
|
||||
|
||||
void logical_shift_left_32(Register dst, Register src, int shift) {
|
||||
#ifdef AARCH64
|
||||
_lsl_w(dst, src, shift);
|
||||
#else
|
||||
mov(dst, AsmOperand(src, lsl, shift));
|
||||
#endif
|
||||
}
|
||||
|
||||
void logical_shift_right(Register dst, Register src, int shift) {
|
||||
#ifdef AARCH64
|
||||
_lsr(dst, src, shift);
|
||||
#else
|
||||
mov(dst, AsmOperand(src, lsr, shift));
|
||||
#endif
|
||||
}
|
||||
|
||||
void arith_shift_right(Register dst, Register src, int shift) {
|
||||
#ifdef AARCH64
|
||||
_asr(dst, src, shift);
|
||||
#else
|
||||
mov(dst, AsmOperand(src, asr, shift));
|
||||
#endif
|
||||
}
|
||||
|
||||
void asr_32(Register dst, Register src, int shift) {
|
||||
#ifdef AARCH64
|
||||
_asr_w(dst, src, shift);
|
||||
#else
|
||||
mov(dst, AsmOperand(src, asr, shift));
|
||||
#endif
|
||||
}
|
||||
|
||||
// If <cond> holds, compares r1 and r2. Otherwise, flags are set so that <cond> does not hold.
void cond_cmp(Register r1, Register r2, AsmCondition cond) {
#ifdef AARCH64
ccmp(r1, r2, flags_for_condition(inverse(cond)), cond);
#else
cmp(r1, r2, cond);
#endif
}

// If <cond> holds, compares r and imm. Otherwise, flags are set so that <cond> does not hold.
void cond_cmp(Register r, int imm, AsmCondition cond) {
#ifdef AARCH64
ccmp(r, imm, flags_for_condition(inverse(cond)), cond);
#else
cmp(r, imm, cond);
#endif
}
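Illustration (not part of the patch): cond_cmp chains two checks through the flags — if <cond> already fails, the flags are left (or forced) so it still fails afterwards; only if it holds is the new compare performed. A toy stand-alone C++ model of that contract (Cond and Flags here are illustrative simplifications, not the real AsmCondition):

    #include <cassert>

    enum Cond { eq, ne, lt, ge };                 // tiny subset of conditions
    struct Flags { bool n, z, c, v; };

    static bool holds(Cond c, Flags f) {
      switch (c) {
        case eq: return f.z;
        case ne: return !f.z;
        case lt: return f.n != f.v;
        case ge: return f.n == f.v;
      }
      return false;
    }

    // Flags produced by a plain compare a vs. b (overflow ignored in this toy model).
    static Flags compare(long a, long b) {
      Flags f;
      f.z = (a == b);
      f.n = (a - b < 0);
      f.c = (unsigned long)a >= (unsigned long)b;
      f.v = false;
      return f;
    }

    // cond_cmp model: compare only if 'cond' currently holds, otherwise keep the
    // flags under which 'cond' already fails.
    static Flags cond_cmp_model(long r1, long r2, Cond cond, Flags flags) {
      return holds(cond, flags) ? compare(r1, r2) : flags;
    }

    int main() {
      Flags f = compare(1, 1);            // EQ holds after the first compare
      f = cond_cmp_model(2, 3, eq, f);    // EQ held, so 2 vs. 3 is compared: EQ now fails
      assert(!holds(eq, f));
      return 0;
    }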
|
||||
|
||||
void align_reg(Register dst, Register src, int align) {
|
||||
assert (is_power_of_2(align), "should be");
|
||||
#ifdef AARCH64
|
||||
andr(dst, src, ~(uintx)(align-1));
|
||||
#else
|
||||
bic(dst, src, align-1);
|
||||
#endif
|
||||
}
|
||||
|
||||
void prefetch_read(Address addr) {
|
||||
#ifdef AARCH64
|
||||
prfm(pldl1keep, addr);
|
||||
#else
|
||||
pld(addr);
|
||||
#endif
|
||||
}
|
||||
|
||||
void raw_push(Register r1, Register r2) {
|
||||
#ifdef AARCH64
|
||||
stp(r1, r2, Address(SP, -2*wordSize, pre_indexed));
|
||||
#else
|
||||
assert(r1->encoding() < r2->encoding(), "should be ordered");
|
||||
push(RegisterSet(r1) | RegisterSet(r2));
|
||||
#endif
|
||||
}
|
||||
|
||||
void raw_pop(Register r1, Register r2) {
|
||||
#ifdef AARCH64
|
||||
ldp(r1, r2, Address(SP, 2*wordSize, post_indexed));
|
||||
#else
|
||||
assert(r1->encoding() < r2->encoding(), "should be ordered");
|
||||
pop(RegisterSet(r1) | RegisterSet(r2));
|
||||
#endif
|
||||
}
|
||||
|
||||
void raw_push(Register r1, Register r2, Register r3) {
|
||||
#ifdef AARCH64
|
||||
raw_push(r1, r2);
|
||||
raw_push(r3, ZR);
|
||||
#else
|
||||
assert(r1->encoding() < r2->encoding() && r2->encoding() < r3->encoding(), "should be ordered");
|
||||
push(RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3));
|
||||
#endif
|
||||
}
|
||||
|
||||
void raw_pop(Register r1, Register r2, Register r3) {
|
||||
#ifdef AARCH64
|
||||
raw_pop(r3, ZR);
|
||||
raw_pop(r1, r2);
|
||||
#else
|
||||
assert(r1->encoding() < r2->encoding() && r2->encoding() < r3->encoding(), "should be ordered");
|
||||
pop(RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3));
|
||||
#endif
|
||||
}
|
||||
|
||||
// Restores registers r1 and r2 previously saved by raw_push(r1, r2, ret_addr) and returns by ret_addr. Clobbers LR.
|
||||
void raw_pop_and_ret(Register r1, Register r2) {
|
||||
#ifdef AARCH64
|
||||
raw_pop(r1, r2, LR);
|
||||
ret();
|
||||
#else
|
||||
raw_pop(r1, r2, PC);
|
||||
#endif
|
||||
}
|
||||
|
||||
void indirect_jump(Address addr, Register scratch) {
|
||||
#ifdef AARCH64
|
||||
ldr(scratch, addr);
|
||||
br(scratch);
|
||||
#else
|
||||
ldr(PC, addr);
|
||||
#endif
|
||||
}
|
||||
|
||||
void indirect_jump(InlinedAddress& literal, Register scratch) {
|
||||
#ifdef AARCH64
|
||||
ldr_literal(scratch, literal);
|
||||
br(scratch);
|
||||
#else
|
||||
ldr_literal(PC, literal);
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifndef AARCH64
|
||||
void neg(Register dst, Register src) {
|
||||
rsb(dst, src, 0);
|
||||
}
|
||||
#endif
|
||||
|
||||
void branch_if_negative_32(Register r, Label& L) {
|
||||
// Note about branch_if_negative_32() / branch_if_any_negative_32() implementation for AArch64:
|
||||
// TODO: This function and branch_if_any_negative_32 could possibly
|
||||
// be revised after the aarch64 removal.
|
||||
// tbnz is not used instead of tst & b.mi because destination may be out of tbnz range (+-32KB)
|
||||
// since these methods are used in LIR_Assembler::emit_arraycopy() to jump to stub entry.
|
||||
tst_32(r, r);
|
||||
@ -996,56 +834,31 @@ public:
|
||||
}
|
||||
|
||||
void branch_if_any_negative_32(Register r1, Register r2, Register tmp, Label& L) {
|
||||
#ifdef AARCH64
|
||||
orr_32(tmp, r1, r2);
|
||||
tst_32(tmp, tmp);
|
||||
#else
|
||||
orrs(tmp, r1, r2);
|
||||
#endif
|
||||
b(L, mi);
|
||||
}
|
||||
|
||||
void branch_if_any_negative_32(Register r1, Register r2, Register r3, Register tmp, Label& L) {
|
||||
orr_32(tmp, r1, r2);
|
||||
#ifdef AARCH64
|
||||
orr_32(tmp, tmp, r3);
|
||||
tst_32(tmp, tmp);
|
||||
#else
|
||||
orrs(tmp, tmp, r3);
|
||||
#endif
|
||||
b(L, mi);
|
||||
}
|
||||
|
||||
void add_ptr_scaled_int32(Register dst, Register r1, Register r2, int shift) {
|
||||
#ifdef AARCH64
|
||||
add(dst, r1, r2, ex_sxtw, shift);
|
||||
#else
|
||||
add(dst, r1, AsmOperand(r2, lsl, shift));
|
||||
#endif
|
||||
}
|
||||
|
||||
void sub_ptr_scaled_int32(Register dst, Register r1, Register r2, int shift) {
|
||||
#ifdef AARCH64
|
||||
sub(dst, r1, r2, ex_sxtw, shift);
|
||||
#else
|
||||
sub(dst, r1, AsmOperand(r2, lsl, shift));
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
// klass oop manipulations if compressed
|
||||
|
||||
#ifdef AARCH64
|
||||
void load_klass(Register dst_klass, Register src_oop);
|
||||
#else
|
||||
void load_klass(Register dst_klass, Register src_oop, AsmCondition cond = al);
|
||||
#endif // AARCH64
|
||||
|
||||
void store_klass(Register src_klass, Register dst_oop);
|
||||
|
||||
#ifdef AARCH64
|
||||
void store_klass_gap(Register dst);
|
||||
#endif // AARCH64
|
||||
|
||||
// oop manipulations
|
||||
|
||||
@ -1060,39 +873,6 @@ public:
|
||||
// All other registers are preserved.
|
||||
void resolve(DecoratorSet decorators, Register obj);
|
||||
|
||||
#ifdef AARCH64
|
||||
void encode_heap_oop(Register dst, Register src);
|
||||
void encode_heap_oop(Register r) {
|
||||
encode_heap_oop(r, r);
|
||||
}
|
||||
void decode_heap_oop(Register dst, Register src);
|
||||
void decode_heap_oop(Register r) {
|
||||
decode_heap_oop(r, r);
|
||||
}
|
||||
|
||||
#ifdef COMPILER2
|
||||
void encode_heap_oop_not_null(Register dst, Register src);
|
||||
void decode_heap_oop_not_null(Register dst, Register src);
|
||||
|
||||
void set_narrow_klass(Register dst, Klass* k);
|
||||
void set_narrow_oop(Register dst, jobject obj);
|
||||
#endif
|
||||
|
||||
void encode_klass_not_null(Register r);
|
||||
void encode_klass_not_null(Register dst, Register src);
|
||||
void decode_klass_not_null(Register r);
|
||||
void decode_klass_not_null(Register dst, Register src);
|
||||
|
||||
void reinit_heapbase();
|
||||
|
||||
#ifdef ASSERT
|
||||
void verify_heapbase(const char* msg);
|
||||
#endif // ASSERT
|
||||
|
||||
static int instr_count_for_mov_slow(intptr_t c);
|
||||
static int instr_count_for_mov_slow(address addr);
|
||||
static int instr_count_for_decode_klass_not_null();
|
||||
#endif // AARCH64
|
||||
|
||||
void ldr_global_ptr(Register reg, address address_of_global);
|
||||
void ldr_global_s32(Register reg, address address_of_global);
|
||||
@ -1108,12 +888,7 @@ public:
|
||||
|
||||
assert ((offset() & (wordSize-1)) == 0, "should be aligned by word size");
|
||||
|
||||
#ifdef AARCH64
|
||||
emit_int32(address_placeholder_instruction);
|
||||
emit_int32(address_placeholder_instruction);
|
||||
#else
|
||||
AbstractAssembler::emit_address((address)address_placeholder_instruction);
|
||||
#endif
|
||||
}
|
||||
|
||||
void b(address target, AsmCondition cond = al) {
|
||||
@ -1124,15 +899,14 @@ public:
|
||||
Assembler::b(target(L), cond);
|
||||
}
|
||||
|
||||
void bl(address target NOT_AARCH64_ARG(AsmCondition cond = al)) {
|
||||
Assembler::bl(target NOT_AARCH64_ARG(cond));
|
||||
void bl(address target, AsmCondition cond = al) {
|
||||
Assembler::bl(target, cond);
|
||||
}
|
||||
void bl(Label& L NOT_AARCH64_ARG(AsmCondition cond = al)) {
|
||||
void bl(Label& L, AsmCondition cond = al) {
|
||||
// internal calls
|
||||
Assembler::bl(target(L) NOT_AARCH64_ARG(cond));
|
||||
Assembler::bl(target(L), cond);
|
||||
}
|
||||
|
||||
#ifndef AARCH64
|
||||
void adr(Register dest, Label& L, AsmCondition cond = al) {
|
||||
int delta = target(L) - pc() - 8;
|
||||
if (delta >= 0) {
|
||||
@ -1141,7 +915,6 @@ public:
|
||||
sub(dest, PC, -delta, cond);
|
||||
}
|
||||
}
|
||||
#endif // !AARCH64
|
||||
|
||||
// Variable-length jump and calls. We now distinguish only the
|
||||
// patchable case from the other cases. Patchable must be
|
||||
@ -1165,30 +938,23 @@ public:
|
||||
// specified to allow future optimizations.
|
||||
void jump(address target,
|
||||
relocInfo::relocType rtype = relocInfo::runtime_call_type,
|
||||
Register scratch = AARCH64_ONLY(Rtemp) NOT_AARCH64(noreg)
|
||||
#ifndef AARCH64
|
||||
, AsmCondition cond = al
|
||||
#endif
|
||||
);
|
||||
Register scratch = noreg, AsmCondition cond = al);
|
||||
|
||||
void call(address target,
|
||||
RelocationHolder rspec
|
||||
NOT_AARCH64_ARG(AsmCondition cond = al));
|
||||
RelocationHolder rspec, AsmCondition cond = al);
|
||||
|
||||
void call(address target,
|
||||
relocInfo::relocType rtype = relocInfo::runtime_call_type
|
||||
NOT_AARCH64_ARG(AsmCondition cond = al)) {
|
||||
call(target, Relocation::spec_simple(rtype) NOT_AARCH64_ARG(cond));
|
||||
relocInfo::relocType rtype = relocInfo::runtime_call_type,
|
||||
AsmCondition cond = al) {
|
||||
call(target, Relocation::spec_simple(rtype), cond);
|
||||
}
|
||||
|
||||
void jump(AddressLiteral dest) {
|
||||
jump(dest.target(), dest.reloc());
|
||||
}
|
||||
#ifndef AARCH64
|
||||
void jump(address dest, relocInfo::relocType rtype, AsmCondition cond) {
|
||||
jump(dest, rtype, Rtemp, cond);
|
||||
}
|
||||
#endif
|
||||
|
||||
void call(AddressLiteral dest) {
|
||||
call(dest.target(), dest.reloc());
|
||||
@ -1206,10 +972,7 @@ public:
|
||||
// specified to allow future optimizations.
|
||||
void patchable_jump(address target,
|
||||
relocInfo::relocType rtype = relocInfo::runtime_call_type,
|
||||
Register scratch = AARCH64_ONLY(Rtemp) NOT_AARCH64(noreg)
|
||||
#ifndef AARCH64
|
||||
, AsmCondition cond = al
|
||||
#endif
|
||||
Register scratch = noreg, AsmCondition cond = al
|
||||
);
|
||||
|
||||
// patchable_call may scratch Rtemp
|
||||
@ -1223,13 +986,7 @@ public:
|
||||
return patchable_call(target, Relocation::spec_simple(rtype), c2);
|
||||
}
|
||||
|
||||
#if defined(AARCH64) && defined(COMPILER2)
|
||||
static int call_size(address target, bool far, bool patchable);
|
||||
#endif
|
||||
|
||||
#ifdef AARCH64
|
||||
static bool page_reachable_from_cache(address target);
|
||||
#endif
|
||||
static bool _reachable_from_cache(address target);
|
||||
static bool _cache_fully_reachable();
|
||||
bool cache_fully_reachable();
|
||||
@ -1239,15 +996,8 @@ public:
|
||||
void sign_extend(Register rd, Register rn, int bits);
|
||||
|
||||
inline void zap_high_non_significant_bits(Register r) {
|
||||
#ifdef AARCH64
|
||||
if(ZapHighNonSignificantBits) {
|
||||
movk(r, 0xBAAD, 48);
|
||||
movk(r, 0xF00D, 32);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifndef AARCH64
|
||||
void cmpoop(Register obj1, Register obj2);
|
||||
|
||||
void long_move(Register rd_lo, Register rd_hi,
|
||||
@ -1263,7 +1013,6 @@ public:
|
||||
void atomic_cas(Register tmpreg1, Register tmpreg2, Register oldval, Register newval, Register base, int offset);
|
||||
void atomic_cas_bool(Register oldval, Register newval, Register base, int offset, Register tmpreg);
|
||||
void atomic_cas64(Register temp_lo, Register temp_hi, Register temp_result, Register oldval_lo, Register oldval_hi, Register newval_lo, Register newval_hi, Register base, int offset);
|
||||
#endif // !AARCH64
|
||||
|
||||
void cas_for_lock_acquire(Register oldval, Register newval, Register base, Register tmp, Label &slow_case, bool allow_fallthrough_on_failure = false, bool one_shot = false);
|
||||
void cas_for_lock_release(Register oldval, Register newval, Register base, Register tmp, Label &slow_case, bool allow_fallthrough_on_failure = false, bool one_shot = false);
|
||||
@ -1286,14 +1035,9 @@ public:
|
||||
// size must not exceed wordSize (i.e. 8-byte values are not supported on 32-bit ARM);
|
||||
// each of these calls generates exactly one load or store instruction,
|
||||
// so src can be pre- or post-indexed address.
|
||||
#ifdef AARCH64
|
||||
void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed);
|
||||
void store_sized_value(Register src, Address dst, size_t size_in_bytes);
|
||||
#else
|
||||
// 32-bit ARM variants also support conditional execution
|
||||
void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, AsmCondition cond = al);
|
||||
void store_sized_value(Register src, Address dst, size_t size_in_bytes, AsmCondition cond = al);
|
||||
#endif
|
||||
|
||||
void lookup_interface_method(Register recv_klass,
|
||||
Register intf_klass,
|
||||
@ -1315,11 +1059,7 @@ public:
|
||||
|
||||
void ldr_literal(Register rd, AddressLiteral addr) {
|
||||
relocate(addr.rspec());
|
||||
#ifdef AARCH64
|
||||
ldr(rd, addr.target());
|
||||
#else
|
||||
ldr(rd, Address(PC, addr.target() - pc() - 8));
|
||||
#endif
|
||||
}
|
||||
|
||||
void lea(Register Rd, AddressLiteral addr) {
|
||||
@ -1330,46 +1070,10 @@ public:
|
||||
void restore_default_fp_mode();
|
||||
|
||||
#ifdef COMPILER2
|
||||
#ifdef AARCH64
|
||||
// Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
|
||||
void fast_lock(Register obj, Register box, Register scratch, Register scratch2, Register scratch3);
|
||||
void fast_unlock(Register obj, Register box, Register scratch, Register scratch2, Register scratch3);
|
||||
#else
|
||||
void fast_lock(Register obj, Register box, Register scratch, Register scratch2);
|
||||
void fast_unlock(Register obj, Register box, Register scratch, Register scratch2);
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifdef AARCH64
|
||||
|
||||
#define F(mnemonic) \
|
||||
void mnemonic(Register rt, address target) { \
|
||||
Assembler::mnemonic(rt, target); \
|
||||
} \
|
||||
void mnemonic(Register rt, Label& L) { \
|
||||
Assembler::mnemonic(rt, target(L)); \
|
||||
}
|
||||
|
||||
F(cbz_w);
|
||||
F(cbnz_w);
|
||||
F(cbz);
|
||||
F(cbnz);
|
||||
|
||||
#undef F
|
||||
|
||||
#define F(mnemonic) \
|
||||
void mnemonic(Register rt, int bit, address target) { \
|
||||
Assembler::mnemonic(rt, bit, target); \
|
||||
} \
|
||||
void mnemonic(Register rt, int bit, Label& L) { \
|
||||
Assembler::mnemonic(rt, bit, target(L)); \
|
||||
}
|
||||
|
||||
F(tbz);
|
||||
F(tbnz);
|
||||
#undef F
|
||||
|
||||
#endif // AARCH64
|
||||
|
||||
};
|
||||
|
||||
|
@ -32,46 +32,9 @@
|
||||
|
||||
inline void MacroAssembler::pd_patch_instruction(address branch, address target, const char* file, int line) {
|
||||
int instr = *(int*)branch;
|
||||
int new_offset = (int)(target - branch NOT_AARCH64(- 8));
|
||||
int new_offset = (int)(target - branch - 8);
|
||||
assert((new_offset & 3) == 0, "bad alignment");
|
||||
|
||||
#ifdef AARCH64
|
||||
if ((instr & (0x1f << 26)) == (0b00101 << 26)) {
|
||||
// Unconditional B or BL
|
||||
assert (is_offset_in_range(new_offset, 26), "offset is too large");
|
||||
*(int*)branch = (instr & ~right_n_bits(26)) | encode_offset(new_offset, 26, 0);
|
||||
} else if ((instr & (0xff << 24)) == (0b01010100 << 24) && (instr & (1 << 4)) == 0) {
|
||||
// Conditional B
|
||||
assert (is_offset_in_range(new_offset, 19), "offset is too large");
|
||||
*(int*)branch = (instr & ~(right_n_bits(19) << 5)) | encode_offset(new_offset, 19, 5);
|
||||
} else if ((instr & (0b111111 << 25)) == (0b011010 << 25)) {
|
||||
// Compare & branch CBZ/CBNZ
|
||||
assert (is_offset_in_range(new_offset, 19), "offset is too large");
|
||||
*(int*)branch = (instr & ~(right_n_bits(19) << 5)) | encode_offset(new_offset, 19, 5);
|
||||
} else if ((instr & (0b111111 << 25)) == (0b011011 << 25)) {
|
||||
// Test & branch TBZ/TBNZ
|
||||
assert (is_offset_in_range(new_offset, 14), "offset is too large");
|
||||
*(int*)branch = (instr & ~(right_n_bits(14) << 5)) | encode_offset(new_offset, 14, 5);
|
||||
} else if ((instr & (0b111011 << 24)) == (0b011000 << 24)) {
|
||||
// LDR (literal)
|
||||
unsigned opc = ((unsigned)instr >> 30);
|
||||
assert (opc != 0b01 || ((uintx)target & 7) == 0, "ldr target should be aligned");
|
||||
assert (is_offset_in_range(new_offset, 19), "offset is too large");
|
||||
*(int*)branch = (instr & ~(right_n_bits(19) << 5)) | encode_offset(new_offset, 19, 5);
|
||||
} else if (((instr & (1 << 31)) == 0) && ((instr & (0b11111 << 24)) == (0b10000 << 24))) {
|
||||
// ADR
|
||||
assert (is_imm_in_range(new_offset, 21, 0), "offset is too large");
|
||||
instr = (instr & ~(right_n_bits(2) << 29)) | (new_offset & 3) << 29;
|
||||
*(int*)branch = (instr & ~(right_n_bits(19) << 5)) | encode_imm(new_offset >> 2, 19, 0, 5);
|
||||
} else if((unsigned int)instr == address_placeholder_instruction) {
|
||||
// address
|
||||
assert (*(unsigned int *)(branch + InstructionSize) == address_placeholder_instruction, "address placeholder occupies two instructions");
|
||||
*(intx*)branch = (intx)target;
|
||||
} else {
|
||||
::tty->print_cr("=============== instruction: 0x%x ================\n", instr);
|
||||
Unimplemented(); // TODO-AARCH64
|
||||
}
|
||||
#else
|
||||
if ((instr & 0x0e000000) == 0x0a000000) {
|
||||
// B or BL instruction
|
||||
assert(new_offset < 0x2000000 && new_offset > -0x2000000, "encoding constraint");
|
||||
@ -98,7 +61,6 @@ inline void MacroAssembler::pd_patch_instruction(address branch, address target,
|
||||
*(int*)branch = (instr & 0xff0ff000) | 1 << 20 | -new_offset;
|
||||
}
|
||||
}
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
#endif // CPU_ARM_VM_MACROASSEMBLER_ARM_INLINE_HPP
|
||||
|
@ -125,15 +125,8 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, bool for_comp
|
||||
// compiled code in threads for which the event is enabled. Check here for
|
||||
// interp_only_mode if these events CAN be enabled.
|
||||
__ ldr_s32(Rtemp, Address(Rthread, JavaThread::interp_only_mode_offset()));
|
||||
#ifdef AARCH64
|
||||
Label L;
|
||||
__ cbz(Rtemp, L);
|
||||
__ indirect_jump(Address(Rmethod, Method::interpreter_entry_offset()), Rtemp);
|
||||
__ bind(L);
|
||||
#else
|
||||
__ cmp(Rtemp, 0);
|
||||
__ ldr(PC, Address(Rmethod, Method::interpreter_entry_offset()), ne);
|
||||
#endif // AARCH64
|
||||
}
|
||||
const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
|
||||
Method::from_interpreted_offset();
|
||||
@ -268,11 +261,7 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
|
||||
DEBUG_ONLY(rdx_param_size = noreg);
|
||||
}
|
||||
Register rbx_member = rbx_method; // MemberName ptr; incoming method ptr is dead now
|
||||
#ifdef AARCH64
|
||||
__ ldr(rbx_member, Address(Rparams, Interpreter::stackElementSize, post_indexed));
|
||||
#else
|
||||
__ pop(rbx_member);
|
||||
#endif
|
||||
generate_method_handle_dispatch(_masm, iid, rcx_recv, rbx_member, not_for_compiler_entry);
|
||||
}
|
||||
return entry_point;
|
||||
@ -288,22 +277,15 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
|
||||
Register rbx_method = Rmethod; // eventual target of this invocation
|
||||
// temps used in this code are not used in *either* compiled or interpreted calling sequences
|
||||
Register temp1 = (for_compiler_entry ? saved_last_sp_register() : R1_tmp);
|
||||
Register temp2 = AARCH64_ONLY(R9) NOT_AARCH64(R8);
|
||||
Register temp2 = R8;
|
||||
Register temp3 = Rtemp; // R12/R16
|
||||
Register temp4 = AARCH64_ONLY(Rtemp2) NOT_AARCH64(R5);
|
||||
Register temp4 = R5;
|
||||
if (for_compiler_entry) {
|
||||
assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : j_rarg0), "only valid assignment");
|
||||
#ifdef AARCH64
|
||||
assert_different_registers(temp1, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7);
|
||||
assert_different_registers(temp2, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7);
|
||||
assert_different_registers(temp3, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7);
|
||||
assert_different_registers(temp4, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7);
|
||||
#else
|
||||
assert_different_registers(temp1, j_rarg0, j_rarg1, j_rarg2, j_rarg3);
|
||||
assert_different_registers(temp2, j_rarg0, j_rarg1, j_rarg2, j_rarg3);
|
||||
assert_different_registers(temp3, j_rarg0, j_rarg1, j_rarg2, j_rarg3);
|
||||
assert_different_registers(temp4, j_rarg0, j_rarg1, j_rarg2, j_rarg3);
|
||||
#endif // AARCH64
|
||||
}
|
||||
assert_different_registers(temp1, temp2, temp3, receiver_reg);
|
||||
assert_different_registers(temp1, temp2, temp3, temp4, member_reg);
|
||||
@ -353,12 +335,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
|
||||
__ load_heap_oop(temp2_defc, member_clazz);
|
||||
load_klass_from_Class(_masm, temp2_defc, temp3, temp4);
|
||||
__ verify_klass_ptr(temp2_defc);
|
||||
#ifdef AARCH64
|
||||
// TODO-AARCH64
|
||||
__ b(L_ok);
|
||||
#else
|
||||
__ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, temp4, noreg, L_ok);
|
||||
#endif
|
||||
// If we get here, the type check failed!
|
||||
__ stop("receiver class disagrees with MemberName.clazz");
|
||||
__ bind(L_ok);
|
||||
@ -484,13 +461,9 @@ enum {
|
||||
// the slop defends against false alarms due to fencepost errors
|
||||
};
|
||||
|
||||
#ifdef AARCH64
|
||||
const int trace_mh_nregs = 32; // R0-R30, PC
|
||||
#else
|
||||
const int trace_mh_nregs = 15;
|
||||
const Register trace_mh_regs[trace_mh_nregs] =
|
||||
{R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR, PC};
|
||||
#endif // AARCH64
|
||||
|
||||
void trace_method_handle_stub(const char* adaptername,
|
||||
intptr_t* saved_regs,
|
||||
@ -501,7 +474,7 @@ void trace_method_handle_stub(const char* adaptername,
|
||||
strstr(adaptername, "linkTo") == NULL); // static linkers don't have MH
|
||||
intptr_t* entry_sp = (intptr_t*) &saved_regs[trace_mh_nregs]; // just after the saved regs
|
||||
intptr_t* saved_sp = (intptr_t*) saved_regs[Rsender_sp->encoding()]; // save of Rsender_sp
|
||||
intptr_t* last_sp = (intptr_t*) saved_bp[AARCH64_ONLY(frame::interpreter_frame_stack_top_offset) NOT_AARCH64(frame::interpreter_frame_last_sp_offset)];
|
||||
intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset];
|
||||
intptr_t* base_sp = last_sp;
|
||||
|
||||
intptr_t mh_reg = (intptr_t)saved_regs[R5_mh->encoding()];
|
||||
@ -517,13 +490,9 @@ void trace_method_handle_stub(const char* adaptername,
|
||||
tty->print(" reg dump: ");
|
||||
int i;
|
||||
for (i = 0; i < trace_mh_nregs; i++) {
|
||||
if (i > 0 && i % AARCH64_ONLY(2) NOT_AARCH64(4) == 0)
|
||||
if (i > 0 && i % 4 == 0)
|
||||
tty->print("\n + dump: ");
|
||||
#ifdef AARCH64
|
||||
const char* reg_name = (i == trace_mh_nregs-1) ? "pc" : as_Register(i)->name();
|
||||
#else
|
||||
const char* reg_name = trace_mh_regs[i]->name();
|
||||
#endif
|
||||
tty->print(" %s: " INTPTR_FORMAT, reg_name, p2i((void *)saved_regs[i]));
|
||||
}
|
||||
tty->cr();
|
||||
|
@ -30,11 +30,7 @@
#include "runtime/os.hpp"


#ifdef AARCH64
#include "nativeInst_arm_64.hpp"
#else
#include "nativeInst_arm_32.hpp"
#endif


#endif // CPU_ARM_VM_NATIVEINST_ARM_HPP
|
||||
|
@ -1,243 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "assembler_arm.inline.hpp"
|
||||
#include "code/codeCache.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "nativeInst_arm.hpp"
|
||||
#include "oops/compressedOops.inline.hpp"
|
||||
#include "oops/klass.inline.hpp"
|
||||
#include "oops/oop.hpp"
|
||||
#include "runtime/handles.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
#include "utilities/ostream.hpp"
|
||||
#ifdef COMPILER1
|
||||
#include "c1/c1_Runtime1.hpp"
|
||||
#endif
|
||||
|
||||
void RawNativeInstruction::verify() {
|
||||
// make sure code pattern is actually an instruction address
|
||||
address addr = instruction_address();
|
||||
if (addr == NULL || ((intptr_t)addr & (instruction_size - 1)) != 0) {
|
||||
fatal("not an instruction address");
|
||||
}
|
||||
}
|
||||
|
||||
void NativeMovRegMem::set_offset(int x) {
|
||||
int scale = get_offset_scale();
|
||||
assert((x & right_n_bits(scale)) == 0, "offset should be aligned");
|
||||
guarantee((x >> 24) == 0, "encoding constraint");
|
||||
|
||||
if (Assembler::is_unsigned_imm_in_range(x, 12, scale)) {
|
||||
set_unsigned_imm(x, 12, get_offset_scale(), 10);
|
||||
return;
|
||||
}

// If offset is too large to be placed into single ldr/str instruction, we replace
// ldr/str Rt, [Rn, #offset]
// nop
// with
// add LR, Rn, #offset_hi
// ldr/str Rt, [LR, #offset_lo]

// Note: Rtemp cannot be used as a temporary register as it could be used
// for value being stored (see LIR_Assembler::reg2mem).
// Patchable NativeMovRegMem instructions are generated in LIR_Assembler::mem2reg and LIR_Assembler::reg2mem
// which do not use LR, so it is free. Also, it does not conflict with LR usages in c1_LIRGenerator_arm.cpp.
const int tmp = LR->encoding();
const int rn = (encoding() >> 5) & 0x1f;

NativeInstruction* next = nativeInstruction_at(next_raw_instruction_address());
assert(next->is_nop(), "must be");

next->set_encoding((encoding() & 0xffc0001f) | Assembler::encode_unsigned_imm((x & 0xfff), 12, scale, 10) | tmp << 5);
this->set_encoding(0x91400000 | Assembler::encode_unsigned_imm((x >> 12), 12, 0, 10) | rn << 5 | tmp);
}
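Illustration (not part of the patch): the rewrite above splits a too-large offset into a 12-bit low part for the ldr/str and a high part (shifted left by 12) for the inserted ADD, so the two immediates must reconstruct the original offset exactly. A small stand-alone C++ sketch of that split; instruction encodings and the access-size scaling used by the real code are ignored here:

    #include <cassert>
    #include <cstdio>

    int main() {
      int x = 0x123456;             // example offset; must fit in 24 bits (guarantee above)
      assert((x >> 24) == 0);
      int lo = x & 0xfff;           // 12-bit immediate of the ldr/str
      int hi = x & ~0xfff;          // immediate of the ADD, i.e. (x >> 12) shifted left by 12
      assert(hi + lo == x);         // the pair reconstructs the original offset
      printf("x=0x%x hi=0x%x lo=0x%x\n", x, hi, lo);
      return 0;
    }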
|
||||
|
||||
intptr_t NativeMovConstReg::_data() const {
|
||||
#ifdef COMPILER2
|
||||
if (is_movz()) {
|
||||
// narrow constant or ic call cached value
|
||||
RawNativeInstruction* ni = next_raw();
|
||||
assert(ni->is_movk(), "movz;movk expected");
|
||||
uint lo16 = (encoding() >> 5) & 0xffff;
|
||||
intptr_t hi = 0;
|
||||
int i = 0;
|
||||
while (ni->is_movk() && i < 3) {
|
||||
uint hi16 = (ni->encoding() >> 5) & 0xffff;
|
||||
int shift = ((ni->encoding() >> 21) & 0x3) << 4;
|
||||
hi |= (intptr_t)hi16 << shift;
|
||||
ni = ni->next_raw();
|
||||
++i;
|
||||
}
|
||||
return lo16 | hi;
|
||||
}
|
||||
#endif
|
||||
return (intptr_t)(nativeLdrLiteral_at(instruction_address())->literal_value());
|
||||
}
|
||||
|
||||
static void raw_set_data(RawNativeInstruction* si, intptr_t x, oop* oop_addr, Metadata** metadata_addr) {
|
||||
#ifdef COMPILER2
|
||||
if (si->is_movz()) {
|
||||
// narrow constant or ic call cached value
|
||||
uintptr_t nx = 0;
|
||||
int val_size = 32;
|
||||
if (oop_addr != NULL) {
|
||||
narrowOop encoded_oop = CompressedOops::encode(*oop_addr);
|
||||
nx = encoded_oop;
|
||||
} else if (metadata_addr != NULL) {
|
||||
assert((*metadata_addr)->is_klass(), "expected Klass");
|
||||
narrowKlass encoded_k = Klass::encode_klass((Klass *)*metadata_addr);
|
||||
nx = encoded_k;
|
||||
} else {
|
||||
nx = x;
|
||||
val_size = 64;
|
||||
}
|
||||
RawNativeInstruction* ni = si->next_raw();
|
||||
uint lo16 = nx & 0xffff;
|
||||
int shift = 16;
|
||||
int imm16 = 0xffff << 5;
|
||||
si->set_encoding((si->encoding() & ~imm16) | (lo16 << 5));
|
||||
while (shift < val_size) {
|
||||
assert(ni->is_movk(), "movk expected");
|
||||
assert((((ni->encoding() >> 21) & 0x3) << 4) == shift, "wrong shift");
|
||||
uint hi16 = (nx >> shift) & 0xffff;
|
||||
ni->set_encoding((ni->encoding() & ~imm16) | (hi16 << 5));
|
||||
shift += 16;
|
||||
ni = ni->next_raw();
|
||||
}
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
||||
assert(si->is_ldr_literal(), "should be");
|
||||
|
||||
if (oop_addr == NULL && metadata_addr == NULL) {
|
||||
// A static ldr_literal without oop_relocation
|
||||
nativeLdrLiteral_at(si->instruction_address())->set_literal_value((address)x);
|
||||
} else {
|
||||
// Oop is loaded from oops section
|
||||
address addr = oop_addr != NULL ? (address)oop_addr : (address)metadata_addr;
|
||||
int offset = addr - si->instruction_address();
|
||||
|
||||
assert((((intptr_t)addr) & 0x7) == 0, "target address should be aligned");
|
||||
assert((offset & 0x3) == 0, "offset should be aligned");
|
||||
|
||||
guarantee(Assembler::is_offset_in_range(offset, 19), "offset is not in range");
|
||||
nativeLdrLiteral_at(si->instruction_address())->set_literal_address(si->instruction_address() + offset);
|
||||
}
|
||||
}
|
||||
|
||||
void NativeMovConstReg::set_data(intptr_t x) {
// Find and replace the oop corresponding to this instruction in oops section
oop* oop_addr = NULL;
Metadata** metadata_addr = NULL;
CodeBlob* cb = CodeCache::find_blob(instruction_address());
{
nmethod* nm = cb->as_nmethod_or_null();
if (nm != NULL) {
RelocIterator iter(nm, instruction_address(), next_raw()->instruction_address());
while (iter.next()) {
if (iter.type() == relocInfo::oop_type) {
oop_addr = iter.oop_reloc()->oop_addr();
*oop_addr = cast_to_oop(x);
break;
} else if (iter.type() == relocInfo::metadata_type) {
metadata_addr = iter.metadata_reloc()->metadata_addr();
*metadata_addr = (Metadata*)x;
break;
}
}
}
}
raw_set_data(adjust(this), x, oop_addr, metadata_addr);
}

void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
}

void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "should be");

NativeInstruction* instr = nativeInstruction_at(verified_entry);
assert(instr->is_nop() || instr->encoding() == zombie_illegal_instruction, "required for MT-safe patching");
instr->set_encoding(zombie_illegal_instruction);
}

void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
assert (nativeInstruction_at(instr_addr)->is_b(), "MT-safe patching of arbitrary instructions is not allowed");
assert (nativeInstruction_at(code_buffer)->is_nop(), "MT-safe patching of arbitrary instructions is not allowed");
nativeInstruction_at(instr_addr)->set_encoding(*(int*)code_buffer);
}

void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
// Insert at code_pos unconditional B instruction jumping to entry
intx offset = entry - code_pos;
assert (Assembler::is_offset_in_range(offset, 26), "offset is out of range");

NativeInstruction* instr = nativeInstruction_at(code_pos);
assert (instr->is_b() || instr->is_nop(), "MT-safe patching of arbitrary instructions is not allowed");

instr->set_encoding(0x5 << 26 | Assembler::encode_offset(offset, 26, 0));
}

static address call_for(address return_address) {
CodeBlob* cb = CodeCache::find_blob(return_address);
nmethod* nm = cb->as_nmethod_or_null();
if (nm == NULL) {
ShouldNotReachHere();
return NULL;
}

// Look back 8 instructions (for LIR_Assembler::ic_call and MacroAssembler::patchable_call)
address begin = return_address - 8*NativeInstruction::instruction_size;
if (begin < nm->code_begin()) {
begin = nm->code_begin();
}
RelocIterator iter(nm, begin, return_address);
while (iter.next()) {
Relocation* reloc = iter.reloc();
if (reloc->is_call()) {
address call = reloc->addr();
if (nativeInstruction_at(call)->is_call()) {
if (nativeCall_at(call)->return_address() == return_address) {
return call;
}
}
}
}

return NULL;
}

bool NativeCall::is_call_before(address return_address) {
return (call_for(return_address) != NULL);
}

NativeCall* nativeCall_before(address return_address) {
assert(NativeCall::is_call_before(return_address), "must be");
return nativeCall_at(call_for(return_address));
}
@ -1,771 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_ARM_VM_NATIVEINST_ARM_64_HPP
|
||||
#define CPU_ARM_VM_NATIVEINST_ARM_64_HPP
|
||||
|
||||
#include "asm/macroAssembler.hpp"
|
||||
#include "code/codeCache.hpp"
|
||||
#include "runtime/icache.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
|
||||
// -------------------------------------------------------------------

// Some experimental projects extend the ARM back-end by implementing
// what the front-end usually assumes is a single native instruction
// with a sequence of instructions.
//
// The 'Raw' variants are the low level initial code (usually one
// instruction wide but some of them were already composed
// instructions). They should be used only by the back-end.
//
// The non-raw classes are the front-end entry point, hiding potential
// back-end extensions or the actual instructions size.
class NativeInstruction;

class RawNativeInstruction {
|
||||
public:
|
||||
|
||||
enum ARM_specific {
|
||||
instruction_size = Assembler::InstructionSize,
|
||||
instruction_size_in_bits = instruction_size * BitsPerByte,
|
||||
};
|
||||
|
||||
// illegal instruction used by NativeJump::patch_verified_entry
|
||||
static const int zombie_illegal_instruction = 0xd4000542; // hvc #42
|
||||
|
||||
address addr_at(int offset) const { return (address)this + offset; }
|
||||
address instruction_address() const { return addr_at(0); }
|
||||
address next_raw_instruction_address() const { return addr_at(instruction_size); }
|
||||
|
||||
static RawNativeInstruction* at(address address) {
|
||||
return (RawNativeInstruction*)address;
|
||||
}
|
||||
|
||||
RawNativeInstruction* next_raw() const {
|
||||
return at(next_raw_instruction_address());
|
||||
}
|
||||
|
||||
int encoding() const {
|
||||
return *(int*)this;
|
||||
}
|
||||
|
||||
void set_encoding(int value) {
|
||||
int old = encoding();
|
||||
if (old != value) {
|
||||
*(int*)this = value;
|
||||
ICache::invalidate_word((address)this);
|
||||
}
|
||||
}
|
||||
|
||||
bool is_nop() const { return encoding() == (int)0xd503201f; }
|
||||
bool is_b() const { return (encoding() & 0xfc000000) == 0x14000000; } // unconditional branch
|
||||
bool is_b_cond() const { return (encoding() & 0xff000010) == 0x54000000; } // conditional branch
|
||||
bool is_bl() const { return (encoding() & 0xfc000000) == 0x94000000; }
|
||||
bool is_br() const { return (encoding() & 0xfffffc1f) == 0xd61f0000; }
|
||||
bool is_blr() const { return (encoding() & 0xfffffc1f) == 0xd63f0000; }
|
||||
bool is_ldr_literal() const { return (encoding() & 0xff000000) == 0x58000000; }
|
||||
bool is_adr_aligned() const { return (encoding() & 0xff000000) == 0x10000000; } // adr Xn, <label>, where label is aligned to 4 bytes (address of instruction).
|
||||
bool is_adr_aligned_lr() const { return (encoding() & 0xff00001f) == 0x1000001e; } // adr LR, <label>, where label is aligned to 4 bytes (address of instruction).
|
||||
|
||||
bool is_ldr_str_gp_reg_unsigned_imm() const { return (encoding() & 0x3f000000) == 0x39000000; } // ldr/str{b, sb, h, sh, _w, sw} Rt, [Rn, #imm]
|
||||
bool is_ldr_str_fp_reg_unsigned_imm() const { return (encoding() & 0x3f000000) == 0x3D000000; } // ldr/str Rt(SIMD), [Rn, #imm]
|
||||
bool is_ldr_str_reg_unsigned_imm() const { return is_ldr_str_gp_reg_unsigned_imm() || is_ldr_str_fp_reg_unsigned_imm(); }
|
||||
|
||||
bool is_stp_preindex() const { return (encoding() & 0xffc00000) == 0xa9800000; } // stp Xt1, Xt2, [Xn, #imm]!
|
||||
bool is_ldp_postindex() const { return (encoding() & 0xffc00000) == 0xa8c00000; } // ldp Xt1, Xt2, [Xn] #imm
|
||||
bool is_mov_sp() const { return (encoding() & 0xfffffc00) == 0x91000000; } // mov <Xn|SP>, <Xm|SP>
|
||||
bool is_movn() const { return (encoding() & 0x7f800000) == 0x12800000; }
|
||||
bool is_movz() const { return (encoding() & 0x7f800000) == 0x52800000; }
|
||||
bool is_movk() const { return (encoding() & 0x7f800000) == 0x72800000; }
|
||||
bool is_orr_imm() const { return (encoding() & 0x7f800000) == 0x32000000; }
|
||||
bool is_cmp_rr() const { return (encoding() & 0x7fe00000) == 0x6b000000; }
|
||||
bool is_csel() const { return (encoding() & 0x7fe00000) == 0x1a800000; }
|
||||
bool is_sub_shift() const { return (encoding() & 0x7f200000) == 0x4b000000; } // sub Rd, Rn, shift (Rm, imm)
|
||||
bool is_mov() const { return (encoding() & 0x7fe0ffe0) == 0x2a0003e0; } // mov Rd, Rm (orr Rd, ZR, shift (Rm, 0))
|
||||
bool is_tst() const { return (encoding() & 0x7f20001f) == 0x6a00001f; } // tst Rn, shift (Rm, imm) (ands ZR, Rn, shift(Rm, imm))
|
||||
bool is_lsr_imm() const { return (encoding() & 0x7f807c00) == 0x53007c00; } // lsr Rd, Rn, imm (ubfm Rd, Rn, imm, 31/63)
|
||||
|
||||
bool is_far_jump() const { return is_ldr_literal() && next_raw()->is_br(); }
|
||||
bool is_fat_call() const {
|
||||
return
|
||||
#ifdef COMPILER2
|
||||
(is_blr() && next_raw()->is_b()) ||
|
||||
#endif
|
||||
(is_adr_aligned_lr() && next_raw()->is_br());
|
||||
}
|
||||
bool is_far_call() const {
|
||||
return is_ldr_literal() && next_raw()->is_fat_call();
|
||||
}
|
||||
|
||||
bool is_ic_near_call() const { return is_adr_aligned_lr() && next_raw()->is_b(); }
|
||||
bool is_ic_far_call() const { return is_adr_aligned_lr() && next_raw()->is_ldr_literal() && next_raw()->next_raw()->is_br(); }
|
||||
bool is_ic_call() const { return is_ic_near_call() || is_ic_far_call(); }
|
||||
|
||||
bool is_jump() const { return is_b() || is_far_jump(); }
|
||||
bool is_call() const { return is_bl() || is_far_call() || is_ic_call(); }
|
||||
bool is_branch() const { return is_b() || is_bl(); }
|
||||
|
||||
// c2 doesn't use fixed registers for safepoint poll address
|
||||
bool is_safepoint_poll() const {
|
||||
return true;
|
||||
}
|
||||
|
||||
bool is_save_all_registers(const RawNativeInstruction** next) const {
|
||||
const RawNativeInstruction* current = this;
|
||||
|
||||
if (!current->is_stp_preindex()) return false; current = current->next_raw();
|
||||
for (int i = 28; i >= 0; i -= 2) {
|
||||
if (!current->is_stp_preindex()) return false; current = current->next_raw();
|
||||
}
|
||||
|
||||
if (!current->is_adr_aligned()) return false; current = current->next_raw();
|
||||
if (!current->is_ldr_str_gp_reg_unsigned_imm()) return false; current = current->next_raw();
|
||||
if (!current->is_ldr_str_gp_reg_unsigned_imm()) return false; current = current->next_raw();
|
||||
|
||||
*next = (RawNativeInstruction*) current;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool is_restore_all_registers(const RawNativeInstruction** next) const {
|
||||
const RawNativeInstruction* current = this;
|
||||
|
||||
for (int i = 0; i <= 28; i += 2) {
|
||||
if (!current->is_ldp_postindex()) return false; current = current->next_raw();
|
||||
}
|
||||
if (!current->is_ldp_postindex()) return false; current = current->next_raw();
|
||||
|
||||
*next = (RawNativeInstruction*) current;
|
||||
return true;
|
||||
}
|
||||
|
||||
const RawNativeInstruction* skip_bind_literal() const {
|
||||
const RawNativeInstruction* current = this;
|
||||
if (((uintptr_t)current) % wordSize != 0) {
|
||||
assert(current->is_nop(), "should be");
|
||||
current = current->next_raw();
|
||||
}
|
||||
assert(((uintptr_t)current) % wordSize == 0, "should be"); // bound literal should be aligned
|
||||
current = current->next_raw()->next_raw();
|
||||
return current;
|
||||
}
|
||||
|
||||
bool is_stop(const RawNativeInstruction** next) const {
|
||||
const RawNativeInstruction* current = this;
|
||||
|
||||
if (!current->is_save_all_registers(¤t)) return false;
|
||||
if (!current->is_ldr_literal()) return false; current = current->next_raw();
|
||||
if (!current->is_mov_sp()) return false; current = current->next_raw();
|
||||
if (!current->is_ldr_literal()) return false; current = current->next_raw();
|
||||
if (!current->is_br()) return false; current = current->next_raw();
|
||||
|
||||
current = current->skip_bind_literal();
|
||||
current = current->skip_bind_literal();
|
||||
|
||||
*next = (RawNativeInstruction*) current;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool is_mov_slow(const RawNativeInstruction** next = NULL) const {
|
||||
const RawNativeInstruction* current = this;
|
||||
|
||||
if (current->is_orr_imm()) {
|
||||
current = current->next_raw();
|
||||
|
||||
} else if (current->is_movn() || current->is_movz()) {
|
||||
current = current->next_raw();
|
||||
int movkCount = 0;
|
||||
while (current->is_movk()) {
|
||||
movkCount++;
|
||||
if (movkCount > 3) return false;
|
||||
current = current->next_raw();
|
||||
}
|
||||
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (next != NULL) {
|
||||
*next = (RawNativeInstruction*)current;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
void skip_verify_heapbase(const RawNativeInstruction** next) const {
|
||||
const RawNativeInstruction* current = this;
|
||||
|
||||
if (CheckCompressedOops) {
|
||||
if (!current->is_ldr_str_gp_reg_unsigned_imm()) return; current = current->next_raw();
|
||||
if (!current->is_stp_preindex()) return; current = current->next_raw();
|
||||
// NOTE: temporary workaround, remove with m6-01?
|
||||
// skip saving condition flags
|
||||
current = current->next_raw();
|
||||
current = current->next_raw();
|
||||
|
||||
if (!current->is_mov_slow(¤t)) return;
|
||||
if (!current->is_cmp_rr()) return; current = current->next_raw();
|
||||
if (!current->is_b_cond()) return; current = current->next_raw();
|
||||
if (!current->is_stop(¤t)) return;
|
||||
|
||||
#ifdef COMPILER2
|
||||
if (current->is_nop()) current = current->next_raw();
|
||||
#endif
|
||||
// NOTE: temporary workaround, remove with m6-01?
|
||||
// skip restoring condition flags
|
||||
current = current->next_raw();
|
||||
current = current->next_raw();
|
||||
|
||||
if (!current->is_ldp_postindex()) return; current = current->next_raw();
|
||||
if (!current->is_ldr_str_gp_reg_unsigned_imm()) return; current = current->next_raw();
|
||||
}
|
||||
|
||||
*next = (RawNativeInstruction*) current;
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
bool is_ldr_global_ptr(const RawNativeInstruction** next) const {
|
||||
const RawNativeInstruction* current = this;
|
||||
|
||||
if (!current->is_mov_slow(¤t)) return false;
|
||||
if (!current->is_ldr_str_gp_reg_unsigned_imm()) return false; current = current->next_raw();
|
||||
|
||||
*next = (RawNativeInstruction*) current;
|
||||
return true;
|
||||
}
|
||||
|
||||
void skip_verify_oop(const RawNativeInstruction** next) const {
|
||||
const RawNativeInstruction* current = this;
|
||||
|
||||
if (VerifyOops) {
|
||||
if (!current->is_save_all_registers(¤t)) return;
|
||||
|
||||
if (current->is_mov()) {
|
||||
current = current->next_raw();
|
||||
}
|
||||
|
||||
if (!current->is_mov_sp()) return; current = current->next_raw();
|
||||
if (!current->is_ldr_literal()) return; current = current->next_raw();
|
||||
if (!current->is_ldr_global_ptr(¤t)) return;
|
||||
if (!current->is_blr()) return; current = current->next_raw();
|
||||
if (!current->is_restore_all_registers(¤t)) return;
|
||||
if (!current->is_b()) return; current = current->next_raw();
|
||||
|
||||
current = current->skip_bind_literal();
|
||||
}
|
||||
|
||||
*next = (RawNativeInstruction*) current;
|
||||
}
|
||||
|
||||
void skip_encode_heap_oop(const RawNativeInstruction** next) const {
|
||||
const RawNativeInstruction* current = this;
|
||||
|
||||
assert (Universe::heap() != NULL, "java heap should be initialized");
|
||||
#ifdef ASSERT
|
||||
current->skip_verify_heapbase(¤t);
|
||||
#endif // ASSERT
|
||||
current->skip_verify_oop(¤t);
|
||||
|
||||
if (Universe::narrow_oop_base() == NULL) {
|
||||
if (Universe::narrow_oop_shift() != 0) {
|
||||
if (!current->is_lsr_imm()) return; current = current->next_raw();
|
||||
} else {
|
||||
if (current->is_mov()) {
|
||||
current = current->next_raw();
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (!current->is_tst()) return; current = current->next_raw();
|
||||
if (!current->is_csel()) return; current = current->next_raw();
|
||||
if (!current->is_sub_shift()) return; current = current->next_raw();
|
||||
if (Universe::narrow_oop_shift() != 0) {
|
||||
if (!current->is_lsr_imm()) return; current = current->next_raw();
|
||||
}
|
||||
}
|
||||
|
||||
*next = (RawNativeInstruction*) current;
|
||||
}
|
||||
|
||||
void verify();
|
||||
|
||||
// For unit tests
|
||||
static void test() {}
|
||||
|
||||
private:
|
||||
|
||||
void check_bits_range(int bits, int scale, int low_bit) const {
|
||||
assert((0 <= low_bit) && (0 < bits) && (low_bit + bits <= instruction_size_in_bits), "invalid bits range");
|
||||
assert((0 <= scale) && (scale <= 4), "scale is out of range");
|
||||
}
|
||||
|
||||
void set_imm(int imm_encoding, int bits, int low_bit) {
|
||||
int imm_mask = right_n_bits(bits) << low_bit;
|
||||
assert((imm_encoding & ~imm_mask) == 0, "invalid imm encoding");
|
||||
set_encoding((encoding() & ~imm_mask) | imm_encoding);
|
||||
}
|
||||
|
||||
protected:
|
||||
|
||||
// Returns signed immediate from [low_bit .. low_bit + bits - 1] bits of this instruction, scaled by given scale.
|
||||
int get_signed_imm(int bits, int scale, int low_bit) const {
|
||||
check_bits_range(bits, scale, low_bit);
|
||||
int high_bits_to_clean = (instruction_size_in_bits - (low_bit + bits));
|
||||
return encoding() << high_bits_to_clean >> (high_bits_to_clean + low_bit) << scale;
|
||||
}
|
||||
|
||||
// Puts given signed immediate into the [low_bit .. low_bit + bits - 1] bits of this instruction.
|
||||
void set_signed_imm(int value, int bits, int scale, int low_bit) {
|
||||
set_imm(Assembler::encode_imm(value, bits, scale, low_bit), bits, low_bit);
|
||||
}
|
||||
|
||||
// Returns unsigned immediate from [low_bit .. low_bit + bits - 1] bits of this instruction, scaled by given scale.
|
||||
int get_unsigned_imm(int bits, int scale, int low_bit) const {
|
||||
check_bits_range(bits, scale, low_bit);
|
||||
return ((encoding() >> low_bit) & right_n_bits(bits)) << scale;
|
||||
}
|
||||
|
||||
// Puts given unsigned immediate into the [low_bit .. low_bit + bits - 1] bits of this instruction.
|
||||
void set_unsigned_imm(int value, int bits, int scale, int low_bit) {
|
||||
set_imm(Assembler::encode_unsigned_imm(value, bits, scale, low_bit), bits, low_bit);
|
||||
}
|
||||
|
||||
int get_signed_offset(int bits, int low_bit) const {
|
||||
return get_signed_imm(bits, 2, low_bit);
|
||||
}
|
||||
|
||||
void set_signed_offset(int offset, int bits, int low_bit) {
|
||||
set_signed_imm(offset, bits, 2, low_bit);
|
||||
}
|
||||
};
|
||||
|
||||
inline RawNativeInstruction* rawNativeInstruction_at(address address) {
|
||||
RawNativeInstruction* instr = RawNativeInstruction::at(address);
|
||||
#ifdef ASSERT
|
||||
instr->verify();
|
||||
#endif // ASSERT
|
||||
return instr;
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
// Load/store register (unsigned scaled immediate)
|
||||
class NativeMovRegMem: public RawNativeInstruction {
|
||||
private:
|
||||
int get_offset_scale() const {
|
||||
return get_unsigned_imm(2, 0, 30);
|
||||
}
|
||||
|
||||
public:
|
||||
int offset() const {
|
||||
return get_unsigned_imm(12, get_offset_scale(), 10);
|
||||
}
|
||||
|
||||
void set_offset(int x);
|
||||
|
||||
void add_offset_in_bytes(int add_offset) {
|
||||
set_offset(offset() + add_offset);
|
||||
}
|
||||
};
|
||||
|
||||
inline NativeMovRegMem* nativeMovRegMem_at(address address) {
|
||||
const RawNativeInstruction* instr = rawNativeInstruction_at(address);
|
||||
|
||||
#ifdef COMPILER1
|
||||
// NOP required for C1 patching
|
||||
if (instr->is_nop()) {
|
||||
instr = instr->next_raw();
|
||||
}
|
||||
#endif
|
||||
|
||||
instr->skip_encode_heap_oop(&instr);
|
||||
|
||||
assert(instr->is_ldr_str_reg_unsigned_imm(), "must be");
|
||||
return (NativeMovRegMem*)instr;
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
class NativeInstruction : public RawNativeInstruction {
|
||||
public:
|
||||
static NativeInstruction* at(address address) {
|
||||
return (NativeInstruction*)address;
|
||||
}
|
||||
|
||||
public:
|
||||
// No need to consider indirections while parsing NativeInstruction
|
||||
address next_instruction_address() const {
|
||||
return next_raw_instruction_address();
|
||||
}
|
||||
|
||||
// next() is no longer defined to avoid confusion.
|
||||
//
|
||||
// The front end and most classes except for those defined in nativeInst_arm
|
||||
// or relocInfo_arm should only use next_instruction_address(), skipping
|
||||
// over composed instruction and ignoring back-end extensions.
|
||||
//
|
||||
// The back-end can use next_raw() when it knows the instruction sequence
|
||||
// and only wants to skip a single native instruction.
|
||||
};
|
||||
|
||||
inline NativeInstruction* nativeInstruction_at(address address) {
|
||||
NativeInstruction* instr = NativeInstruction::at(address);
|
||||
#ifdef ASSERT
|
||||
instr->verify();
|
||||
#endif // ASSERT
|
||||
return instr;
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
class NativeInstructionLdrLiteral: public NativeInstruction {
|
||||
public:
|
||||
address literal_address() {
|
||||
address la = instruction_address() + get_signed_offset(19, 5);
|
||||
assert(la != instruction_address(), "literal points to instruction");
|
||||
return la;
|
||||
}
|
||||
|
||||
address after_literal_address() {
|
||||
return literal_address() + wordSize;
|
||||
}
|
||||
|
||||
void set_literal_address(address addr, address pc) {
|
||||
assert(is_ldr_literal(), "must be");
|
||||
int opc = (encoding() >> 30) & 0x3;
|
||||
assert (opc != 0b01 || addr == pc || ((uintx)addr & 7) == 0, "ldr target should be aligned");
|
||||
set_signed_offset(addr - pc, 19, 5);
|
||||
}
|
||||
|
||||
void set_literal_address(address addr) {
|
||||
set_literal_address(addr, instruction_address());
|
||||
}
|
||||
|
||||
address literal_value() {
|
||||
return *(address*)literal_address();
|
||||
}
|
||||
|
||||
void set_literal_value(address dest) {
|
||||
*(address*)literal_address() = dest;
|
||||
}
|
||||
};
|
||||
|
||||
inline NativeInstructionLdrLiteral* nativeLdrLiteral_at(address address) {
|
||||
assert(nativeInstruction_at(address)->is_ldr_literal(), "must be");
|
||||
return (NativeInstructionLdrLiteral*)address;
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
// Common class for branch instructions with 26-bit immediate offset: B (unconditional) and BL
|
||||
class NativeInstructionBranchImm26: public NativeInstruction {
|
||||
public:
|
||||
address destination(int adj = 0) const {
|
||||
return instruction_address() + get_signed_offset(26, 0) + adj;
|
||||
}
|
||||
|
||||
void set_destination(address dest) {
|
||||
intptr_t offset = (intptr_t)(dest - instruction_address());
|
||||
assert((offset & 0x3) == 0, "should be aligned");
|
||||
set_signed_offset(offset, 26, 0);
|
||||
}
|
||||
};
|
||||
|
||||
inline NativeInstructionBranchImm26* nativeB_at(address address) {
|
||||
assert(nativeInstruction_at(address)->is_b(), "must be");
|
||||
return (NativeInstructionBranchImm26*)address;
|
||||
}
|
||||
|
||||
inline NativeInstructionBranchImm26* nativeBL_at(address address) {
|
||||
assert(nativeInstruction_at(address)->is_bl(), "must be");
|
||||
return (NativeInstructionBranchImm26*)address;
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
class NativeInstructionAdrLR: public NativeInstruction {
|
||||
public:
|
||||
// Returns address which is loaded into LR by this instruction.
|
||||
address target_lr_value() {
|
||||
return instruction_address() + get_signed_offset(19, 5);
|
||||
}
|
||||
};
|
||||
|
||||
inline NativeInstructionAdrLR* nativeAdrLR_at(address address) {
|
||||
assert(nativeInstruction_at(address)->is_adr_aligned_lr(), "must be");
|
||||
return (NativeInstructionAdrLR*)address;
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
class RawNativeCall: public NativeInstruction {
|
||||
public:
|
||||
|
||||
address return_address() const {
|
||||
if (is_bl()) {
|
||||
return next_raw_instruction_address();
|
||||
|
||||
} else if (is_far_call()) {
|
||||
#ifdef COMPILER2
|
||||
if (next_raw()->is_blr()) {
|
||||
// ldr_literal; blr; ret_addr: b skip_literal;
|
||||
return addr_at(2 * instruction_size);
|
||||
}
|
||||
#endif
|
||||
assert(next_raw()->is_adr_aligned_lr() && next_raw()->next_raw()->is_br(), "must be");
|
||||
return nativeLdrLiteral_at(instruction_address())->after_literal_address();
|
||||
|
||||
} else if (is_ic_call()) {
|
||||
return nativeAdrLR_at(instruction_address())->target_lr_value();
|
||||
|
||||
} else {
|
||||
ShouldNotReachHere();
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
address destination(int adj = 0) const {
|
||||
if (is_bl()) {
|
||||
return nativeBL_at(instruction_address())->destination(adj);
|
||||
|
||||
} else if (is_far_call()) {
|
||||
return nativeLdrLiteral_at(instruction_address())->literal_value();
|
||||
|
||||
} else if (is_adr_aligned_lr()) {
|
||||
RawNativeInstruction *next = next_raw();
|
||||
if (next->is_b()) {
|
||||
// ic_near_call
|
||||
return nativeB_at(next->instruction_address())->destination(adj);
|
||||
} else if (next->is_far_jump()) {
|
||||
// ic_far_call
|
||||
return nativeLdrLiteral_at(next->instruction_address())->literal_value();
|
||||
}
|
||||
}
|
||||
ShouldNotReachHere();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void set_destination(address dest) {
|
||||
if (is_bl()) {
|
||||
nativeBL_at(instruction_address())->set_destination(dest);
|
||||
return;
|
||||
}
|
||||
if (is_far_call()) {
|
||||
nativeLdrLiteral_at(instruction_address())->set_literal_value(dest);
|
||||
OrderAccess::storeload(); // overkill if caller holds lock?
|
||||
return;
|
||||
}
|
||||
if (is_adr_aligned_lr()) {
|
||||
RawNativeInstruction *next = next_raw();
|
||||
if (next->is_b()) {
|
||||
// ic_near_call
|
||||
nativeB_at(next->instruction_address())->set_destination(dest);
|
||||
return;
|
||||
}
|
||||
if (next->is_far_jump()) {
|
||||
// ic_far_call
|
||||
nativeLdrLiteral_at(next->instruction_address())->set_literal_value(dest);
|
||||
OrderAccess::storeload(); // overkill if caller holds lock?
|
||||
return;
|
||||
}
|
||||
}
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
|
||||
void set_destination_mt_safe(address dest) {
|
||||
assert(CodeCache::contains(dest), "call target should be from code cache (required by ic_call and patchable_call)");
|
||||
set_destination(dest);
|
||||
}
|
||||
|
||||
void verify() {
|
||||
assert(RawNativeInstruction::is_call(), "should be");
|
||||
}
|
||||
|
||||
void verify_alignment() {
|
||||
// Nothing to do on ARM
|
||||
}
|
||||
};
|
||||
|
||||
inline RawNativeCall* rawNativeCall_at(address address) {
|
||||
RawNativeCall * call = (RawNativeCall*)address;
|
||||
call->verify();
|
||||
return call;
|
||||
}
|
||||
|
||||
class NativeCall: public RawNativeCall {
|
||||
public:
|
||||
|
||||
// NativeCall::next_instruction_address() is used only to define the
|
||||
// range where to look for the relocation information. We need not
|
||||
// walk over composed instructions (as long as the relocation information
|
||||
// is associated with the first instruction).
|
||||
address next_instruction_address() const {
|
||||
return next_raw_instruction_address();
|
||||
}
|
||||
|
||||
static bool is_call_before(address return_address);
|
||||
};
|
||||
|
||||
inline NativeCall* nativeCall_at(address address) {
|
||||
NativeCall * call = (NativeCall*)address;
|
||||
call->verify();
|
||||
return call;
|
||||
}
|
||||
|
||||
NativeCall* nativeCall_before(address return_address);
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
class NativeGeneralJump: public NativeInstruction {
|
||||
public:
|
||||
|
||||
address jump_destination() const {
|
||||
return nativeB_at(instruction_address())->destination();
|
||||
}
|
||||
|
||||
static void replace_mt_safe(address instr_addr, address code_buffer);
|
||||
|
||||
static void insert_unconditional(address code_pos, address entry);
|
||||
|
||||
};
|
||||
|
||||
inline NativeGeneralJump* nativeGeneralJump_at(address address) {
|
||||
assert(nativeInstruction_at(address)->is_b(), "must be");
|
||||
return (NativeGeneralJump*)address;
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
class RawNativeJump: public NativeInstruction {
|
||||
public:
|
||||
|
||||
address jump_destination(int adj = 0) const {
|
||||
if (is_b()) {
|
||||
address a = nativeB_at(instruction_address())->destination(adj);
|
||||
// Jump destination -1 is encoded as a jump to self
|
||||
if (a == instruction_address()) {
|
||||
return (address)-1;
|
||||
}
|
||||
return a;
|
||||
} else {
|
||||
assert(is_far_jump(), "should be");
|
||||
return nativeLdrLiteral_at(instruction_address())->literal_value();
|
||||
}
|
||||
}
|
||||
|
||||
void set_jump_destination(address dest) {
|
||||
if (is_b()) {
|
||||
// Jump destination -1 is encoded as a jump to self
|
||||
if (dest == (address)-1) {
|
||||
dest = instruction_address();
|
||||
}
|
||||
nativeB_at(instruction_address())->set_destination(dest);
|
||||
} else {
|
||||
assert(is_far_jump(), "should be");
|
||||
nativeLdrLiteral_at(instruction_address())->set_literal_value(dest);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
inline RawNativeJump* rawNativeJump_at(address address) {
|
||||
assert(rawNativeInstruction_at(address)->is_jump(), "must be");
|
||||
return (RawNativeJump*)address;
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
class NativeMovConstReg: public NativeInstruction {
|
||||
|
||||
NativeMovConstReg *adjust() const {
|
||||
return (NativeMovConstReg *)adjust(this);
|
||||
}
|
||||
|
||||
public:
|
||||
|
||||
static RawNativeInstruction *adjust(const RawNativeInstruction *ni) {
|
||||
#ifdef COMPILER1
|
||||
// NOP required for C1 patching
|
||||
if (ni->is_nop()) {
|
||||
return ni->next_raw();
|
||||
}
|
||||
#endif
|
||||
return (RawNativeInstruction *)ni;
|
||||
}
|
||||
|
||||
intptr_t _data() const;
|
||||
void set_data(intptr_t x);
|
||||
|
||||
intptr_t data() const {
|
||||
return adjust()->_data();
|
||||
}
|
||||
|
||||
bool is_pc_relative() {
|
||||
return adjust()->is_ldr_literal();
|
||||
}
|
||||
|
||||
void _set_pc_relative_offset(address addr, address pc) {
|
||||
assert(is_ldr_literal(), "must be");
|
||||
nativeLdrLiteral_at(instruction_address())->set_literal_address(addr, pc);
|
||||
}
|
||||
|
||||
void set_pc_relative_offset(address addr, address pc) {
|
||||
NativeMovConstReg *ni = adjust();
|
||||
int dest_adj = ni->instruction_address() - instruction_address();
|
||||
ni->_set_pc_relative_offset(addr, pc + dest_adj);
|
||||
}
|
||||
|
||||
address _next_instruction_address() const {
|
||||
#ifdef COMPILER2
|
||||
if (is_movz()) {
|
||||
// narrow constant
|
||||
RawNativeInstruction* ni = next_raw();
|
||||
assert(ni->is_movk(), "movz;movk expected");
|
||||
return ni->next_raw_instruction_address();
|
||||
}
|
||||
#endif
|
||||
assert(is_ldr_literal(), "must be");
|
||||
return NativeInstruction::next_raw_instruction_address();
|
||||
}
|
||||
|
||||
address next_instruction_address() const {
|
||||
return adjust()->_next_instruction_address();
|
||||
}
|
||||
};
|
||||
|
||||
inline NativeMovConstReg* nativeMovConstReg_at(address address) {
|
||||
RawNativeInstruction* ni = rawNativeInstruction_at(address);
|
||||
|
||||
ni = NativeMovConstReg::adjust(ni);
|
||||
|
||||
assert(ni->is_mov_slow() || ni->is_ldr_literal(), "must be");
|
||||
return (NativeMovConstReg*)address;
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
class NativeJump: public RawNativeJump {
|
||||
public:
|
||||
|
||||
static void check_verified_entry_alignment(address entry, address verified_entry);
|
||||
|
||||
static void patch_verified_entry(address entry, address verified_entry, address dest);
|
||||
};
|
||||
|
||||
inline NativeJump* nativeJump_at(address address) {
|
||||
assert(nativeInstruction_at(address)->is_jump(), "must be");
|
||||
return (NativeJump*)address;
|
||||
}
|
||||
|
||||
#endif // CPU_ARM_VM_NATIVEINST_ARM_64_HPP
|
@ -32,12 +32,6 @@ const int ConcreteRegisterImpl::max_fpr = ConcreteRegisterImpl::num_fpr +
|
||||
|
||||
const char* RegisterImpl::name() const {
|
||||
const char* names[number_of_registers] = {
|
||||
#ifdef AARCH64
|
||||
"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
|
||||
"x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
|
||||
"x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
|
||||
"x24", "x25", "x26", "x27", "x28", "fp", "lr", "xzr", "sp"
|
||||
#else
|
||||
"r0", "r1", "r2", "r3", "r4", "r5", "r6",
|
||||
#if (FP_REG_NUM == 7)
|
||||
"fp",
|
||||
@ -51,19 +45,12 @@ const char* RegisterImpl::name() const {
|
||||
"r11",
|
||||
#endif
|
||||
"r12", "sp", "lr", "pc"
|
||||
#endif // AARCH64
|
||||
};
|
||||
return is_valid() ? names[encoding()] : "noreg";
|
||||
}
|
||||
|
||||
const char* FloatRegisterImpl::name() const {
|
||||
const char* names[number_of_registers] = {
|
||||
#ifdef AARCH64
|
||||
"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
|
||||
"v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
|
||||
"v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
|
||||
"v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
|
||||
#else
|
||||
"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
|
||||
"s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
|
||||
"s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
|
||||
@ -74,7 +61,6 @@ const char* FloatRegisterImpl::name() const {
|
||||
"s48", "s49?","s50", "s51?","s52", "s53?","s54", "s55?",
|
||||
"s56", "s57?","s58", "s59?","s60", "s61?","s62", "s63?"
|
||||
#endif
|
||||
#endif // AARCH64
|
||||
};
|
||||
return is_valid() ? names[encoding()] : "fnoreg";
|
||||
}
|
||||
|
@ -66,7 +66,6 @@ typedef VMRegImpl* VMReg;
|
||||
#define R9_IS_SCRATCHED 0
|
||||
#endif
|
||||
|
||||
#ifndef AARCH64
|
||||
// FP_REG_NUM
|
||||
//
|
||||
// The ARM ABI does not state which register is used for the frame pointer.
|
||||
@ -77,7 +76,6 @@ typedef VMRegImpl* VMReg;
|
||||
// Default: FP is R11
|
||||
#define FP_REG_NUM 11
|
||||
#endif
|
||||
#endif // AARCH64
|
||||
|
||||
// ALIGN_WIDE_ARGUMENTS
|
||||
//
|
||||
@ -113,32 +111,6 @@ typedef VMRegImpl* VMReg;
|
||||
#define R14 ((Register)14)
|
||||
#define R15 ((Register)15)
|
||||
|
||||
#ifdef AARCH64
|
||||
|
||||
#define R16 ((Register)16)
|
||||
#define R17 ((Register)17)
|
||||
#define R18 ((Register)18)
|
||||
#define R19 ((Register)19)
|
||||
#define R20 ((Register)20)
|
||||
#define R21 ((Register)21)
|
||||
#define R22 ((Register)22)
|
||||
#define R23 ((Register)23)
|
||||
#define R24 ((Register)24)
|
||||
#define R25 ((Register)25)
|
||||
#define R26 ((Register)26)
|
||||
#define R27 ((Register)27)
|
||||
#define R28 ((Register)28)
|
||||
#define R29 ((Register)29)
|
||||
#define R30 ((Register)30)
|
||||
#define ZR ((Register)31)
|
||||
#define SP ((Register)32)
|
||||
|
||||
#define FP R29
|
||||
#define LR R30
|
||||
|
||||
#define altFP_7_11 R7
|
||||
|
||||
#else // !AARCH64
|
||||
|
||||
#define FP ((Register)FP_REG_NUM)
|
||||
|
||||
@ -158,7 +130,6 @@ typedef VMRegImpl* VMReg;
|
||||
#define LR R14
|
||||
#define PC R15
|
||||
|
||||
#endif // !AARCH64
|
||||
|
||||
|
||||
class RegisterImpl;
|
||||
@ -171,11 +142,7 @@ inline Register as_Register(int encoding) {
|
||||
class RegisterImpl : public AbstractRegisterImpl {
|
||||
public:
|
||||
enum {
|
||||
#ifdef AARCH64
|
||||
number_of_gprs = 31,
|
||||
zr_sp_encoding = 31,
|
||||
#endif
|
||||
number_of_registers = AARCH64_ONLY(number_of_gprs + 2) NOT_AARCH64(16)
|
||||
number_of_registers = 16
|
||||
};
|
||||
|
||||
Register successor() const { return as_Register(encoding() + 1); }
|
||||
@ -188,19 +155,10 @@ class RegisterImpl : public AbstractRegisterImpl {
|
||||
int encoding() const { assert(is_valid(), "invalid register"); return value(); }
|
||||
const char* name() const;
|
||||
|
||||
#ifdef AARCH64
|
||||
int encoding_with_zr() const { assert (is_valid_gpr_or_zr(), "invalid register"); return (this == ZR) ? zr_sp_encoding : value(); }
|
||||
int encoding_with_sp() const { assert (is_valid_gpr_or_sp(), "invalid register"); return (this == SP) ? zr_sp_encoding : value(); }
|
||||
#endif
|
||||
|
||||
// testers
|
||||
bool is_valid() const { return 0 <= value() && value() < number_of_registers; }
|
||||
|
||||
#ifdef AARCH64
|
||||
bool is_valid_gpr() const { return (0 <= value() && value() < number_of_gprs); }
|
||||
bool is_valid_gpr_or_zr() const { return is_valid_gpr() || (this == ZR); }
|
||||
bool is_valid_gpr_or_sp() const { return is_valid_gpr() || (this == SP); }
|
||||
#endif
|
||||
};
|
||||
|
||||
CONSTANT_REGISTER_DECLARATION(Register, noreg, (-1));
|
||||
@ -217,11 +175,7 @@ inline FloatRegister as_FloatRegister(int encoding) {
|
||||
class FloatRegisterImpl : public AbstractRegisterImpl {
|
||||
public:
|
||||
enum {
|
||||
#ifdef AARCH64
|
||||
number_of_registers = 32
|
||||
#else
|
||||
number_of_registers = NOT_COMPILER2(32) COMPILER2_PRESENT(64)
|
||||
#endif
|
||||
};
|
||||
|
||||
inline friend FloatRegister as_FloatRegister(int encoding);
|
||||
@ -234,7 +188,6 @@ class FloatRegisterImpl : public AbstractRegisterImpl {
|
||||
|
||||
const char* name() const;
|
||||
|
||||
#ifndef AARCH64
|
||||
int hi_bits() const {
|
||||
return (encoding() >> 1) & 0xf;
|
||||
}
|
||||
@ -246,54 +199,10 @@ class FloatRegisterImpl : public AbstractRegisterImpl {
|
||||
int hi_bit() const {
|
||||
return encoding() >> 5;
|
||||
}
|
||||
#endif // !AARCH64
|
||||
};
|
||||
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, fnoreg, (-1));
|
||||
|
||||
#ifdef AARCH64
|
||||
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V0, ( 0));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V1, ( 1));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V2, ( 2));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V3, ( 3));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V4, ( 4));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V5, ( 5));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V6, ( 6));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V7, ( 7));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V8, ( 8));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V9, ( 9));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V10, (10));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V11, (11));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V12, (12));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V13, (13));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V14, (14));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V15, (15));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V16, (16));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V17, (17));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V18, (18));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V19, (19));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V20, (20));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V21, (21));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V22, (22));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V23, (23));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V24, (24));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V25, (25));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V26, (26));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V27, (27));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V28, (28));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V29, (29));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V30, (30));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, V31, (31));
|
||||
|
||||
#define S0 V0
|
||||
#define S1_reg V1
|
||||
#define Stemp V31
|
||||
|
||||
#define D0 V0
|
||||
#define D1 V1
|
||||
|
||||
#else // AARCH64
|
||||
|
||||
/*
|
||||
* S1-S6 are named with "_reg" suffix to avoid conflict with
|
||||
@ -366,16 +275,15 @@ CONSTANT_REGISTER_DECLARATION(FloatRegister, D29, (58));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, D30, (60));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, D31, (62));
|
||||
|
||||
#endif // AARCH64
|
||||
|
||||
class ConcreteRegisterImpl : public AbstractRegisterImpl {
|
||||
public:
|
||||
enum {
|
||||
log_vmregs_per_word = LogBytesPerWord - LogBytesPerInt, // VMRegs are of 4-byte size
|
||||
#ifdef COMPILER2
|
||||
log_bytes_per_fpr = AARCH64_ONLY(4) NOT_AARCH64(2), // quad vectors
|
||||
log_bytes_per_fpr = 2, // quad vectors
|
||||
#else
|
||||
log_bytes_per_fpr = AARCH64_ONLY(3) NOT_AARCH64(2), // double vectors
|
||||
log_bytes_per_fpr = 2, // double vectors
|
||||
#endif
|
||||
log_words_per_fpr = log_bytes_per_fpr - LogBytesPerWord,
|
||||
words_per_fpr = 1 << log_words_per_fpr,
|
||||
@ -388,17 +296,13 @@ class ConcreteRegisterImpl : public AbstractRegisterImpl {
|
||||
max_gpr0 = num_gpr,
|
||||
num_fpr = FloatRegisterImpl::number_of_registers << log_vmregs_per_fpr,
|
||||
max_fpr0 = max_gpr0 + num_fpr,
|
||||
number_of_registers = num_gpr + num_fpr +
|
||||
// TODO-AARCH64 revise
|
||||
1+1 // APSR and FPSCR so that c2's REG_COUNT <= ConcreteRegisterImpl::number_of_registers
|
||||
number_of_registers = num_gpr + num_fpr + 1+1 // APSR and FPSCR so that c2's REG_COUNT <= ConcreteRegisterImpl::number_of_registers
|
||||
};
|
||||
|
||||
static const int max_gpr;
|
||||
static const int max_fpr;
|
||||
};
|
||||
|
||||
// TODO-AARCH64 revise the following definitions
|
||||
|
||||
class VFPSystemRegisterImpl;
|
||||
typedef VFPSystemRegisterImpl* VFPSystemRegister;
|
||||
class VFPSystemRegisterImpl : public AbstractRegisterImpl {
|
||||
@ -414,33 +318,21 @@ class VFPSystemRegisterImpl : public AbstractRegisterImpl {
|
||||
/*
|
||||
* Register definitions shared across interpreter and compiler
|
||||
*/
|
||||
#define Rexception_obj AARCH64_ONLY(R19) NOT_AARCH64(R4)
|
||||
#define Rexception_pc AARCH64_ONLY(R20) NOT_AARCH64(R5)
|
||||
|
||||
#ifdef AARCH64
|
||||
#define Rheap_base R27
|
||||
#endif // AARCH64
|
||||
#define Rexception_obj R4
|
||||
#define Rexception_pc R5
|
||||
|
||||
/*
|
||||
* Interpreter register definitions common to C++ and template interpreters.
|
||||
*/
|
||||
#ifdef AARCH64
|
||||
#define Rlocals R23
|
||||
#define Rmethod R26
|
||||
#define Rthread R28
|
||||
#define Rtemp R16
|
||||
#define Rtemp2 R17
|
||||
#else
|
||||
#define Rlocals R8
|
||||
#define Rmethod R9
|
||||
#define Rthread R10
|
||||
#define Rtemp R12
|
||||
#endif // AARCH64
|
||||
|
||||
// Interpreter calling conventions
|
||||
|
||||
#define Rparams AARCH64_ONLY(R8) NOT_AARCH64(SP)
|
||||
#define Rsender_sp AARCH64_ONLY(R19) NOT_AARCH64(R4)
|
||||
#define Rparams SP
|
||||
#define Rsender_sp R4
|
||||
|
||||
// JSR292
|
||||
// Note: R5_mh is needed only during the call setup, including adapters
|
||||
@ -479,25 +371,23 @@ class VFPSystemRegisterImpl : public AbstractRegisterImpl {
|
||||
#define D1_tmp D1
|
||||
|
||||
// Temporary registers saved across VM calls (according to C calling conventions)
|
||||
#define Rtmp_save0 AARCH64_ONLY(R19) NOT_AARCH64(R4)
|
||||
#define Rtmp_save1 AARCH64_ONLY(R20) NOT_AARCH64(R5)
|
||||
#define Rtmp_save0 R4
|
||||
#define Rtmp_save1 R5
|
||||
|
||||
// Cached TOS value
|
||||
#define R0_tos R0
|
||||
|
||||
#ifndef AARCH64
|
||||
#define R0_tos_lo R0
|
||||
#define R1_tos_hi R1
|
||||
#endif
|
||||
|
||||
#define S0_tos S0
|
||||
#define D0_tos D0
|
||||
|
||||
// Dispatch table
|
||||
#define RdispatchTable AARCH64_ONLY(R22) NOT_AARCH64(R6)
|
||||
#define RdispatchTable R6
|
||||
|
||||
// Bytecode pointer
|
||||
#define Rbcp AARCH64_ONLY(R24) NOT_AARCH64(altFP_7_11)
|
||||
#define Rbcp altFP_7_11
|
||||
|
||||
// Pre-loaded next bytecode for the dispatch
|
||||
#define R3_bytecode R3
|
||||
@ -507,7 +397,7 @@ class VFPSystemRegisterImpl : public AbstractRegisterImpl {
|
||||
#define R4_ArrayIndexOutOfBounds_index R4
|
||||
|
||||
// Interpreter expression stack top
|
||||
#define Rstack_top AARCH64_ONLY(R25) NOT_AARCH64(SP)
|
||||
#define Rstack_top SP
|
||||
|
||||
/*
|
||||
* Linux 32-bit ARM C ABI Register calling conventions
|
||||
@ -529,28 +419,14 @@ class VFPSystemRegisterImpl : public AbstractRegisterImpl {
|
||||
* R13 (SP) Stack Pointer callee
|
||||
* R14 (LR) Link register
|
||||
* R15 (PC) Program Counter
|
||||
*
|
||||
* TODO-AARCH64: document AArch64 ABI
|
||||
*
|
||||
*/
|
||||
#define c_rarg0 R0
|
||||
#define c_rarg1 R1
|
||||
#define c_rarg2 R2
|
||||
#define c_rarg3 R3
|
||||
|
||||
#ifdef AARCH64
|
||||
#define c_rarg4 R4
|
||||
#define c_rarg5 R5
|
||||
#define c_rarg6 R6
|
||||
#define c_rarg7 R7
|
||||
#endif
|
||||
|
||||
#ifdef AARCH64
|
||||
#define GPR_PARAMS 8
|
||||
#define FPR_PARAMS 8
|
||||
#else
|
||||
#define GPR_PARAMS 4
|
||||
#endif
|
||||
|
||||
|
||||
// Java ABI
|
||||
@ -560,11 +436,5 @@ class VFPSystemRegisterImpl : public AbstractRegisterImpl {
|
||||
#define j_rarg2 c_rarg2
|
||||
#define j_rarg3 c_rarg3
|
||||
|
||||
#ifdef AARCH64
|
||||
#define j_rarg4 c_rarg4
|
||||
#define j_rarg5 c_rarg5
|
||||
#define j_rarg6 c_rarg6
|
||||
#define j_rarg7 c_rarg7
|
||||
#endif
|
||||
|
||||
#endif // CPU_ARM_VM_REGISTER_ARM_HPP
|
||||
|
@ -31,42 +31,6 @@
|
||||
REGISTER_DEFINITION(Register, noreg);
|
||||
REGISTER_DEFINITION(FloatRegister, fnoreg);
|
||||
|
||||
#ifdef AARCH64
|
||||
|
||||
REGISTER_DEFINITION(FloatRegister, V0);
|
||||
REGISTER_DEFINITION(FloatRegister, V1);
|
||||
REGISTER_DEFINITION(FloatRegister, V2);
|
||||
REGISTER_DEFINITION(FloatRegister, V3);
|
||||
REGISTER_DEFINITION(FloatRegister, V4);
|
||||
REGISTER_DEFINITION(FloatRegister, V5);
|
||||
REGISTER_DEFINITION(FloatRegister, V6);
|
||||
REGISTER_DEFINITION(FloatRegister, V7);
|
||||
REGISTER_DEFINITION(FloatRegister, V8);
|
||||
REGISTER_DEFINITION(FloatRegister, V9);
|
||||
REGISTER_DEFINITION(FloatRegister, V10);
|
||||
REGISTER_DEFINITION(FloatRegister, V11);
|
||||
REGISTER_DEFINITION(FloatRegister, V12);
|
||||
REGISTER_DEFINITION(FloatRegister, V13);
|
||||
REGISTER_DEFINITION(FloatRegister, V14);
|
||||
REGISTER_DEFINITION(FloatRegister, V15);
|
||||
REGISTER_DEFINITION(FloatRegister, V16);
|
||||
REGISTER_DEFINITION(FloatRegister, V17);
|
||||
REGISTER_DEFINITION(FloatRegister, V18);
|
||||
REGISTER_DEFINITION(FloatRegister, V19);
|
||||
REGISTER_DEFINITION(FloatRegister, V20);
|
||||
REGISTER_DEFINITION(FloatRegister, V21);
|
||||
REGISTER_DEFINITION(FloatRegister, V22);
|
||||
REGISTER_DEFINITION(FloatRegister, V23);
|
||||
REGISTER_DEFINITION(FloatRegister, V24);
|
||||
REGISTER_DEFINITION(FloatRegister, V25);
|
||||
REGISTER_DEFINITION(FloatRegister, V26);
|
||||
REGISTER_DEFINITION(FloatRegister, V27);
|
||||
REGISTER_DEFINITION(FloatRegister, V28);
|
||||
REGISTER_DEFINITION(FloatRegister, V29);
|
||||
REGISTER_DEFINITION(FloatRegister, V30);
|
||||
REGISTER_DEFINITION(FloatRegister, V31);
|
||||
|
||||
#else // AARCH64
|
||||
|
||||
REGISTER_DEFINITION(FloatRegister, S0);
|
||||
REGISTER_DEFINITION(FloatRegister, S1_reg);
|
||||
@ -134,4 +98,3 @@ REGISTER_DEFINITION(FloatRegister, D29);
|
||||
REGISTER_DEFINITION(FloatRegister, D30);
|
||||
REGISTER_DEFINITION(FloatRegister, D31);
|
||||
|
||||
#endif //AARCH64
|
||||
|
@ -35,21 +35,6 @@
|
||||
void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
|
||||
|
||||
NativeMovConstReg* ni = nativeMovConstReg_at(addr());
|
||||
#if defined(AARCH64) && defined(COMPILER2)
|
||||
if (ni->is_movz()) {
|
||||
assert(type() == relocInfo::oop_type, "!");
|
||||
if (verify_only) {
|
||||
uintptr_t d = ni->data();
|
||||
guarantee((d >> 32) == 0, "not narrow oop");
|
||||
narrowOop no = d;
|
||||
oop o = CompressedOops::decode(no);
|
||||
guarantee(cast_from_oop<intptr_t>(o) == (intptr_t)x, "instructions must match");
|
||||
} else {
|
||||
ni->set_data((intptr_t)x);
|
||||
}
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
if (verify_only) {
|
||||
guarantee(ni->data() == (intptr_t)(x + o), "instructions must match");
|
||||
} else {
|
||||
@ -69,21 +54,16 @@ address Relocation::pd_call_destination(address orig_addr) {
|
||||
|
||||
RawNativeInstruction* ni = rawNativeInstruction_at(pc);
|
||||
|
||||
#if (!defined(AARCH64))
|
||||
if (NOT_AARCH64(ni->is_add_lr()) AARCH64_ONLY(ni->is_adr_aligned_lr())) {
|
||||
// On arm32, skip the optional 'add LR, PC, #offset'
|
||||
if (ni->is_add_lr()) {
|
||||
// Skip the optional 'add LR, PC, #offset'
|
||||
// (Allowing the jump support code to handle fat_call)
|
||||
pc = ni->next_raw_instruction_address();
|
||||
ni = nativeInstruction_at(pc);
|
||||
}
|
||||
#endif
|
||||
|
||||
if (AARCH64_ONLY(ni->is_call()) NOT_AARCH64(ni->is_bl())) {
|
||||
// For arm32, fat_call are handled by is_jump for the new 'ni',
|
||||
if (ni->is_bl()) {
|
||||
// Fat_call are handled by is_jump for the new 'ni',
|
||||
// requiring only to support is_bl.
|
||||
//
|
||||
// For AARCH64, skipping a leading adr is not sufficient
|
||||
// to reduce calls to a simple bl.
|
||||
return rawNativeCall_at(pc)->destination(adj);
|
||||
}
|
||||
|
||||
@ -98,21 +78,16 @@ void Relocation::pd_set_call_destination(address x) {
|
||||
address pc = addr();
|
||||
NativeInstruction* ni = nativeInstruction_at(pc);
|
||||
|
||||
#if (!defined(AARCH64))
|
||||
if (NOT_AARCH64(ni->is_add_lr()) AARCH64_ONLY(ni->is_adr_aligned_lr())) {
|
||||
// On arm32, skip the optional 'add LR, PC, #offset'
|
||||
if (ni->is_add_lr()) {
|
||||
// Skip the optional 'add LR, PC, #offset'
|
||||
// (Allowing the jump support code to handle fat_call)
|
||||
pc = ni->next_raw_instruction_address();
|
||||
ni = nativeInstruction_at(pc);
|
||||
}
|
||||
#endif
|
||||
|
||||
if (AARCH64_ONLY(ni->is_call()) NOT_AARCH64(ni->is_bl())) {
|
||||
// For arm32, fat_call are handled by is_jump for the new 'ni',
|
||||
if (ni->is_bl()) {
|
||||
// Fat_call are handled by is_jump for the new 'ni',
|
||||
// requiring only to support is_bl.
|
||||
//
|
||||
// For AARCH64, skipping a leading adr is not sufficient
|
||||
// to reduce calls to a simple bl.
|
||||
rawNativeCall_at(pc)->set_destination(x);
|
||||
return;
|
||||
}
|
||||
@ -138,15 +113,6 @@ void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffe
|
||||
|
||||
void metadata_Relocation::pd_fix_value(address x) {
|
||||
assert(! addr_in_const(), "Do not use");
|
||||
#ifdef AARCH64
|
||||
#ifdef COMPILER2
|
||||
NativeMovConstReg* ni = nativeMovConstReg_at(addr());
|
||||
if (ni->is_mov_slow()) {
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
set_value(x);
|
||||
#else
|
||||
if (!VM_Version::supports_movw()) {
|
||||
set_value(x);
|
||||
#ifdef ASSERT
|
||||
@ -165,5 +131,4 @@ void metadata_Relocation::pd_fix_value(address x) {
|
||||
// assert(ni->data() == (int)x, "metadata relocation mismatch");
|
||||
#endif
|
||||
}
|
||||
#endif // !AARCH64
|
||||
}
|
||||
|
@ -126,15 +126,8 @@ void OptoRuntime::generate_exception_blob() {
|
||||
|
||||
// Restore SP from its saved reg (FP) if the exception PC is a MethodHandle call site.
|
||||
__ ldr(Rtemp, Address(Rthread, JavaThread::is_method_handle_return_offset()));
|
||||
#ifdef AARCH64
|
||||
Label skip;
|
||||
__ cbz(Rtemp, skip);
|
||||
__ mov(SP, Rmh_SP_save);
|
||||
__ bind(skip);
|
||||
#else
|
||||
__ cmp(Rtemp, 0);
|
||||
__ mov(SP, Rmh_SP_save, ne);
|
||||
#endif
|
||||
|
||||
// R0 contains handler address
|
||||
// Since this may be the deopt blob we must set R5 to look like we returned
|
||||
|
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -27,13 +27,9 @@
|
||||
#include "runtime/frame.inline.hpp"
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
|
||||
#ifndef AARCH64
|
||||
address StubRoutines::Arm::_idiv_irem_entry = NULL;
|
||||
#endif
|
||||
|
||||
address StubRoutines::Arm::_partial_subtype_check = NULL;
|
||||
|
||||
#ifndef AARCH64
|
||||
address StubRoutines::_atomic_load_long_entry = NULL;
|
||||
address StubRoutines::_atomic_store_long_entry = NULL;
|
||||
#endif
|
||||
|
@ -40,16 +40,12 @@ class Arm {
|
||||
|
||||
private:
|
||||
|
||||
#ifndef AARCH64
|
||||
static address _idiv_irem_entry;
|
||||
#endif
|
||||
static address _partial_subtype_check;
|
||||
|
||||
public:
|
||||
|
||||
#ifndef AARCH64
|
||||
static address idiv_irem_entry() { return _idiv_irem_entry; }
|
||||
#endif
|
||||
static address partial_subtype_check() { return _partial_subtype_check; }
|
||||
};
|
||||
|
||||
@ -57,13 +53,11 @@ class Arm {
|
||||
return return_pc == _call_stub_return_address;
|
||||
}
|
||||
|
||||
#ifndef AARCH64
|
||||
static address _atomic_load_long_entry;
|
||||
static address _atomic_store_long_entry;
|
||||
|
||||
static address atomic_load_long_entry() { return _atomic_load_long_entry; }
|
||||
static address atomic_store_long_entry() { return _atomic_store_long_entry; }
|
||||
#endif
|
||||
|
||||
|
||||
#endif // CPU_ARM_VM_STUBROUTINES_ARM_HPP
|
||||
|
@ -65,7 +65,7 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
|
||||
address entry = __ pc();
|
||||
|
||||
// callee-save register for saving LR, shared with generate_native_entry
|
||||
const Register Rsaved_ret_addr = AARCH64_ONLY(R21) NOT_AARCH64(Rtmp_save0);
|
||||
const Register Rsaved_ret_addr = Rtmp_save0;
|
||||
|
||||
__ mov(Rsaved_ret_addr, LR);
|
||||
|
||||
@ -73,24 +73,6 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
|
||||
__ mov(R2, Rlocals);
|
||||
__ mov(R3, SP);
|
||||
|
||||
#ifdef AARCH64
|
||||
// expand expr. stack and extended SP to avoid cutting SP in call_VM
|
||||
__ mov(Rstack_top, SP);
|
||||
__ str(Rstack_top, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
|
||||
__ check_stack_top();
|
||||
|
||||
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), R1, R2, R3, false);
|
||||
|
||||
__ ldp(ZR, c_rarg1, Address(SP, 2*wordSize, post_indexed));
|
||||
__ ldp(c_rarg2, c_rarg3, Address(SP, 2*wordSize, post_indexed));
|
||||
__ ldp(c_rarg4, c_rarg5, Address(SP, 2*wordSize, post_indexed));
|
||||
__ ldp(c_rarg6, c_rarg7, Address(SP, 2*wordSize, post_indexed));
|
||||
|
||||
__ ldp_d(V0, V1, Address(SP, 2*wordSize, post_indexed));
|
||||
__ ldp_d(V2, V3, Address(SP, 2*wordSize, post_indexed));
|
||||
__ ldp_d(V4, V5, Address(SP, 2*wordSize, post_indexed));
|
||||
__ ldp_d(V6, V7, Address(SP, 2*wordSize, post_indexed));
|
||||
#else
|
||||
|
||||
// Safer to save R9 (when scratched) since callers may have been
|
||||
// written assuming R9 survives. This is suboptimal but
|
||||
@ -110,7 +92,6 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
|
||||
// eliminate any gain imposed by avoiding 8 double word loads.
|
||||
__ fldmiad(SP, FloatRegisterSet(D0, 8), writeback);
|
||||
#endif // __ABI_HARD__
|
||||
#endif // AARCH64
|
||||
|
||||
__ ret(Rsaved_ret_addr);
|
||||
|
||||
@ -129,10 +110,6 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
|
||||
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
|
||||
address entry_point = __ pc();
|
||||
|
||||
#ifdef AARCH64
|
||||
__ restore_sp_after_call(Rtemp);
|
||||
__ restore_stack_top();
|
||||
#endif
|
||||
|
||||
__ empty_expression_stack();
|
||||
|
||||
@ -274,16 +251,11 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
|
||||
|
||||
__ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
|
||||
|
||||
#ifdef AARCH64
|
||||
__ restore_sp_after_call(Rtemp); // Restore SP to extended SP
|
||||
__ restore_stack_top();
|
||||
#else
|
||||
// Restore stack bottom in case i2c adjusted stack
|
||||
__ ldr(SP, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
|
||||
// and NULL it as marker that SP is now tos until next java call
|
||||
__ mov(Rtemp, (int)NULL_WORD);
|
||||
__ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
|
||||
#endif // AARCH64
|
||||
|
||||
__ restore_method();
|
||||
__ restore_bcp();
|
||||
@ -299,9 +271,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
|
||||
__ check_stack_top();
|
||||
__ add(Rstack_top, Rstack_top, AsmOperand(Rtemp, lsl, Interpreter::logStackElementSize));
|
||||
|
||||
#ifndef AARCH64
|
||||
__ convert_retval_to_tos(state);
|
||||
#endif // !AARCH64
|
||||
|
||||
__ check_and_handle_popframe();
|
||||
__ check_and_handle_earlyret();
|
||||
@ -317,15 +287,10 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i
|
||||
|
||||
__ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
|
||||
|
||||
#ifdef AARCH64
|
||||
__ restore_sp_after_call(Rtemp); // Restore SP to extended SP
|
||||
__ restore_stack_top();
|
||||
#else
|
||||
// The stack is not extended by deopt but we must NULL last_sp as this
|
||||
// entry is like a "return".
|
||||
__ mov(Rtemp, 0);
|
||||
__ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
|
||||
#endif // AARCH64
|
||||
|
||||
__ restore_method();
|
||||
__ restore_bcp();
|
||||
@ -351,32 +316,6 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i
|
||||
}
|
||||
|
||||
address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
|
||||
#ifdef AARCH64
|
||||
address entry = __ pc();
|
||||
switch (type) {
|
||||
case T_BOOLEAN:
|
||||
__ tst(R0, 0xff);
|
||||
__ cset(R0, ne);
|
||||
break;
|
||||
case T_CHAR : __ zero_extend(R0, R0, 16); break;
|
||||
case T_BYTE : __ sign_extend(R0, R0, 8); break;
|
||||
case T_SHORT : __ sign_extend(R0, R0, 16); break;
|
||||
case T_INT : // fall through
|
||||
case T_LONG : // fall through
|
||||
case T_VOID : // fall through
|
||||
case T_FLOAT : // fall through
|
||||
case T_DOUBLE : /* nothing to do */ break;
|
||||
case T_OBJECT :
|
||||
// retrieve result from frame
|
||||
__ ldr(R0, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize));
|
||||
// and verify it
|
||||
__ verify_oop(R0);
|
||||
break;
|
||||
default : ShouldNotReachHere();
|
||||
}
|
||||
__ ret();
|
||||
return entry;
|
||||
#else
|
||||
// Result handlers are not used on 32-bit ARM
|
||||
// since the returned value is already in appropriate format.
|
||||
__ should_not_reach_here(); // to avoid empty code block
|
||||
@ -384,7 +323,6 @@ address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type
|
||||
// The result handler non-zero indicates an object is returned and this is
|
||||
// used in the native entry code.
|
||||
return type == T_OBJECT ? (address)(-1) : NULL;
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
|
||||
@ -466,11 +404,7 @@ void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow,
|
||||
|
||||
__ add(Ricnt, Ricnt, InvocationCounter::count_increment);
|
||||
|
||||
#ifdef AARCH64
|
||||
__ andr(Rbcnt, Rbcnt, (unsigned int)InvocationCounter::count_mask_value); // mask out the status bits
|
||||
#else
|
||||
__ bic(Rbcnt, Rbcnt, ~InvocationCounter::count_mask_value); // mask out the status bits
|
||||
#endif // AARCH64
|
||||
|
||||
__ str_32(Ricnt, invocation_counter); // save invocation count
|
||||
__ add(Ricnt, Ricnt, Rbcnt); // add both counters
|
||||
@ -522,13 +456,12 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
|
||||
// Registers on entry:
|
||||
//
|
||||
// R3 = number of additional locals
|
||||
// R11 = max expression stack slots (AArch64 only)
|
||||
// Rthread
|
||||
// Rmethod
|
||||
// Registers used: R0, R1, R2, Rtemp.
|
||||
|
||||
const Register Radditional_locals = R3;
|
||||
const Register RmaxStack = AARCH64_ONLY(R11) NOT_AARCH64(R2);
|
||||
const Register RmaxStack = R2;
|
||||
|
||||
// monitor entry size
|
||||
const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
|
||||
@ -545,10 +478,8 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
|
||||
|
||||
__ ldr(R0, Address(Rthread, Thread::stack_base_offset()));
|
||||
__ ldr(R1, Address(Rthread, Thread::stack_size_offset()));
|
||||
#ifndef AARCH64
|
||||
__ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
|
||||
__ ldrh(RmaxStack, Address(Rtemp, ConstMethod::max_stack_offset()));
|
||||
#endif // !AARCH64
|
||||
__ sub_slow(Rtemp, SP, overhead_size + reserved_pages + guard_pages + Method::extra_stack_words());
|
||||
|
||||
// reserve space for additional locals
|
||||
@ -562,16 +493,8 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
|
||||
|
||||
__ cmp(Rtemp, R0);
|
||||
|
||||
#ifdef AARCH64
|
||||
Label L;
|
||||
__ b(L, hi);
|
||||
__ mov(SP, Rsender_sp); // restore SP
|
||||
__ b(StubRoutines::throw_StackOverflowError_entry());
|
||||
__ bind(L);
|
||||
#else
|
||||
__ mov(SP, Rsender_sp, ls); // restore SP
|
||||
__ b(StubRoutines::throw_StackOverflowError_entry(), ls);
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
|
||||
@ -595,14 +518,9 @@ void TemplateInterpreterGenerator::lock_method() {
|
||||
// get synchronization object
|
||||
{ Label done;
|
||||
__ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
|
||||
#ifdef AARCH64
|
||||
__ ldr(R0, Address(Rlocals, Interpreter::local_offset_in_bytes(0))); // get receiver (assume this is frequent case)
|
||||
__ tbz(Rtemp, JVM_ACC_STATIC_BIT, done);
|
||||
#else
|
||||
__ tst(Rtemp, JVM_ACC_STATIC);
|
||||
__ ldr(R0, Address(Rlocals, Interpreter::local_offset_in_bytes(0)), eq); // get receiver (assume this is frequent case)
|
||||
__ b(done, eq);
|
||||
#endif // AARCH64
|
||||
__ load_mirror(R0, Rmethod, Rtemp);
|
||||
__ bind(done);
|
||||
__ resolve(IS_NOT_NULL, R0);
|
||||
@ -610,12 +528,6 @@ void TemplateInterpreterGenerator::lock_method() {
|
||||
|
||||
// add space for monitor & lock
|
||||
|
||||
#ifdef AARCH64
|
||||
__ check_extended_sp(Rtemp);
|
||||
__ sub(SP, SP, entry_size); // adjust extended SP
|
||||
__ mov(Rtemp, SP);
|
||||
__ str(Rtemp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
|
||||
#endif // AARCH64
|
||||
|
||||
__ sub(Rstack_top, Rstack_top, entry_size);
|
||||
__ check_stack_top_on_expansion();
|
||||
@ -628,90 +540,6 @@ void TemplateInterpreterGenerator::lock_method() {
|
||||
__ lock_object(R1);
|
||||
}
|
||||
|
||||
#ifdef AARCH64
|
||||
|
||||
//
|
||||
// Generate a fixed interpreter frame. This is identical setup for interpreted methods
|
||||
// and for native methods hence the shared code.
|
||||
//
|
||||
// On entry:
|
||||
// R10 = ConstMethod
|
||||
// R11 = max expr. stack (in slots), if !native_call
|
||||
//
|
||||
// On exit:
|
||||
// Rbcp, Rstack_top are initialized, SP is extended
|
||||
//
|
||||
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
|
||||
// Incoming registers
|
||||
const Register RconstMethod = R10;
|
||||
const Register RmaxStack = R11;
|
||||
// Temporary registers
|
||||
const Register RextendedSP = R0;
|
||||
const Register Rcache = R1;
|
||||
const Register Rmdp = ProfileInterpreter ? R2 : ZR;
|
||||
|
||||
// Generates the following stack layout (stack grows up in this picture):
|
||||
//
|
||||
// [ expr. stack bottom ]
|
||||
// [ saved Rbcp ]
|
||||
// [ current Rlocals ]
|
||||
// [ cache ]
|
||||
// [ mdx ]
|
||||
// [ mirror ]
|
||||
// [ Method* ]
|
||||
// [ extended SP ]
|
||||
// [ expr. stack top ]
|
||||
// [ sender_sp ]
|
||||
// [ saved FP ] <--- FP
|
||||
// [ saved LR ]
|
||||
|
||||
// initialize fixed part of activation frame
|
||||
__ stp(FP, LR, Address(SP, -2*wordSize, pre_indexed));
|
||||
__ mov(FP, SP); // establish new FP
|
||||
|
||||
// setup Rbcp
|
||||
if (native_call) {
|
||||
__ mov(Rbcp, ZR); // bcp = 0 for native calls
|
||||
} else {
|
||||
__ add(Rbcp, RconstMethod, in_bytes(ConstMethod::codes_offset())); // get codebase
|
||||
}
|
||||
|
||||
// Rstack_top & RextendedSP
|
||||
__ sub(Rstack_top, SP, 10*wordSize);
|
||||
if (native_call) {
|
||||
__ sub(RextendedSP, Rstack_top, align_up(wordSize, StackAlignmentInBytes)); // reserve 1 slot for exception handling
|
||||
} else {
|
||||
__ sub(RextendedSP, Rstack_top, AsmOperand(RmaxStack, lsl, Interpreter::logStackElementSize));
|
||||
__ align_reg(RextendedSP, RextendedSP, StackAlignmentInBytes);
|
||||
}
|
||||
__ mov(SP, RextendedSP);
|
||||
__ check_stack_top();
|
||||
|
||||
// Load Rmdp
|
||||
if (ProfileInterpreter) {
|
||||
__ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
|
||||
__ tst(Rtemp, Rtemp);
|
||||
__ add(Rtemp, Rtemp, in_bytes(MethodData::data_offset()));
|
||||
__ csel(Rmdp, ZR, Rtemp, eq);
|
||||
}
|
||||
|
||||
// Load Rcache
|
||||
__ ldr(Rtemp, Address(RconstMethod, ConstMethod::constants_offset()));
|
||||
__ ldr(Rcache, Address(Rtemp, ConstantPool::cache_offset_in_bytes()));
|
||||
// Get mirror and store it in the frame as GC root for this Method*
|
||||
__ load_mirror(Rtemp, Rmethod, Rtemp);
|
||||
|
||||
// Build fixed frame
|
||||
__ stp(Rstack_top, Rbcp, Address(FP, -10*wordSize));
|
||||
__ stp(Rlocals, Rcache, Address(FP, -8*wordSize));
|
||||
__ stp(Rmdp, Rtemp, Address(FP, -6*wordSize));
|
||||
__ stp(Rmethod, RextendedSP, Address(FP, -4*wordSize));
|
||||
__ stp(ZR, Rsender_sp, Address(FP, -2*wordSize));
|
||||
assert(frame::interpreter_frame_initial_sp_offset == -10, "interpreter frame broken");
|
||||
assert(frame::interpreter_frame_stack_top_offset == -2, "stack top broken");
|
||||
}
|
||||
|
||||
#else // AARCH64
|
||||
|
||||
//
|
||||
// Generate a fixed interpreter frame. This is identical setup for interpreted methods
|
||||
@ -773,7 +601,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
|
||||
__ str(SP, Address(SP, 0)); // set expression stack bottom
|
||||
}
|
||||
|
||||
#endif // AARCH64
|
||||
|
||||
// End of helpers
|
||||
|
||||
@ -802,7 +629,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
|
||||
//
|
||||
// Stack layout immediately at entry
|
||||
//
|
||||
// [ optional padding(*)] <--- SP (AArch64)
|
||||
// [ parameter n ] <--- Rparams (SP on 32-bit ARM)
|
||||
// ...
|
||||
// [ parameter 1 ]
|
||||
@ -816,7 +642,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
|
||||
// local variables follow incoming parameters immediately; i.e.
|
||||
// the return address is saved at the end of the locals.
|
||||
//
|
||||
// [ reserved stack (*) ] <--- SP (AArch64)
|
||||
// [ expr. stack ] <--- Rstack_top (SP on 32-bit ARM)
|
||||
// [ monitor entry ]
|
||||
// ...
|
||||
@ -832,10 +657,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
|
||||
// 32-bit ARM:
|
||||
// [ last_sp ]
|
||||
//
|
||||
// AArch64:
|
||||
// [ extended SP (*) ]
|
||||
// [ stack top (*) ]
|
||||
//
|
||||
// [ sender_sp ]
|
||||
// [ saved FP ] <--- FP
|
||||
// [ saved LR ]
|
||||
@ -847,8 +668,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
|
||||
// ...
|
||||
// [ parameter 1 ] <--- Rlocals
|
||||
//
|
||||
// (*) - AArch64 only
|
||||
//
|
||||
|
||||
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
|
||||
// Code: _aload_0, _getfield, _areturn
|
||||
@ -925,29 +744,18 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
address entry_point = __ pc();
|
||||
|
||||
// Register allocation
|
||||
const Register Rsize_of_params = AARCH64_ONLY(R20) NOT_AARCH64(R6);
|
||||
const Register Rsig_handler = AARCH64_ONLY(R21) NOT_AARCH64(Rtmp_save0 /* R4 */);
|
||||
const Register Rnative_code = AARCH64_ONLY(R22) NOT_AARCH64(Rtmp_save1 /* R5 */);
|
||||
const Register Rresult_handler = AARCH64_ONLY(Rsig_handler) NOT_AARCH64(R6);
|
||||
const Register Rsize_of_params = R6;
|
||||
const Register Rsig_handler = Rtmp_save0; // R4
|
||||
const Register Rnative_code = Rtmp_save1; // R5
|
||||
const Register Rresult_handler = R6;
|
||||
|
||||
#ifdef AARCH64
|
||||
const Register RconstMethod = R10; // also used in generate_fixed_frame (should match)
|
||||
const Register Rsaved_result = Rnative_code;
|
||||
const FloatRegister Dsaved_result = V8;
|
||||
#else
|
||||
const Register Rsaved_result_lo = Rtmp_save0; // R4
|
||||
const Register Rsaved_result_hi = Rtmp_save1; // R5
|
||||
FloatRegister saved_result_fp;
|
||||
#endif // AARCH64
|
||||
|
||||
|
||||
#ifdef AARCH64
|
||||
__ ldr(RconstMethod, Address(Rmethod, Method::const_offset()));
|
||||
__ ldrh(Rsize_of_params, Address(RconstMethod, ConstMethod::size_of_parameters_offset()));
|
||||
#else
|
||||
__ ldr(Rsize_of_params, Address(Rmethod, Method::const_offset()));
|
||||
__ ldrh(Rsize_of_params, Address(Rsize_of_params, ConstMethod::size_of_parameters_offset()));
|
||||
#endif // AARCH64
|
||||
|
||||
// native calls don't need the stack size check since they have no expression stack
|
||||
// and the arguments are already on the stack and we only add a handful of words
|
||||
@ -957,19 +765,9 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
__ sub(Rlocals, Rparams, wordSize);
|
||||
__ add(Rlocals, Rlocals, AsmOperand(Rsize_of_params, lsl, Interpreter::logStackElementSize));
|
||||
|
||||
#ifdef AARCH64
|
||||
int extra_stack_reserve = 2*wordSize; // extra space for oop_temp
|
||||
if(__ can_post_interpreter_events()) {
|
||||
// extra space for saved results
|
||||
extra_stack_reserve += 2*wordSize;
|
||||
}
|
||||
// reserve extra stack space and nullify oop_temp slot
|
||||
__ stp(ZR, ZR, Address(SP, -extra_stack_reserve, pre_indexed));
|
||||
#else
|
||||
// reserve stack space for oop_temp
|
||||
__ mov(R0, 0);
|
||||
__ push(R0);
|
||||
#endif // AARCH64
|
||||
|
||||
generate_fixed_frame(true); // Note: R9 is now saved in the frame
|
||||
|
||||
@ -1065,15 +863,6 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
|
||||
// Allocate stack space for arguments
|
||||
|
||||
#ifdef AARCH64
|
||||
__ sub(Rtemp, SP, Rsize_of_params, ex_uxtw, LogBytesPerWord);
|
||||
__ align_reg(SP, Rtemp, StackAlignmentInBytes);
|
||||
|
||||
// Allocate more stack space to accomodate all arguments passed on GP and FP registers:
|
||||
// 8 * wordSize for GPRs
|
||||
// 8 * wordSize for FPRs
|
||||
int reg_arguments = align_up(8*wordSize + 8*wordSize, StackAlignmentInBytes);
|
||||
#else
|
||||
|
||||
// C functions need aligned stack
|
||||
__ bic(SP, SP, StackAlignmentInBytes - 1);
|
||||
@ -1093,12 +882,11 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
// It is also used for JNIEnv & class additional parameters.
|
||||
int reg_arguments = 4 * wordSize;
|
||||
#endif // __ABI_HARD__
|
||||
#endif // AARCH64
|
||||
|
||||
__ sub(SP, SP, reg_arguments);
|
||||
|
||||
|
||||
// Note: signature handler blows R4 (32-bit ARM) or R21 (AArch64) besides all scratch registers.
|
||||
// Note: signature handler blows R4 besides all scratch registers.
|
||||
// See AbstractInterpreterGenerator::generate_slow_signature_handler().
|
||||
__ call(Rsig_handler);
|
||||
#if R9_IS_SCRATCHED
|
||||
@ -1134,18 +922,11 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef AARCH64
|
||||
__ mov(Rtemp, _thread_in_native);
|
||||
__ add(Rtemp2, Rthread, in_bytes(JavaThread::thread_state_offset()));
|
||||
// STLR is used to force all preceding writes to be observed prior to thread state change
|
||||
__ stlr_w(Rtemp, Rtemp2);
|
||||
#else
|
||||
// Force all preceding writes to be observed prior to thread state change
|
||||
__ membar(MacroAssembler::StoreStore, Rtemp);
|
||||
|
||||
__ mov(Rtemp, _thread_in_native);
|
||||
__ str(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));
|
||||
#endif // AARCH64
|
||||
|
||||
__ call(Rnative_code);
|
||||
#if R9_IS_SCRATCHED
|
||||
@ -1167,10 +948,6 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
__ ldr_global_s32(Rtemp, SafepointSynchronize::address_of_state());
|
||||
|
||||
// Protect the return value in the interleaved code: save it to callee-save registers.
|
||||
#ifdef AARCH64
|
||||
__ mov(Rsaved_result, R0);
|
||||
__ fmov_d(Dsaved_result, D0);
|
||||
#else
|
||||
__ mov(Rsaved_result_lo, R0);
|
||||
__ mov(Rsaved_result_hi, R1);
|
||||
#ifdef __ABI_HARD__
|
||||
@ -1180,26 +957,17 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
#else
|
||||
saved_result_fp = fnoreg;
|
||||
#endif // __ABI_HARD__
|
||||
#endif // AARCH64
|
||||
|
||||
{
|
||||
__ ldr_u32(R3, Address(Rthread, JavaThread::suspend_flags_offset()));
|
||||
__ cmp(Rtemp, SafepointSynchronize::_not_synchronized);
|
||||
__ cond_cmp(R3, 0, eq);
|
||||
|
||||
#ifdef AARCH64
|
||||
Label L;
|
||||
__ b(L, eq);
|
||||
__ mov(R0, Rthread);
|
||||
__ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), relocInfo::none);
|
||||
__ bind(L);
|
||||
#else
|
||||
__ mov(R0, Rthread, ne);
|
||||
__ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), relocInfo::none, ne);
|
||||
#if R9_IS_SCRATCHED
|
||||
__ restore_method();
|
||||
#endif
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
// Perform Native->Java thread transition
|
||||
@ -1217,15 +985,9 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
// Unbox oop result, e.g. JNIHandles::resolve result if it's an oop.
|
||||
{
|
||||
Label Lnot_oop;
|
||||
#ifdef AARCH64
|
||||
__ mov_slow(Rtemp, AbstractInterpreter::result_handler(T_OBJECT));
|
||||
__ cmp(Rresult_handler, Rtemp);
|
||||
__ b(Lnot_oop, ne);
|
||||
#else // !AARCH64
|
||||
// For ARM32, Rresult_handler is -1 for oop result, 0 otherwise.
|
||||
__ cbz(Rresult_handler, Lnot_oop);
|
||||
#endif // !AARCH64
|
||||
Register value = AARCH64_ONLY(Rsaved_result) NOT_AARCH64(Rsaved_result_lo);
|
||||
Register value = Rsaved_result_lo;
|
||||
__ resolve_jobject(value, // value
|
||||
Rtemp, // tmp1
|
||||
R1_tmp); // tmp2
|
||||
@ -1234,43 +996,23 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
__ bind(Lnot_oop);
|
||||
}
|
||||
|
||||
#ifdef AARCH64
|
||||
// Restore SP (drop native parameters area), to keep SP in sync with extended_sp in frame
|
||||
__ restore_sp_after_call(Rtemp);
|
||||
__ check_stack_top();
|
||||
#endif // AARCH64
|
||||
|
||||
// reguard stack if StackOverflow exception happened while in native.
|
||||
{
|
||||
__ ldr_u32(Rtemp, Address(Rthread, JavaThread::stack_guard_state_offset()));
|
||||
__ cmp_32(Rtemp, JavaThread::stack_guard_yellow_reserved_disabled);
|
||||
#ifdef AARCH64
|
||||
Label L;
|
||||
__ b(L, ne);
|
||||
__ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), relocInfo::none);
|
||||
__ bind(L);
|
||||
#else
|
||||
__ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), relocInfo::none, eq);
|
||||
#if R9_IS_SCRATCHED
|
||||
__ restore_method();
|
||||
#endif
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
// check pending exceptions
|
||||
{
|
||||
__ ldr(Rtemp, Address(Rthread, Thread::pending_exception_offset()));
|
||||
#ifdef AARCH64
|
||||
Label L;
|
||||
__ cbz(Rtemp, L);
|
||||
__ mov_pc_to(Rexception_pc);
|
||||
__ b(StubRoutines::forward_exception_entry());
|
||||
__ bind(L);
|
||||
#else
|
||||
__ cmp(Rtemp, 0);
|
||||
__ mov(Rexception_pc, PC, ne);
|
||||
__ b(StubRoutines::forward_exception_entry(), ne);
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
if (synchronized) {
|
||||
@ -1284,19 +1026,9 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
// the exception handler code notifies the runtime of method exits
|
||||
// too. If this happens before, method entry/exit notifications are
|
||||
// not properly paired (was bug - gri 11/22/99).
|
||||
#ifdef AARCH64
|
||||
__ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI, true, Rsaved_result, noreg, Dsaved_result);
|
||||
#else
|
||||
__ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI, true, Rsaved_result_lo, Rsaved_result_hi, saved_result_fp);
|
||||
#endif // AARCH64
|
||||
|
||||
// Restore the result. Oop result is restored from the stack.
|
||||
#ifdef AARCH64
|
||||
__ mov(R0, Rsaved_result);
|
||||
__ fmov_d(D0, Dsaved_result);
|
||||
|
||||
__ blr(Rresult_handler);
|
||||
#else
|
||||
__ cmp(Rresult_handler, 0);
|
||||
__ ldr(R0, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize), ne);
|
||||
__ mov(R0, Rsaved_result_lo, eq);
|
||||
@ -1316,18 +1048,11 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
__ bind(L);
|
||||
}
|
||||
#endif // ASSERT
|
||||
#endif // AARCH64
|
||||
|
||||
// Restore FP/LR, sender_sp and return
|
||||
#ifdef AARCH64
|
||||
__ ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
|
||||
__ ldp(FP, LR, Address(FP));
|
||||
__ mov(SP, Rtemp);
|
||||
#else
|
||||
__ mov(Rtemp, FP);
|
||||
__ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
|
||||
__ ldr(SP, Address(Rtemp, frame::interpreter_frame_sender_sp_offset * wordSize));
|
||||
#endif // AARCH64
|
||||
|
||||
__ ret();
|
||||
|
||||
@ -1354,12 +1079,8 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
|
||||
|
||||
address entry_point = __ pc();
|
||||
|
||||
const Register RconstMethod = AARCH64_ONLY(R10) NOT_AARCH64(R3);
|
||||
const Register RconstMethod = R3;
|
||||
|
||||
#ifdef AARCH64
|
||||
const Register RmaxStack = R11;
|
||||
const Register RlocalsBase = R12;
|
||||
#endif // AARCH64
|
||||
|
||||
__ ldr(RconstMethod, Address(Rmethod, Method::const_offset()));
|
||||
|
||||
@ -1372,48 +1093,10 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
|
||||
|
||||
__ sub(R3, R3, R2); // number of additional locals
|
||||
|
||||
#ifdef AARCH64
|
||||
// setup RmaxStack
|
||||
__ ldrh(RmaxStack, Address(RconstMethod, ConstMethod::max_stack_offset()));
|
||||
// We have to add extra reserved slots to max_stack. There are 3 users of the extra slots,
|
||||
// none of which are at the same time, so we just need to make sure there is enough room
|
||||
// for the biggest user:
|
||||
// -reserved slot for exception handler
|
||||
// -reserved slots for JSR292. Method::extra_stack_entries() is the size.
|
||||
// -3 reserved slots so get_method_counters() can save some registers before call_VM().
|
||||
__ add(RmaxStack, RmaxStack, MAX2(3, Method::extra_stack_entries()));
|
||||
#endif // AARCH64
|
||||
|
||||
// see if we've got enough room on the stack for locals plus overhead.
|
||||
generate_stack_overflow_check();
|
||||
|
||||
#ifdef AARCH64
|
||||
|
||||
// allocate space for locals
|
||||
{
|
||||
__ sub(RlocalsBase, Rparams, AsmOperand(R3, lsl, Interpreter::logStackElementSize));
|
||||
__ align_reg(SP, RlocalsBase, StackAlignmentInBytes);
|
||||
}
|
||||
|
||||
// explicitly initialize locals
|
||||
{
|
||||
Label zero_loop, done;
|
||||
__ cbz(R3, done);
|
||||
|
||||
__ tbz(R3, 0, zero_loop);
|
||||
__ subs(R3, R3, 1);
|
||||
__ str(ZR, Address(RlocalsBase, wordSize, post_indexed));
|
||||
__ b(done, eq);
|
||||
|
||||
__ bind(zero_loop);
|
||||
__ subs(R3, R3, 2);
|
||||
__ stp(ZR, ZR, Address(RlocalsBase, 2*wordSize, post_indexed));
|
||||
__ b(zero_loop, ne);
|
||||
|
||||
__ bind(done);
|
||||
}
|
||||
|
||||
#else
|
||||
// allocate space for locals
|
||||
// explicitly initialize locals
|
||||
|
||||
@ -1439,7 +1122,6 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
|
||||
__ push(R0, ge);
|
||||
|
||||
__ b(loop, gt);
|
||||
#endif // AARCH64
|
||||
|
||||
// initialize fixed part of activation frame
|
||||
generate_fixed_frame(false);
|
||||
@ -1554,11 +1236,9 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
|
||||
Interpreter::_rethrow_exception_entry = __ pc();
|
||||
// Rexception_obj: exception
|
||||
|
||||
#ifndef AARCH64
|
||||
// Clear interpreter_frame_last_sp.
|
||||
__ mov(Rtemp, 0);
|
||||
__ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
|
||||
#endif // !AARCH64
|
||||
|
||||
#if R9_IS_SCRATCHED
|
||||
__ restore_method();
|
||||
@ -1567,9 +1247,6 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
|
||||
__ restore_dispatch();
|
||||
__ restore_locals();
|
||||
|
||||
#ifdef AARCH64
|
||||
__ restore_sp_after_call(Rtemp);
|
||||
#endif // AARCH64
|
||||
|
||||
// Entry point for exceptions thrown within interpreter code
|
||||
Interpreter::_throw_exception_entry = __ pc();
|
||||
@ -1606,9 +1283,6 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
|
||||
//
|
||||
Interpreter::_remove_activation_preserving_args_entry = __ pc();
|
||||
|
||||
#ifdef AARCH64
|
||||
__ restore_sp_after_call(Rtemp); // restore SP to extended SP
|
||||
#endif // AARCH64
|
||||
|
||||
__ empty_expression_stack();
|
||||
|
||||
@ -1635,9 +1309,6 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
|
||||
__ ldr(R0, Address(FP, frame::return_addr_offset * wordSize));
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), R0);
|
||||
__ cbnz_32(R0, caller_not_deoptimized);
|
||||
#ifdef AARCH64
|
||||
__ NOT_TESTED();
|
||||
#endif
|
||||
|
||||
// Compute size of arguments for saving when returning to deoptimized caller
|
||||
__ restore_method();
|
||||
@ -1672,7 +1343,6 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
|
||||
/* install_monitor_exception */ false,
|
||||
/* notify_jvmdi */ false);
|
||||
|
||||
#ifndef AARCH64
|
||||
// Finish with popframe handling
|
||||
// A previous I2C followed by a deoptimization might have moved the
|
||||
// outgoing arguments further up the stack. PopFrame expects the
|
||||
@ -1691,17 +1361,11 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
|
||||
__ mov(R0, Rthread);
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), R0, R1, R2);
|
||||
__ reset_last_Java_frame(Rtemp);
|
||||
#endif // !AARCH64
|
||||
|
||||
#ifdef AARCH64
|
||||
__ restore_sp_after_call(Rtemp);
|
||||
__ restore_stack_top();
|
||||
#else
|
||||
// Restore the last_sp and null it out
|
||||
__ ldr(SP, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
|
||||
__ mov(Rtemp, (int)NULL_WORD);
|
||||
__ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
|
||||
#endif // AARCH64
|
||||
|
||||
__ restore_bcp();
|
||||
__ restore_dispatch();
|
||||
@ -1778,9 +1442,6 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
|
||||
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
|
||||
address entry = __ pc();
|
||||
|
||||
#ifdef AARCH64
|
||||
__ restore_sp_after_call(Rtemp); // restore SP to extended SP
|
||||
#endif // AARCH64
|
||||
|
||||
__ restore_bcp();
|
||||
__ restore_dispatch();
|
||||
@ -1801,13 +1462,11 @@ address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state
|
||||
false, /* install_monitor_exception */
|
||||
true); /* notify_jvmdi */
|
||||
|
||||
#ifndef AARCH64
|
||||
// According to interpreter calling conventions, result is returned in R0/R1,
|
||||
// so ftos (S0) and dtos (D0) are moved to R0/R1.
|
||||
// This conversion should be done after remove_activation, as it uses
|
||||
// push(state) & pop(state) to preserve return value.
|
||||
__ convert_tos_to_retval(state);
|
||||
#endif // !AARCH64
|
||||
__ ret();
|
||||
|
||||
return entry;
|
||||
@ -1830,7 +1489,7 @@ void TemplateInterpreterGenerator::set_vtos_entry_points (Template* t, address&
|
||||
|
||||
lep = __ pc(); __ push(ltos); __ b(L);
|
||||
|
||||
if (AARCH64_ONLY(true) NOT_AARCH64(VerifyOops)) { // can't share atos entry with itos on AArch64 or if VerifyOops
|
||||
if (VerifyOops) { // can't share atos entry if VerifyOops
|
||||
aep = __ pc(); __ push(atos); __ b(L);
|
||||
} else {
|
||||
aep = __ pc(); // fall through
|
||||
@ -1858,11 +1517,7 @@ address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
|
||||
|
||||
// pass tosca registers as arguments
|
||||
__ mov(R2, R0_tos);
|
||||
#ifdef AARCH64
|
||||
__ mov(R3, ZR);
|
||||
#else
|
||||
__ mov(R3, R1_tos_hi);
|
||||
#endif // AARCH64
|
||||
__ mov(R1, LR); // save return address
|
||||
|
||||
// call tracer
|
||||
|
File diff suppressed because it is too large
@ -41,23 +41,6 @@ class VM_Version: public Abstract_VM_Version {
static void initialize();
static bool is_initialized() { return _is_initialized; }

#ifdef AARCH64
public:
static bool supports_ldrex() { return true; }
static bool supports_ldrexd() { return true; }
static bool supports_movw() { return true; }

// Override Abstract_VM_Version implementation
static bool use_biased_locking();

static bool has_simd() { return _has_simd; }
static bool has_vfp() { return has_simd(); }
static bool simd_math_is_compliant() { return true; }

static bool prefer_moves_over_load_literal() { return true; }

#else
protected:
enum Feature_Flag {

@ -121,7 +104,6 @@ class VM_Version: public Abstract_VM_Version {
friend class VM_Version_StubGenerator;
#endif // AARCH64
};

#endif // CPU_ARM_VM_VM_VERSION_ARM_HPP
@ -1,261 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "jvm.h"
|
||||
#include "asm/macroAssembler.inline.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "runtime/java.hpp"
|
||||
#include "runtime/os.inline.hpp"
|
||||
#include "runtime/stubCodeGenerator.hpp"
|
||||
#include "vm_version_arm.hpp"
|
||||
#include <sys/auxv.h>
|
||||
#include <asm/hwcap.h>
|
||||
|
||||
#ifndef HWCAP_AES
|
||||
#define HWCAP_AES 1 << 3
|
||||
#endif
|
||||
|
||||
bool VM_Version::_is_initialized = false;
|
||||
bool VM_Version::_has_simd = false;
|
||||
|
||||
extern "C" {
|
||||
typedef bool (*check_simd_t)();
|
||||
}
|
||||
|
||||
|
||||
#ifdef COMPILER2
|
||||
|
||||
#define __ _masm->
|
||||
|
||||
class VM_Version_StubGenerator: public StubCodeGenerator {
|
||||
public:
|
||||
|
||||
VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}
|
||||
|
||||
address generate_check_simd() {
|
||||
StubCodeMark mark(this, "VM_Version", "check_simd");
|
||||
address start = __ pc();
|
||||
|
||||
__ vcnt(Stemp, Stemp);
|
||||
__ mov(R0, 1);
|
||||
__ ret(LR);
|
||||
|
||||
return start;
|
||||
};
|
||||
};
|
||||
|
||||
#undef __
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
|
||||
extern "C" address check_simd_fault_instr;
|
||||
|
||||
|
||||
void VM_Version::initialize() {
|
||||
ResourceMark rm;
|
||||
|
||||
// Making this stub must be FIRST use of assembler
|
||||
const int stub_size = 128;
|
||||
BufferBlob* stub_blob = BufferBlob::create("get_cpu_info", stub_size);
|
||||
if (stub_blob == NULL) {
|
||||
vm_exit_during_initialization("Unable to allocate get_cpu_info stub");
|
||||
}
|
||||
|
||||
if (UseFMA) {
|
||||
warning("FMA instructions are not available on this CPU");
|
||||
FLAG_SET_DEFAULT(UseFMA, false);
|
||||
}
|
||||
|
||||
if (UseSHA) {
|
||||
warning("SHA instructions are not available on this CPU");
|
||||
FLAG_SET_DEFAULT(UseSHA, false);
|
||||
}
|
||||
|
||||
if (UseSHA1Intrinsics) {
|
||||
warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
|
||||
FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
|
||||
}
|
||||
|
||||
if (UseSHA256Intrinsics) {
|
||||
warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
|
||||
FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
|
||||
}
|
||||
|
||||
if (UseSHA512Intrinsics) {
|
||||
warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
|
||||
FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
|
||||
}
|
||||
|
||||
if (UseCRC32Intrinsics) {
|
||||
if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
|
||||
warning("CRC32 intrinsics are not available on this CPU");
|
||||
FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
|
||||
}
|
||||
|
||||
if (UseCRC32CIntrinsics) {
|
||||
if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics))
|
||||
warning("CRC32C intrinsics are not available on this CPU");
|
||||
FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
|
||||
}
|
||||
|
||||
if (UseAdler32Intrinsics) {
|
||||
warning("Adler32 intrinsics are not available on this CPU");
|
||||
FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
|
||||
}
|
||||
|
||||
if (UseVectorizedMismatchIntrinsic) {
|
||||
warning("vectorizedMismatch intrinsic is not available on this CPU.");
|
||||
FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
|
||||
}
|
||||
|
||||
CodeBuffer c(stub_blob);
|
||||
|
||||
#ifdef COMPILER2
|
||||
VM_Version_StubGenerator g(&c);
|
||||
|
||||
address check_simd_pc = g.generate_check_simd();
|
||||
if (check_simd_pc != NULL) {
|
||||
check_simd_t check_simd = CAST_TO_FN_PTR(check_simd_t, check_simd_pc);
|
||||
check_simd_fault_instr = (address)check_simd;
|
||||
_has_simd = check_simd();
|
||||
} else {
|
||||
assert(! _has_simd, "default _has_simd value must be 'false'");
|
||||
}
|
||||
#endif
|
||||
|
||||
unsigned long auxv = getauxval(AT_HWCAP);
|
||||
|
||||
char buf[512];
|
||||
jio_snprintf(buf, sizeof(buf), "AArch64%s",
|
||||
((auxv & HWCAP_AES) ? ", aes" : ""));
|
||||
|
||||
_features_string = os::strdup(buf);
|
||||
|
||||
#ifdef COMPILER2
|
||||
if (auxv & HWCAP_AES) {
|
||||
if (FLAG_IS_DEFAULT(UseAES)) {
|
||||
FLAG_SET_DEFAULT(UseAES, true);
|
||||
}
|
||||
if (!UseAES) {
|
||||
if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
|
||||
warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
|
||||
}
|
||||
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
|
||||
} else {
|
||||
if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
|
||||
FLAG_SET_DEFAULT(UseAESIntrinsics, true);
|
||||
}
|
||||
}
|
||||
} else
|
||||
#endif
|
||||
if (UseAES || UseAESIntrinsics) {
|
||||
if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
|
||||
warning("AES instructions are not available on this CPU");
|
||||
FLAG_SET_DEFAULT(UseAES, false);
|
||||
}
|
||||
if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
|
||||
warning("AES intrinsics are not available on this CPU");
|
||||
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
|
||||
}
|
||||
}
|
||||
|
||||
if (UseAESCTRIntrinsics) {
|
||||
warning("AES/CTR intrinsics are not available on this CPU");
|
||||
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
|
||||
}
|
||||
|
||||
_supports_cx8 = true;
|
||||
_supports_atomic_getset4 = true;
|
||||
_supports_atomic_getadd4 = true;
|
||||
_supports_atomic_getset8 = true;
|
||||
_supports_atomic_getadd8 = true;
|
||||
|
||||
// TODO-AARCH64 revise C2 flags
|
||||
|
||||
if (has_simd()) {
|
||||
if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
|
||||
FLAG_SET_DEFAULT(UsePopCountInstruction, true);
|
||||
}
|
||||
}
|
||||
|
||||
if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
|
||||
FLAG_SET_DEFAULT(AllocatePrefetchDistance, 128);
|
||||
}
|
||||
|
||||
#ifdef COMPILER2
|
||||
FLAG_SET_DEFAULT(UseFPUForSpilling, true);
|
||||
|
||||
if (FLAG_IS_DEFAULT(MaxVectorSize)) {
|
||||
// FLAG_SET_DEFAULT(MaxVectorSize, has_simd() ? 16 : 8);
|
||||
// SIMD/NEON can use 16, but default is 8 because currently
|
||||
// larger than 8 will disable instruction scheduling
|
||||
FLAG_SET_DEFAULT(MaxVectorSize, 8);
|
||||
}
|
||||
|
||||
if (MaxVectorSize > 16) {
|
||||
FLAG_SET_DEFAULT(MaxVectorSize, 8);
|
||||
}
|
||||
#endif
|
||||
|
||||
if (FLAG_IS_DEFAULT(Tier4CompileThreshold)) {
|
||||
Tier4CompileThreshold = 10000;
|
||||
}
|
||||
if (FLAG_IS_DEFAULT(Tier3InvocationThreshold)) {
|
||||
Tier3InvocationThreshold = 1000;
|
||||
}
|
||||
if (FLAG_IS_DEFAULT(Tier3CompileThreshold)) {
|
||||
Tier3CompileThreshold = 5000;
|
||||
}
|
||||
if (FLAG_IS_DEFAULT(Tier3MinInvocationThreshold)) {
|
||||
Tier3MinInvocationThreshold = 500;
|
||||
}
|
||||
|
||||
FLAG_SET_DEFAULT(TypeProfileLevel, 0); // unsupported
|
||||
|
||||
// This machine does not allow unaligned memory accesses
|
||||
if (UseUnalignedAccesses) {
|
||||
if (!FLAG_IS_DEFAULT(UseUnalignedAccesses))
|
||||
warning("Unaligned memory access is not available on this CPU");
|
||||
FLAG_SET_DEFAULT(UseUnalignedAccesses, false);
|
||||
}
|
||||
|
||||
_is_initialized = true;
|
||||
}
|
||||
|
||||
bool VM_Version::use_biased_locking() {
|
||||
// TODO-AARCH64 measure performance and revise
|
||||
|
||||
// The cost of CAS on uniprocessor ARM v6 and later is low compared to the
|
||||
// overhead related to slightly longer Biased Locking execution path.
|
||||
// Testing shows no improvement when running with Biased Locking enabled
|
||||
// on an ARMv6 and higher uniprocessor systems. The situation is different on
|
||||
// ARMv5 and MP systems.
|
||||
//
|
||||
// Therefore the Biased Locking is enabled on ARMv5 and ARM MP only.
|
||||
//
|
||||
return os::is_MP();
|
||||
}
|
@ -49,11 +49,7 @@ void VM_Version_Ext::initialize_cpu_information(void) {
_no_of_cores = os::processor_count();
_no_of_threads = _no_of_cores;
_no_of_sockets = _no_of_cores;
#ifdef AARCH64
snprintf(_cpu_name, CPU_TYPE_DESC_BUF_SIZE - 1, "AArch64");
#else
snprintf(_cpu_name, CPU_TYPE_DESC_BUF_SIZE - 1, "ARM%d", _arm_arch);
#endif
snprintf(_cpu_desc, CPU_DETAILED_DESC_BUF_SIZE, "%s", _features_string);
_initialized = true;
}
@ -92,7 +92,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
int method_offset = vtableEntry::method_offset_in_bytes() + entry_offset;

assert ((method_offset & (wordSize - 1)) == 0, "offset should be aligned");
int offset_mask = AARCH64_ONLY(0xfff << LogBytesPerWord) NOT_AARCH64(0xfff);
int offset_mask = 0xfff;
if (method_offset & ~offset_mask) {
__ add(tmp, tmp, method_offset & ~offset_mask);
}

@ -109,12 +109,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
#endif

address ame_addr = __ pc();
#ifdef AARCH64
__ ldr(tmp, Address(Rmethod, Method::from_compiled_offset()));
__ br(tmp);
#else
__ ldr(PC, Address(Rmethod, Method::from_compiled_offset()));
#endif // AARCH64

masm->flush();
bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0);

@ -150,9 +145,9 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
assert(VtableStub::receiver_location() == R0->as_VMReg(), "receiver expected in R0");

// R0-R3 / R0-R7 registers hold the arguments and cannot be spoiled
const Register Rclass = AARCH64_ONLY(R9) NOT_AARCH64(R4);
const Register Rintf = AARCH64_ONLY(R10) NOT_AARCH64(R5);
const Register Rscan = AARCH64_ONLY(R11) NOT_AARCH64(R6);
const Register Rclass = R4;
const Register Rintf = R5;
const Register Rscan = R6;

Label L_no_such_interface;

@ -200,12 +195,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {

address ame_addr = __ pc();

#ifdef AARCH64
__ ldr(Rtemp, Address(Rmethod, Method::from_compiled_offset()));
__ br(Rtemp);
#else
__ ldr(PC, Address(Rmethod, Method::from_compiled_offset()));
#endif // AARCH64

__ bind(L_no_such_interface);
// Handle IncompatibleClassChangeError in itable stubs.
@ -535,6 +535,9 @@ class Assembler : public AbstractAssembler {
XVMULSP_OPCODE = (60u << OPCODE_SHIFT | 80u << 3),
XVMULDP_OPCODE = (60u << OPCODE_SHIFT | 112u << 3),

// Deliver A Random Number (introduced with POWER9)
DARN_OPCODE = (31u << OPCODE_SHIFT | 755u << 1),

// Vector Permute and Formatting
VPKPX_OPCODE = (4u << OPCODE_SHIFT | 782u ),
VPKSHSS_OPCODE = (4u << OPCODE_SHIFT | 398u ),

@ -1072,6 +1075,7 @@ class Assembler : public AbstractAssembler {
static int frt( int x) { return opp_u_field(x, 10, 6); }
static int fxm( int x) { return opp_u_field(x, 19, 12); }
static int l10( int x) { return opp_u_field(x, 10, 10); }
static int l14( int x) { return opp_u_field(x, 15, 14); }
static int l15( int x) { return opp_u_field(x, 15, 15); }
static int l910( int x) { return opp_u_field(x, 10, 9); }
static int e1215( int x) { return opp_u_field(x, 15, 12); }

@ -2220,6 +2224,9 @@ class Assembler : public AbstractAssembler {
inline void mtfprwa( FloatRegister d, Register a);
inline void mffprd( Register a, FloatRegister d);

// Deliver A Random Number (introduced with POWER9)
inline void darn( Register d, int l = 1 /*L=CRN*/);

// AES (introduced with Power 8)
inline void vcipher( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vcipherlast( VectorRegister d, VectorRegister a, VectorRegister b);
@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -979,6 +979,9 @@ inline void Assembler::tsuspend_() { emit_int32( TS
inline void Assembler::tresume_() { emit_int32( TSR_OPCODE | /*L=1*/ 1u << (31-10) | rc(1)); }
inline void Assembler::tcheck(int f) { emit_int32( TCHECK_OPCODE | bf(f)); }

// Deliver A Random Number (introduced with POWER9)
inline void Assembler::darn(Register d, int l /* =1 */) { emit_int32( DARN_OPCODE | rt(d) | l14(l)); }

// ra0 version
inline void Assembler::lwzx( Register d, Register s2) { emit_int32( LWZX_OPCODE | rt(d) | rb(s2));}
inline void Assembler::lwz( Register d, int si16 ) { emit_int32( LWZ_OPCODE | rt(d) | d1(si16));}
@ -63,7 +63,9 @@ void VM_Version::initialize() {

// If PowerArchitecturePPC64 hasn't been specified explicitly determine from features.
if (FLAG_IS_DEFAULT(PowerArchitecturePPC64)) {
if (VM_Version::has_lqarx()) {
if (VM_Version::has_darn()) {
FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 9);
} else if (VM_Version::has_lqarx()) {
FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 8);
} else if (VM_Version::has_popcntw()) {
FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 7);

@ -78,6 +80,7 @@ void VM_Version::initialize() {
bool PowerArchitecturePPC64_ok = false;
switch (PowerArchitecturePPC64) {
case 9: if (!VM_Version::has_darn() ) break;
case 8: if (!VM_Version::has_lqarx() ) break;
case 7: if (!VM_Version::has_popcntw()) break;
case 6: if (!VM_Version::has_cmpb() ) break;

@ -131,12 +134,11 @@ void VM_Version::initialize() {
// Create and print feature-string.
char buf[(num_features+1) * 16]; // Max 16 chars per feature.
jio_snprintf(buf, sizeof(buf),
"ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
"ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
(has_fsqrt() ? " fsqrt" : ""),
(has_isel() ? " isel" : ""),
(has_lxarxeh() ? " lxarxeh" : ""),
(has_cmpb() ? " cmpb" : ""),
//(has_mftgpr()? " mftgpr" : ""),
(has_popcntb() ? " popcntb" : ""),
(has_popcntw() ? " popcntw" : ""),
(has_fcfids() ? " fcfids" : ""),

@ -149,7 +151,8 @@ void VM_Version::initialize() {
(has_ldbrx() ? " ldbrx" : ""),
(has_stdbrx() ? " stdbrx" : ""),
(has_vshasig() ? " sha" : ""),
(has_tm() ? " rtm" : "")
(has_tm() ? " rtm" : ""),
(has_darn() ? " darn" : "")
// Make sure number of %s matches num_features!
);
_features_string = os::strdup(buf);

@ -663,6 +666,8 @@ void VM_Version::determine_features() {
a->ldbrx(R7, R3_ARG1, R4_ARG2); // code[14] -> ldbrx
a->stdbrx(R7, R3_ARG1, R4_ARG2); // code[15] -> stdbrx
a->vshasigmaw(VR0, VR1, 1, 0xF); // code[16] -> vshasig
// rtm is determined by OS
a->darn(R7); // code[17] -> darn
a->blr();

// Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.

@ -714,6 +719,8 @@ void VM_Version::determine_features() {
if (code[feature_cntr++]) features |= ldbrx_m;
if (code[feature_cntr++]) features |= stdbrx_m;
if (code[feature_cntr++]) features |= vshasig_m;
// feature rtm_m is determined by OS
if (code[feature_cntr++]) features |= darn_m;

// Print the detection code.
if (PrintAssembly) {
@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -50,6 +50,7 @@ protected:
stdbrx,
vshasig,
rtm,
darn,
num_features // last entry to count features
};
enum Feature_Flag_Set {

@ -72,6 +73,7 @@ protected:
stdbrx_m = (1 << stdbrx ),
vshasig_m = (1 << vshasig),
rtm_m = (1 << rtm ),
darn_m = (1 << darn ),
all_features_m = (unsigned long)-1
};

@ -108,9 +110,10 @@ public:
static bool has_ldbrx() { return (_features & ldbrx_m) != 0; }
static bool has_stdbrx() { return (_features & stdbrx_m) != 0; }
static bool has_vshasig() { return (_features & vshasig_m) != 0; }
static bool has_mtfprd() { return has_vpmsumb(); } // alias for P8
// OS feature support
static bool has_tm() { return (_features & rtm_m) != 0; }
static bool has_darn() { return (_features & darn_m) != 0; }

static bool has_mtfprd() { return has_vpmsumb(); } // alias for P8

// Assembler testing
static void allow_all();
@ -776,12 +776,7 @@ bool os::Aix::get_meminfo(meminfo_t* pmi) {
// Thread start routine for all newly created threads
static void *thread_native_entry(Thread *thread) {

// find out my own stack dimensions
{
// actually, this should do exactly the same as thread->record_stack_base_and_size...
thread->set_stack_base(os::current_stack_base());
thread->set_stack_size(os::current_stack_size());
}
thread->record_stack_base_and_size();

const pthread_t pthread_id = ::pthread_self();
const tid_t kernel_thread_id = ::thread_self();

@ -834,20 +829,15 @@ static void *thread_native_entry(Thread *thread) {
assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

// Call one more level start routine.
thread->run();
thread->call_run();

// Note: at this point the thread object may already have deleted itself.
// Prevent dereferencing it from here on out.
thread = NULL;

log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
os::current_thread_id(), (uintx) kernel_thread_id);

// If a thread has not deleted itself ("delete this") as part of its
// termination sequence, we have to ensure thread-local-storage is
// cleared before we actually terminate. No threads should ever be
// deleted asynchronously with respect to their termination.
if (Thread::current_or_null_safe() != NULL) {
assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
thread->clear_thread_current();
}

return 0;
}
@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -46,12 +46,6 @@
/* overridden in Arguments::parse_each_vm_init_arg. */ \
product(bool, UseBsdPosixThreadCPUClocks, true, \
"enable fast Bsd Posix clocks where available") \
\
product(bool, UseHugeTLBFS, false, \
"Use MAP_HUGETLB for large pages") \
\
product(bool, UseSHM, false, \
"Use SYSV shared memory for large pages")

//
// Defines Bsd-specific default values. The flags are available on all
@ -70,50 +70,38 @@
|
||||
#include "utilities/vmError.hpp"
|
||||
|
||||
// put OS-includes here
|
||||
# include <sys/types.h>
|
||||
# include <sys/mman.h>
|
||||
# include <sys/stat.h>
|
||||
# include <sys/select.h>
|
||||
# include <pthread.h>
|
||||
# include <signal.h>
|
||||
# include <errno.h>
|
||||
# include <dlfcn.h>
|
||||
# include <stdio.h>
|
||||
# include <unistd.h>
|
||||
# include <sys/resource.h>
|
||||
# include <errno.h>
|
||||
# include <fcntl.h>
|
||||
# include <inttypes.h>
|
||||
# include <poll.h>
|
||||
# include <pthread.h>
|
||||
# include <pwd.h>
|
||||
# include <signal.h>
|
||||
# include <stdint.h>
|
||||
# include <stdio.h>
|
||||
# include <string.h>
|
||||
# include <sys/ioctl.h>
|
||||
# include <sys/mman.h>
|
||||
# include <sys/param.h>
|
||||
# include <sys/resource.h>
|
||||
# include <sys/socket.h>
|
||||
# include <sys/stat.h>
|
||||
# include <sys/syscall.h>
|
||||
# include <sys/sysctl.h>
|
||||
# include <sys/time.h>
|
||||
# include <sys/times.h>
|
||||
# include <sys/utsname.h>
|
||||
# include <sys/socket.h>
|
||||
# include <sys/types.h>
|
||||
# include <sys/wait.h>
|
||||
# include <time.h>
|
||||
# include <pwd.h>
|
||||
# include <poll.h>
|
||||
# include <fcntl.h>
|
||||
# include <string.h>
|
||||
# include <sys/param.h>
|
||||
# include <sys/sysctl.h>
|
||||
# include <sys/ipc.h>
|
||||
# include <sys/shm.h>
|
||||
#ifndef __APPLE__
|
||||
# include <link.h>
|
||||
#endif
|
||||
# include <stdint.h>
|
||||
# include <inttypes.h>
|
||||
# include <sys/ioctl.h>
|
||||
# include <sys/syscall.h>
|
||||
# include <unistd.h>
|
||||
|
||||
#if defined(__FreeBSD__) || defined(__NetBSD__)
|
||||
#include <elf.h>
|
||||
#endif
|
||||
|
||||
#ifdef __APPLE__
|
||||
#include <mach/mach.h> // semaphore_* API
|
||||
#include <mach-o/dyld.h>
|
||||
#include <sys/proc_info.h>
|
||||
#include <objc/objc-auto.h>
|
||||
#endif
|
||||
|
||||
#ifndef MAP_ANONYMOUS
|
||||
@ -125,8 +113,6 @@
|
||||
// for timer info max values which include all bits
|
||||
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
|
||||
|
||||
#define LARGEPAGES_BIT (1 << 6)
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// global variables
|
||||
julong os::Bsd::_physical_memory = 0;
|
||||
@ -644,6 +630,9 @@ static uint64_t locate_unique_thread_id(mach_port_t mach_thread_port) {
|
||||
|
||||
// Thread start routine for all newly created threads
|
||||
static void *thread_native_entry(Thread *thread) {
|
||||
|
||||
thread->record_stack_base_and_size();
|
||||
|
||||
// Try to randomize the cache line index of hot stack frames.
|
||||
// This helps when threads of the same stack traces evict each other's
|
||||
// cache lines. The threads can be either from the same JVM instance, or
|
||||
@ -696,20 +685,15 @@ static void *thread_native_entry(Thread *thread) {
|
||||
}
|
||||
|
||||
// call one more level start routine
|
||||
thread->run();
|
||||
thread->call_run();
|
||||
|
||||
// Note: at this point the thread object may already have deleted itself.
|
||||
// Prevent dereferencing it from here on out.
|
||||
thread = NULL;
|
||||
|
||||
log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ").",
|
||||
os::current_thread_id(), (uintx) pthread_self());
|
||||
|
||||
// If a thread has not deleted itself ("delete this") as part of its
|
||||
// termination sequence, we have to ensure thread-local-storage is
|
||||
// cleared before we actually terminate. No threads should ever be
|
||||
// deleted asynchronously with respect to their termination.
|
||||
if (Thread::current_or_null_safe() != NULL) {
|
||||
assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
|
||||
thread->clear_thread_current();
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -2118,95 +2102,27 @@ void os::large_page_init() {
|
||||
|
||||
|
||||
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
|
||||
fatal("This code is not used or maintained.");
|
||||
|
||||
// "exec" is passed in but not used. Creating the shared image for
|
||||
// the code cache doesn't have an SHM_X executable permission to check.
|
||||
assert(UseLargePages && UseSHM, "only for SHM large pages");
|
||||
|
||||
key_t key = IPC_PRIVATE;
|
||||
char *addr;
|
||||
|
||||
bool warn_on_failure = UseLargePages &&
|
||||
(!FLAG_IS_DEFAULT(UseLargePages) ||
|
||||
!FLAG_IS_DEFAULT(LargePageSizeInBytes));
|
||||
|
||||
// Create a large shared memory region to attach to based on size.
|
||||
// Currently, size is the total size of the heap
|
||||
int shmid = shmget(key, bytes, IPC_CREAT|SHM_R|SHM_W);
|
||||
if (shmid == -1) {
|
||||
// Possible reasons for shmget failure:
|
||||
// 1. shmmax is too small for Java heap.
|
||||
// > check shmmax value: cat /proc/sys/kernel/shmmax
|
||||
// > increase shmmax value: echo "0xffffffff" > /proc/sys/kernel/shmmax
|
||||
// 2. not enough large page memory.
|
||||
// > check available large pages: cat /proc/meminfo
|
||||
// > increase amount of large pages:
|
||||
// echo new_value > /proc/sys/vm/nr_hugepages
|
||||
// Note 1: different Bsd may use different name for this property,
|
||||
// e.g. on Redhat AS-3 it is "hugetlb_pool".
|
||||
// Note 2: it's possible there's enough physical memory available but
|
||||
// they are so fragmented after a long run that they can't
|
||||
// coalesce into large pages. Try to reserve large pages when
|
||||
// the system is still "fresh".
|
||||
if (warn_on_failure) {
|
||||
warning("Failed to reserve shared memory (errno = %d).", errno);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// attach to the region
|
||||
addr = (char*)shmat(shmid, req_addr, 0);
|
||||
int err = errno;
|
||||
|
||||
// Remove shmid. If shmat() is successful, the actual shared memory segment
|
||||
// will be deleted when it's detached by shmdt() or when the process
|
||||
// terminates. If shmat() is not successful this will remove the shared
|
||||
// segment immediately.
|
||||
shmctl(shmid, IPC_RMID, NULL);
|
||||
|
||||
if ((intptr_t)addr == -1) {
|
||||
if (warn_on_failure) {
|
||||
warning("Failed to attach shared memory (errno = %d).", err);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// The memory is committed
|
||||
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
|
||||
|
||||
return addr;
|
||||
fatal("os::reserve_memory_special should not be called on BSD.");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
bool os::release_memory_special(char* base, size_t bytes) {
|
||||
if (MemTracker::tracking_level() > NMT_minimal) {
|
||||
Tracker tkr(Tracker::release);
|
||||
// detaching the SHM segment will also delete it, see reserve_memory_special()
|
||||
int rslt = shmdt(base);
|
||||
if (rslt == 0) {
|
||||
tkr.record((address)base, bytes);
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
return shmdt(base) == 0;
|
||||
}
|
||||
fatal("os::release_memory_special should not be called on BSD.");
|
||||
return false;
|
||||
}
|
||||
|
||||
size_t os::large_page_size() {
|
||||
return _large_page_size;
|
||||
}
|
||||
|
||||
// HugeTLBFS allows application to commit large page memory on demand;
|
||||
// with SysV SHM the entire memory region must be allocated as shared
|
||||
// memory.
|
||||
bool os::can_commit_large_page_memory() {
|
||||
return UseHugeTLBFS;
|
||||
// Does not matter, we do not support huge pages.
|
||||
return false;
|
||||
}
|
||||
|
||||
bool os::can_execute_large_page_memory() {
|
||||
return UseHugeTLBFS;
|
||||
// Does not matter, we do not support huge pages.
|
||||
return false;
|
||||
}
|
||||
|
||||
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
|
||||
|
@ -649,6 +649,9 @@ bool os::Linux::manually_expand_stack(JavaThread * t, address addr) {
|
||||
|
||||
// Thread start routine for all newly created threads
|
||||
static void *thread_native_entry(Thread *thread) {
|
||||
|
||||
thread->record_stack_base_and_size();
|
||||
|
||||
// Try to randomize the cache line index of hot stack frames.
|
||||
// This helps when threads of the same stack traces evict each other's
|
||||
// cache lines. The threads can be either from the same JVM instance, or
|
||||
@ -695,20 +698,15 @@ static void *thread_native_entry(Thread *thread) {
|
||||
}
|
||||
|
||||
// call one more level start routine
|
||||
thread->run();
|
||||
thread->call_run();
|
||||
|
||||
// Note: at this point the thread object may already have deleted itself.
|
||||
// Prevent dereferencing it from here on out.
|
||||
thread = NULL;
|
||||
|
||||
log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ").",
|
||||
os::current_thread_id(), (uintx) pthread_self());
|
||||
|
||||
// If a thread has not deleted itself ("delete this") as part of its
|
||||
// termination sequence, we have to ensure thread-local-storage is
|
||||
// cleared before we actually terminate. No threads should ever be
|
||||
// deleted asynchronously with respect to their termination.
|
||||
if (Thread::current_or_null_safe() != NULL) {
|
||||
assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
|
||||
thread->clear_thread_current();
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -199,6 +199,10 @@ static inline stack_t get_stack_info() {
  return st;
}

static void _handle_uncaught_cxx_exception() {
  VMError::report_and_die("An uncaught C++ exception");
}

bool os::is_primordial_thread(void) {
  int r = thr_main();
  guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");

@ -724,6 +728,11 @@ static thread_t main_thread;

// Thread start routine for all newly created threads
extern "C" void* thread_native_entry(void* thread_addr) {

  Thread* thread = (Thread*)thread_addr;

  thread->record_stack_base_and_size();

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or

@ -734,7 +743,6 @@ extern "C" void* thread_native_entry(void* thread_addr) {
  alloca(((pid ^ counter++) & 7) * 128);

  int prio;
  Thread* thread = (Thread*)thread_addr;

  thread->initialize_thread_current();

@ -775,7 +783,13 @@ extern "C" void* thread_native_entry(void* thread_addr) {
  // initialize signal mask for this thread
  os::Solaris::hotspot_sigmask(thread);

  thread->run();
  os::Solaris::init_thread_fpu_state();
  std::set_terminate(_handle_uncaught_cxx_exception);

  thread->call_run();

  // Note: at this point the thread object may already have deleted itself.
  // Do not dereference it from here on out.

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
@ -786,15 +800,6 @@ extern "C" void* thread_native_entry(void* thread_addr) {

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());

  // If a thread has not deleted itself ("delete this") as part of its
  // termination sequence, we have to ensure thread-local-storage is
  // cleared before we actually terminate. No threads should ever be
  // deleted asynchronously with respect to their termination.
  if (Thread::current_or_null_safe() != NULL) {
    assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
    thread->clear_thread_current();
  }

  if (UseDetachedThreads) {
    thr_exit(NULL);
    ShouldNotReachHere();
@ -1090,67 +1095,58 @@ sigset_t* os::Solaris::vm_signals() {
|
||||
return &vm_sigs;
|
||||
}
|
||||
|
||||
void _handle_uncaught_cxx_exception() {
|
||||
VMError::report_and_die("An uncaught C++ exception");
|
||||
}
|
||||
// CR 7190089: on Solaris, primordial thread's stack needs adjusting.
|
||||
// Without the adjustment, stack size is incorrect if stack is set to unlimited (ulimit -s unlimited).
|
||||
void os::Solaris::correct_stack_boundaries_for_primordial_thread(Thread* thr) {
|
||||
assert(is_primordial_thread(), "Call only for primordial thread");
|
||||
|
||||
JavaThread* jt = (JavaThread *)thr;
|
||||
assert(jt != NULL, "Sanity check");
|
||||
size_t stack_size;
|
||||
address base = jt->stack_base();
|
||||
if (Arguments::created_by_java_launcher()) {
|
||||
// Use 2MB to allow for Solaris 7 64 bit mode.
|
||||
stack_size = JavaThread::stack_size_at_create() == 0
|
||||
? 2048*K : JavaThread::stack_size_at_create();
|
||||
|
||||
// First crack at OS-specific initialization, from inside the new thread.
|
||||
void os::initialize_thread(Thread* thr) {
|
||||
if (is_primordial_thread()) {
|
||||
JavaThread* jt = (JavaThread *)thr;
|
||||
assert(jt != NULL, "Sanity check");
|
||||
size_t stack_size;
|
||||
address base = jt->stack_base();
|
||||
if (Arguments::created_by_java_launcher()) {
|
||||
// Use 2MB to allow for Solaris 7 64 bit mode.
|
||||
stack_size = JavaThread::stack_size_at_create() == 0
|
||||
? 2048*K : JavaThread::stack_size_at_create();
|
||||
|
||||
// There are rare cases when we may have already used more than
|
||||
// the basic stack size allotment before this method is invoked.
|
||||
// Attempt to allow for a normally sized java_stack.
|
||||
size_t current_stack_offset = (size_t)(base - (address)&stack_size);
|
||||
stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
|
||||
} else {
|
||||
// 6269555: If we were not created by a Java launcher, i.e. if we are
|
||||
// running embedded in a native application, treat the primordial thread
|
||||
// as much like a native attached thread as possible. This means using
|
||||
// the current stack size from thr_stksegment(), unless it is too large
|
||||
// to reliably setup guard pages. A reasonable max size is 8MB.
|
||||
size_t current_size = current_stack_size();
|
||||
// This should never happen, but just in case....
|
||||
if (current_size == 0) current_size = 2 * K * K;
|
||||
stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
|
||||
}
|
||||
address bottom = align_up(base - stack_size, os::vm_page_size());;
|
||||
stack_size = (size_t)(base - bottom);
|
||||
|
||||
assert(stack_size > 0, "Stack size calculation problem");
|
||||
|
||||
if (stack_size > jt->stack_size()) {
|
||||
#ifndef PRODUCT
|
||||
struct rlimit limits;
|
||||
getrlimit(RLIMIT_STACK, &limits);
|
||||
size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
|
||||
assert(size >= jt->stack_size(), "Stack size problem in main thread");
|
||||
#endif
|
||||
tty->print_cr("Stack size of %d Kb exceeds current limit of %d Kb.\n"
|
||||
"(Stack sizes are rounded up to a multiple of the system page size.)\n"
|
||||
"See limit(1) to increase the stack size limit.",
|
||||
stack_size / K, jt->stack_size() / K);
|
||||
vm_exit(1);
|
||||
}
|
||||
assert(jt->stack_size() >= stack_size,
|
||||
"Attempt to map more stack than was allocated");
|
||||
jt->set_stack_size(stack_size);
|
||||
// There are rare cases when we may have already used more than
|
||||
// the basic stack size allotment before this method is invoked.
|
||||
// Attempt to allow for a normally sized java_stack.
|
||||
size_t current_stack_offset = (size_t)(base - (address)&stack_size);
|
||||
stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
|
||||
} else {
|
||||
// 6269555: If we were not created by a Java launcher, i.e. if we are
|
||||
// running embedded in a native application, treat the primordial thread
|
||||
// as much like a native attached thread as possible. This means using
|
||||
// the current stack size from thr_stksegment(), unless it is too large
|
||||
// to reliably setup guard pages. A reasonable max size is 8MB.
|
||||
size_t current_size = os::current_stack_size();
|
||||
// This should never happen, but just in case....
|
||||
if (current_size == 0) current_size = 2 * K * K;
|
||||
stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
|
||||
}
|
||||
address bottom = align_up(base - stack_size, os::vm_page_size());;
|
||||
stack_size = (size_t)(base - bottom);
|
||||
|
||||
// With the T2 libthread (T1 is no longer supported) threads are always bound
|
||||
// and we use stackbanging in all cases.
|
||||
assert(stack_size > 0, "Stack size calculation problem");
|
||||
|
||||
if (stack_size > jt->stack_size()) {
|
||||
#ifndef PRODUCT
|
||||
struct rlimit limits;
|
||||
getrlimit(RLIMIT_STACK, &limits);
|
||||
size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
|
||||
assert(size >= jt->stack_size(), "Stack size problem in main thread");
|
||||
#endif
|
||||
tty->print_cr("Stack size of %d Kb exceeds current limit of %d Kb.\n"
|
||||
"(Stack sizes are rounded up to a multiple of the system page size.)\n"
|
||||
"See limit(1) to increase the stack size limit.",
|
||||
stack_size / K, jt->stack_size() / K);
|
||||
vm_exit(1);
|
||||
}
|
||||
assert(jt->stack_size() >= stack_size,
|
||||
"Attempt to map more stack than was allocated");
|
||||
jt->set_stack_size(stack_size);
|
||||
|
||||
os::Solaris::init_thread_fpu_state();
|
||||
std::set_terminate(_handle_uncaught_cxx_exception);
|
||||
}
|
||||
|
||||
|
||||
|
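The hunk above estimates how much of the primordial thread's stack is already in use by taking the address of a local variable and subtracting it from the known stack base. A minimal standalone sketch of that probe trick (illustrative only, not HotSpot code; the helper name is made up):

#include <cstddef>

// Assumes a downward-growing stack, as on the platforms in this change.
static size_t used_stack_bytes(const char* stack_base) {
  char probe;                            // lives near the current top of the stack
  return (size_t)(stack_base - &probe);  // distance from base to the live frame
}
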
@ -102,8 +102,6 @@ class Solaris {
  static size_t page_size_for_alignment(size_t alignment);
  static bool setup_large_pages(caddr_t start, size_t bytes, size_t align);

  static void init_thread_fpu_state(void);

  static void try_enable_extended_io();

  static struct sigaction *(*get_signal_action)(int);

@ -148,6 +146,9 @@ class Solaris {

  // SR_handler
  static void SR_handler(Thread* thread, ucontext_t* uc);

  static void init_thread_fpu_state(void);

 protected:
  // Solaris-specific interface goes here
  static julong available_memory();

@ -268,6 +269,7 @@ class Solaris {
  static jint _os_thread_limit;
  static volatile jint _os_thread_count;

  static void correct_stack_boundaries_for_primordial_thread(Thread* thr);

  // Stack overflow handling

@ -420,6 +420,9 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);

// Thread start routine for all newly created threads
static unsigned __stdcall thread_native_entry(Thread* thread) {

  thread->record_stack_base_and_size();

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or

@ -453,12 +456,15 @@ static unsigned __stdcall thread_native_entry(Thread* thread) {
  // by VM, so VM can generate error dump when an exception occurred in non-
  // Java thread (e.g. VM thread).
  __try {
    thread->run();
    thread->call_run();
  } __except(topLevelExceptionFilter(
             (_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }

  // Note: at this point the thread object may already have deleted itself.
  // Do not dereference it from here on out.

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());

  // One less thread is executing
@ -468,15 +474,6 @@ static unsigned __stdcall thread_native_entry(Thread* thread) {
    Atomic::dec(&os::win32::_os_thread_count);
  }

  // If a thread has not deleted itself ("delete this") as part of its
  // termination sequence, we have to ensure thread-local-storage is
  // cleared before we actually terminate. No threads should ever be
  // deleted asynchronously with respect to their termination.
  if (Thread::current_or_null_safe() != NULL) {
    assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
    thread->clear_thread_current();
  }

  // Thread must not return from exit_process_or_thread(), but if it does,
  // let it proceed to exit normally
  return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);

@ -85,11 +85,6 @@ char* os::non_memory_address_word() {
  return (char*) -1;
}

// OS specific thread initialization
//
// Calculate and store the limits of the memory stack.
void os::initialize_thread(Thread *thread) { }

// Frame information (pc, sp, fp) retrieved via ucontext
// always looks like a C-frame according to the frame
// conventions in frame_ppc.hpp.

@ -300,10 +300,6 @@ char* os::non_memory_address_word() {
  return (char*) -1;
}

void os::initialize_thread(Thread* thr) {
  // Nothing to do.
}

address os::Bsd::ucontext_get_pc(const ucontext_t * uc) {
  return (address)uc->context_pc;
}

@ -100,10 +100,6 @@ char* os::non_memory_address_word() {
#endif // SPARC
}

void os::initialize_thread(Thread* thr) {
  // Nothing to do.
}

address os::Bsd::ucontext_get_pc(const ucontext_t* uc) {
  ShouldNotCallThis();
  return NULL;

@ -106,9 +106,6 @@ char* os::non_memory_address_word() {
  return (char*) 0xffffffffffff;
}

void os::initialize_thread(Thread *thr) {
}

address os::Linux::ucontext_get_pc(const ucontext_t * uc) {
#ifdef BUILTIN_SIM
  return (address)uc->uc_mcontext.gregs[REG_PC];

@ -44,7 +44,6 @@
|
||||
* kernel source or kernel_user_helpers.txt in Linux Doc.
|
||||
*/
|
||||
|
||||
#ifndef AARCH64
|
||||
template<>
|
||||
template<typename T>
|
||||
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
|
||||
@ -61,18 +60,9 @@ inline void Atomic::PlatformStore<8>::operator()(T store_value,
|
||||
(*os::atomic_store_long_func)(
|
||||
PrimitiveConversions::cast<int64_t>(store_value), reinterpret_cast<volatile int64_t*>(dest));
|
||||
}
|
||||
#endif
|
||||
|
||||
// As per atomic.hpp all read-modify-write operations have to provide two-way
|
||||
// barriers semantics. For AARCH64 we are using load-acquire-with-reservation and
|
||||
// store-release-with-reservation. While load-acquire combined with store-release
|
||||
// do not generally form two-way barriers, their use with reservations does - the
|
||||
// ARMv8 architecture manual Section F "Barrier Litmus Tests" indicates they
|
||||
// provide sequentially consistent semantics. All we need to add is an explicit
|
||||
// barrier in the failure path of the cmpxchg operations (as these don't execute
|
||||
// the store) - arguably this may be overly cautious as there is a very low
|
||||
// likelihood that the hardware would pull loads/stores into the region guarded
|
||||
// by the reservation.
|
||||
// barriers semantics.
|
||||
//
|
||||
// For ARMv7 we add explicit barriers in the stubs.
|
||||
|
||||
@ -90,45 +80,9 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
|
||||
atomic_memory_order order) const {
|
||||
STATIC_ASSERT(4 == sizeof(I));
|
||||
STATIC_ASSERT(4 == sizeof(D));
|
||||
#ifdef AARCH64
|
||||
D val;
|
||||
int tmp;
|
||||
__asm__ volatile(
|
||||
"1:\n\t"
|
||||
" ldaxr %w[val], [%[dest]]\n\t"
|
||||
" add %w[val], %w[val], %w[add_val]\n\t"
|
||||
" stlxr %w[tmp], %w[val], [%[dest]]\n\t"
|
||||
" cbnz %w[tmp], 1b\n\t"
|
||||
: [val] "=&r" (val), [tmp] "=&r" (tmp)
|
||||
: [add_val] "r" (add_value), [dest] "r" (dest)
|
||||
: "memory");
|
||||
return val;
|
||||
#else
|
||||
return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef AARCH64
|
||||
template<>
|
||||
template<typename I, typename D>
|
||||
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
|
||||
atomic_memory_order order) const {
|
||||
STATIC_ASSERT(8 == sizeof(I));
|
||||
STATIC_ASSERT(8 == sizeof(D));
|
||||
D val;
|
||||
int tmp;
|
||||
__asm__ volatile(
|
||||
"1:\n\t"
|
||||
" ldaxr %[val], [%[dest]]\n\t"
|
||||
" add %[val], %[val], %[add_val]\n\t"
|
||||
" stlxr %w[tmp], %[val], [%[dest]]\n\t"
|
||||
" cbnz %w[tmp], 1b\n\t"
|
||||
: [val] "=&r" (val), [tmp] "=&r" (tmp)
|
||||
: [add_val] "r" (add_value), [dest] "r" (dest)
|
||||
: "memory");
|
||||
return val;
|
||||
}
|
||||
#endif
|
||||
|
||||
template<>
|
||||
template<typename T>
|
||||
@ -136,43 +90,9 @@ inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
|
||||
T volatile* dest,
|
||||
atomic_memory_order order) const {
|
||||
STATIC_ASSERT(4 == sizeof(T));
|
||||
#ifdef AARCH64
|
||||
T old_val;
|
||||
int tmp;
|
||||
__asm__ volatile(
|
||||
"1:\n\t"
|
||||
" ldaxr %w[old_val], [%[dest]]\n\t"
|
||||
" stlxr %w[tmp], %w[new_val], [%[dest]]\n\t"
|
||||
" cbnz %w[tmp], 1b\n\t"
|
||||
: [old_val] "=&r" (old_val), [tmp] "=&r" (tmp)
|
||||
: [new_val] "r" (exchange_value), [dest] "r" (dest)
|
||||
: "memory");
|
||||
return old_val;
|
||||
#else
|
||||
return xchg_using_helper<int32_t>(os::atomic_xchg_func, exchange_value, dest);
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef AARCH64
|
||||
template<>
|
||||
template<typename T>
|
||||
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
|
||||
T volatile* dest,
|
||||
atomic_memory_order order) const {
|
||||
STATIC_ASSERT(8 == sizeof(T));
|
||||
T old_val;
|
||||
int tmp;
|
||||
__asm__ volatile(
|
||||
"1:\n\t"
|
||||
" ldaxr %[old_val], [%[dest]]\n\t"
|
||||
" stlxr %w[tmp], %[new_val], [%[dest]]\n\t"
|
||||
" cbnz %w[tmp], 1b\n\t"
|
||||
: [old_val] "=&r" (old_val), [tmp] "=&r" (tmp)
|
||||
: [new_val] "r" (exchange_value), [dest] "r" (dest)
|
||||
: "memory");
|
||||
return old_val;
|
||||
}
|
||||
#endif // AARCH64
|
||||
|
||||
// The memory_order parameter is ignored - we always provide the strongest/most-conservative ordering
|
||||
|
||||
@ -180,7 +100,6 @@ inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
|
||||
template<>
|
||||
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
|
||||
|
||||
#ifndef AARCH64
|
||||
|
||||
inline int32_t reorder_cmpxchg_func(int32_t exchange_value,
|
||||
int32_t volatile* dest,
|
||||
@ -197,7 +116,6 @@ inline int64_t reorder_cmpxchg_long_func(int64_t exchange_value,
|
||||
return (*os::atomic_cmpxchg_long_func)(compare_value, exchange_value, dest);
|
||||
}
|
||||
|
||||
#endif // !AARCH64
|
||||
|
||||
template<>
|
||||
template<typename T>
|
||||
@ -206,27 +124,7 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
|
||||
T compare_value,
|
||||
atomic_memory_order order) const {
|
||||
STATIC_ASSERT(4 == sizeof(T));
|
||||
#ifdef AARCH64
|
||||
T rv;
|
||||
int tmp;
|
||||
__asm__ volatile(
|
||||
"1:\n\t"
|
||||
" ldaxr %w[rv], [%[dest]]\n\t"
|
||||
" cmp %w[rv], %w[cv]\n\t"
|
||||
" b.ne 2f\n\t"
|
||||
" stlxr %w[tmp], %w[ev], [%[dest]]\n\t"
|
||||
" cbnz %w[tmp], 1b\n\t"
|
||||
" b 3f\n\t"
|
||||
"2:\n\t"
|
||||
" dmb sy\n\t"
|
||||
"3:\n\t"
|
||||
: [rv] "=&r" (rv), [tmp] "=&r" (tmp)
|
||||
: [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
|
||||
: "memory");
|
||||
return rv;
|
||||
#else
|
||||
return cmpxchg_using_helper<int32_t>(reorder_cmpxchg_func, exchange_value, dest, compare_value);
|
||||
#endif
|
||||
}
|
||||
|
||||
template<>
|
||||
@ -236,27 +134,7 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
|
||||
T compare_value,
|
||||
atomic_memory_order order) const {
|
||||
STATIC_ASSERT(8 == sizeof(T));
|
||||
#ifdef AARCH64
|
||||
T rv;
|
||||
int tmp;
|
||||
__asm__ volatile(
|
||||
"1:\n\t"
|
||||
" ldaxr %[rv], [%[dest]]\n\t"
|
||||
" cmp %[rv], %[cv]\n\t"
|
||||
" b.ne 2f\n\t"
|
||||
" stlxr %w[tmp], %[ev], [%[dest]]\n\t"
|
||||
" cbnz %w[tmp], 1b\n\t"
|
||||
" b 3f\n\t"
|
||||
"2:\n\t"
|
||||
" dmb sy\n\t"
|
||||
"3:\n\t"
|
||||
: [rv] "=&r" (rv), [tmp] "=&r" (tmp)
|
||||
: [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
|
||||
: "memory");
|
||||
return rv;
|
||||
#else
|
||||
return cmpxchg_using_helper<int64_t>(reorder_cmpxchg_long_func, exchange_value, dest, compare_value);
|
||||
#endif
|
||||
}
|
||||
|
||||
#endif // OS_CPU_LINUX_ARM_VM_ATOMIC_LINUX_ARM_HPP
|
||||
|
@ -58,37 +58,18 @@ static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t co
|
||||
}
|
||||
|
||||
static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
|
||||
#ifdef AARCH64
|
||||
_Copy_conjoint_jints_atomic(from, to, count * BytesPerInt);
|
||||
#else
|
||||
assert(HeapWordSize == BytesPerInt, "heapwords and jints must be the same size");
|
||||
// pd_conjoint_words is word-atomic in this implementation.
|
||||
pd_conjoint_words((const HeapWord*)from, (HeapWord*)to, count);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
|
||||
#ifdef AARCH64
|
||||
assert(HeapWordSize == BytesPerLong, "64-bit architecture");
|
||||
pd_conjoint_words((const HeapWord*)from, (HeapWord*)to, count);
|
||||
#else
|
||||
_Copy_conjoint_jlongs_atomic(from, to, count * BytesPerLong);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) {
|
||||
#ifdef AARCH64
|
||||
if (UseCompressedOops) {
|
||||
assert(BytesPerHeapOop == BytesPerInt, "compressed oops");
|
||||
pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count);
|
||||
} else {
|
||||
assert(BytesPerHeapOop == BytesPerLong, "64-bit architecture");
|
||||
pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count);
|
||||
}
|
||||
#else
|
||||
assert(BytesPerHeapOop == BytesPerInt, "32-bit architecture");
|
||||
pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) {
|
||||
|
@ -30,16 +30,10 @@
|
||||
// (see globals.hpp)
|
||||
//
|
||||
define_pd_global(bool, DontYieldALot, false);
|
||||
#ifdef AARCH64
|
||||
define_pd_global(intx, CompilerThreadStackSize, 1024);
|
||||
define_pd_global(intx, ThreadStackSize, 1024);
|
||||
define_pd_global(intx, VMThreadStackSize, 1024);
|
||||
#else
|
||||
define_pd_global(intx, CompilerThreadStackSize, 512);
|
||||
// System default ThreadStackSize appears to be 512 which is too big.
|
||||
define_pd_global(intx, ThreadStackSize, 320);
|
||||
define_pd_global(intx, VMThreadStackSize, 512);
|
||||
#endif // AARCH64
|
||||
|
||||
define_pd_global(size_t, JVMInvokeMethodSlack, 8192);
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
#
|
||||
#
|
||||
# Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
@ -19,15 +19,15 @@
|
||||
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
# or visit www.oracle.com if you need additional information or have any
|
||||
# questions.
|
||||
#
|
||||
#
|
||||
|
||||
|
||||
|
||||
# NOTE WELL! The _Copy functions are called directly
|
||||
# from server-compiler-generated code via CallLeafNoFP,
|
||||
# which means that they *must* either not use floating
|
||||
# point or use it in the same manner as does the server
|
||||
# compiler.
|
||||
|
||||
|
||||
.globl _Copy_conjoint_bytes
|
||||
.type _Copy_conjoint_bytes, %function
|
||||
.globl _Copy_arrayof_conjoint_bytes
|
||||
|
@ -1,542 +0,0 @@
|
||||
#
|
||||
# Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
# under the terms of the GNU General Public License version 2 only, as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
# version 2 for more details (a copy is included in the LICENSE file that
|
||||
# accompanied this code).
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License version
|
||||
# 2 along with this work; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
#
|
||||
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
# or visit www.oracle.com if you need additional information or have any
|
||||
# questions.
|
||||
#
|
||||
|
||||
# TODO-AARCH64
|
||||
|
||||
# NOTE WELL! The _Copy functions are called directly
|
||||
# from server-compiler-generated code via CallLeafNoFP,
|
||||
# which means that they *must* either not use floating
|
||||
# point or use it in the same manner as does the server
|
||||
# compiler.
|
||||
|
||||
.globl _Copy_conjoint_bytes
|
||||
.type _Copy_conjoint_bytes, %function
|
||||
.globl _Copy_arrayof_conjoint_bytes
|
||||
.type _Copy_arrayof_conjoint_bytes, %function
|
||||
.globl _Copy_disjoint_words
|
||||
.type _Copy_disjoint_words, %function
|
||||
.globl _Copy_conjoint_words
|
||||
.type _Copy_conjoint_words, %function
|
||||
.globl _Copy_conjoint_jshorts_atomic
|
||||
.type _Copy_conjoint_jshorts_atomic, %function
|
||||
.globl _Copy_arrayof_conjoint_jshorts
|
||||
.type _Copy_arrayof_conjoint_jshorts, %function
|
||||
.globl _Copy_conjoint_jints_atomic
|
||||
.type _Copy_conjoint_jints_atomic, %function
|
||||
.globl _Copy_arrayof_conjoint_jints
|
||||
.type _Copy_arrayof_conjoint_jints, %function
|
||||
.globl _Copy_conjoint_jlongs_atomic
|
||||
.type _Copy_conjoint_jlongs_atomic, %function
|
||||
.globl _Copy_arrayof_conjoint_jlongs
|
||||
.type _Copy_arrayof_conjoint_jlongs, %function
|
||||
|
||||
.text
|
||||
.globl SpinPause
|
||||
.type SpinPause, %function
|
||||
SpinPause:
|
||||
yield
|
||||
ret
|
||||
|
||||
# Support for void Copy::conjoint_bytes(void* from,
|
||||
# void* to,
|
||||
# size_t count)
|
||||
_Copy_conjoint_bytes:
|
||||
hlt 1002
|
||||
|
||||
# Support for void Copy::arrayof_conjoint_bytes(void* from,
|
||||
# void* to,
|
||||
# size_t count)
|
||||
_Copy_arrayof_conjoint_bytes:
|
||||
hlt 1003
|
||||
|
||||
|
||||
# Support for void Copy::disjoint_words(void* from,
|
||||
# void* to,
|
||||
# size_t count)
|
||||
_Copy_disjoint_words:
|
||||
# These and further memory prefetches may hit out of array ranges.
|
||||
# Experiments showed that prefetching of inaccessible memory doesn't result in exceptions.
|
||||
prfm pldl1keep, [x0, #0]
|
||||
prfm pstl1keep, [x1, #0]
|
||||
prfm pldl1keep, [x0, #64]
|
||||
prfm pstl1keep, [x1, #64]
|
||||
|
||||
subs x18, x2, #128
|
||||
b.ge dw_large
|
||||
|
||||
dw_lt_128:
|
||||
# Copy [x0, x0 + x2) to [x1, x1 + x2)
|
||||
|
||||
adr x15, dw_tail_table_base
|
||||
and x16, x2, #~8
|
||||
|
||||
# Calculate address to jump and store it to x15:
|
||||
# Each pair of instructions before dw_tail_table_base copies 16 bytes.
|
||||
# x16 is count of bytes to copy aligned down by 16.
|
||||
# So x16/16 pairs of instructions should be executed.
|
||||
# Each pair takes 8 bytes, so x15 = dw_tail_table_base - (x16/16)*8 = x15 - x16/2
|
||||
sub x15, x15, x16, lsr #1
|
||||
prfm plil1keep, [x15]
|
||||
|
||||
add x17, x0, x2
|
||||
add x18, x1, x2
|
||||
|
||||
# If x2 = x16 + 8, then copy 8 bytes and x16 bytes after that.
|
||||
# Otherwise x2 = x16, so proceed to copy x16 bytes.
|
||||
tbz x2, #3, dw_lt_128_even
|
||||
ldr x3, [x0]
|
||||
str x3, [x1]
|
||||
dw_lt_128_even:
|
||||
# Copy [x17 - x16, x17) to [x18 - x16, x18)
|
||||
# x16 is aligned by 16 and less than 128
|
||||
|
||||
# Execute (x16/16) ldp-stp pairs; each pair copies 16 bytes
|
||||
br x15
|
||||
|
||||
ldp x3, x4, [x17, #-112]
|
||||
stp x3, x4, [x18, #-112]
|
||||
ldp x5, x6, [x17, #-96]
|
||||
stp x5, x6, [x18, #-96]
|
||||
ldp x7, x8, [x17, #-80]
|
||||
stp x7, x8, [x18, #-80]
|
||||
ldp x9, x10, [x17, #-64]
|
||||
stp x9, x10, [x18, #-64]
|
||||
ldp x11, x12, [x17, #-48]
|
||||
stp x11, x12, [x18, #-48]
|
||||
ldp x13, x14, [x17, #-32]
|
||||
stp x13, x14, [x18, #-32]
|
||||
ldp x15, x16, [x17, #-16]
|
||||
stp x15, x16, [x18, #-16]
|
||||
dw_tail_table_base:
|
||||
ret
|
||||
|
||||
.p2align 6
|
||||
.rept 12
|
||||
nop
|
||||
.endr
|
||||
dw_large:
|
||||
# x18 >= 0;
|
||||
# Copy [x0, x0 + x18 + 128) to [x1, x1 + x18 + 128)
|
||||
|
||||
ldp x3, x4, [x0], #64
|
||||
ldp x5, x6, [x0, #-48]
|
||||
ldp x7, x8, [x0, #-32]
|
||||
ldp x9, x10, [x0, #-16]
|
||||
|
||||
# Before and after each iteration of loop registers x3-x10 contain [x0 - 64, x0),
|
||||
# and x1 is a place to copy this data;
|
||||
# x18 contains number of bytes to be stored minus 128
|
||||
|
||||
# Exactly 16 instructions from p2align, so dw_loop starts from cache line boundary
|
||||
# Checking it explicitly by aligning with "hlt 1000" instructions
|
||||
.p2alignl 6, 0xd4407d00
|
||||
dw_loop:
|
||||
prfm pldl1keep, [x0, #64]
|
||||
# Next line actually hurt memory copy performance (for interpreter) - JDK-8078120
|
||||
# prfm pstl1keep, [x1, #64]
|
||||
|
||||
subs x18, x18, #64
|
||||
|
||||
stp x3, x4, [x1, #0]
|
||||
ldp x3, x4, [x0, #0]
|
||||
stp x5, x6, [x1, #16]
|
||||
ldp x5, x6, [x0, #16]
|
||||
stp x7, x8, [x1, #32]
|
||||
ldp x7, x8, [x0, #32]
|
||||
stp x9, x10, [x1, #48]
|
||||
ldp x9, x10, [x0, #48]
|
||||
|
||||
add x1, x1, #64
|
||||
add x0, x0, #64
|
||||
|
||||
b.ge dw_loop
|
||||
|
||||
# 13 instructions from dw_loop, so the loop body hits into one cache line
|
||||
|
||||
dw_loop_end:
|
||||
adds x2, x18, #64
|
||||
|
||||
stp x3, x4, [x1], #64
|
||||
stp x5, x6, [x1, #-48]
|
||||
stp x7, x8, [x1, #-32]
|
||||
stp x9, x10, [x1, #-16]
|
||||
|
||||
# Increased x18 by 64, but stored 64 bytes, so x2 contains exact number of bytes to be stored
|
||||
|
||||
# If this number is not zero, also copy remaining bytes
|
||||
b.ne dw_lt_128
|
||||
ret
|
||||
|
||||
|
||||
# Support for void Copy::conjoint_words(void* from,
|
||||
# void* to,
|
||||
# size_t count)
|
||||
_Copy_conjoint_words:
|
||||
subs x3, x1, x0
|
||||
# hi condition is met <=> from < to
|
||||
ccmp x2, x3, #0, hi
|
||||
# hi condition is met <=> (from < to) and (to - from < count)
|
||||
# otherwise _Copy_disjoint_words may be used, because it performs forward copying,
|
||||
# so it also works when ranges overlap but to <= from
|
||||
b.ls _Copy_disjoint_words
|
||||
|
||||
# Overlapping case should be the rare one, it is not worth optimizing
|
||||
|
||||
ands x3, x2, #~8
|
||||
# x3 is count aligned down by 2*wordSize
|
||||
add x0, x0, x2
|
||||
add x1, x1, x2
|
||||
sub x3, x3, #16
|
||||
# Skip loop if 0 or 1 words
|
||||
b.eq cw_backward_loop_end
|
||||
|
||||
# x3 >= 0
|
||||
# Copy [x0 - x3 - 16, x0) to [x1 - x3 - 16, x1) backward
|
||||
cw_backward_loop:
|
||||
subs x3, x3, #16
|
||||
ldp x4, x5, [x0, #-16]!
|
||||
stp x4, x5, [x1, #-16]!
|
||||
b.ge cw_backward_loop
|
||||
|
||||
cw_backward_loop_end:
|
||||
# Copy remaining 0 or 1 words
|
||||
tbz x2, #3, cw_finish
|
||||
ldr x3, [x0, #-8]
|
||||
str x3, [x1, #-8]
|
||||
|
||||
cw_finish:
|
||||
ret
|
||||
|
||||
|
||||
# Support for void Copy::conjoint_jshorts_atomic(void* from,
|
||||
# void* to,
|
||||
# size_t count)
|
||||
_Copy_conjoint_jshorts_atomic:
|
||||
add x17, x0, x2
|
||||
add x18, x1, x2
|
||||
|
||||
subs x3, x1, x0
|
||||
# hi is met <=> (from < to) and (to - from < count)
|
||||
ccmp x2, x3, #0, hi
|
||||
b.hi cs_backward
|
||||
|
||||
subs x3, x2, #14
|
||||
b.ge cs_forward_loop
|
||||
|
||||
# Copy x2 < 14 bytes from x0 to x1
|
||||
cs_forward_lt14:
|
||||
ands x7, x2, #7
|
||||
tbz x2, #3, cs_forward_lt8
|
||||
ldrh w3, [x0, #0]
|
||||
ldrh w4, [x0, #2]
|
||||
ldrh w5, [x0, #4]
|
||||
ldrh w6, [x0, #6]
|
||||
|
||||
strh w3, [x1, #0]
|
||||
strh w4, [x1, #2]
|
||||
strh w5, [x1, #4]
|
||||
strh w6, [x1, #6]
|
||||
|
||||
# Copy x7 < 8 bytes from x17 - x7 to x18 - x7
|
||||
cs_forward_lt8:
|
||||
b.eq cs_forward_0
|
||||
cmp x7, #4
|
||||
b.lt cs_forward_2
|
||||
b.eq cs_forward_4
|
||||
|
||||
cs_forward_6:
|
||||
ldrh w3, [x17, #-6]
|
||||
strh w3, [x18, #-6]
|
||||
cs_forward_4:
|
||||
ldrh w4, [x17, #-4]
|
||||
strh w4, [x18, #-4]
|
||||
cs_forward_2:
|
||||
ldrh w5, [x17, #-2]
|
||||
strh w5, [x18, #-2]
|
||||
cs_forward_0:
|
||||
ret
|
||||
|
||||
|
||||
# Copy [x0, x0 + x3 + 14) to [x1, x1 + x3 + 14)
|
||||
# x3 >= 0
|
||||
.p2align 6
|
||||
cs_forward_loop:
|
||||
subs x3, x3, #14
|
||||
|
||||
ldrh w4, [x0], #14
|
||||
ldrh w5, [x0, #-12]
|
||||
ldrh w6, [x0, #-10]
|
||||
ldrh w7, [x0, #-8]
|
||||
ldrh w8, [x0, #-6]
|
||||
ldrh w9, [x0, #-4]
|
||||
ldrh w10, [x0, #-2]
|
||||
|
||||
strh w4, [x1], #14
|
||||
strh w5, [x1, #-12]
|
||||
strh w6, [x1, #-10]
|
||||
strh w7, [x1, #-8]
|
||||
strh w8, [x1, #-6]
|
||||
strh w9, [x1, #-4]
|
||||
strh w10, [x1, #-2]
|
||||
|
||||
b.ge cs_forward_loop
|
||||
# Exactly 16 instruction from cs_forward_loop, so loop fits into one cache line
|
||||
|
||||
adds x2, x3, #14
|
||||
# x2 bytes should be copied from x0 to x1
|
||||
b.ne cs_forward_lt14
|
||||
ret
|
||||
|
||||
# Very similar to forward copying
|
||||
cs_backward:
|
||||
subs x3, x2, #14
|
||||
b.ge cs_backward_loop
|
||||
|
||||
cs_backward_lt14:
|
||||
ands x7, x2, #7
|
||||
tbz x2, #3, cs_backward_lt8
|
||||
|
||||
ldrh w3, [x17, #-8]
|
||||
ldrh w4, [x17, #-6]
|
||||
ldrh w5, [x17, #-4]
|
||||
ldrh w6, [x17, #-2]
|
||||
|
||||
strh w3, [x18, #-8]
|
||||
strh w4, [x18, #-6]
|
||||
strh w5, [x18, #-4]
|
||||
strh w6, [x18, #-2]
|
||||
|
||||
cs_backward_lt8:
|
||||
b.eq cs_backward_0
|
||||
cmp x7, #4
|
||||
b.lt cs_backward_2
|
||||
b.eq cs_backward_4
|
||||
|
||||
cs_backward_6:
|
||||
ldrh w3, [x0, #4]
|
||||
strh w3, [x1, #4]
|
||||
|
||||
cs_backward_4:
|
||||
ldrh w4, [x0, #2]
|
||||
strh w4, [x1, #2]
|
||||
|
||||
cs_backward_2:
|
||||
ldrh w5, [x0, #0]
|
||||
strh w5, [x1, #0]
|
||||
|
||||
cs_backward_0:
|
||||
ret
|
||||
|
||||
|
||||
.p2align 6
|
||||
cs_backward_loop:
|
||||
subs x3, x3, #14
|
||||
|
||||
ldrh w4, [x17, #-14]!
|
||||
ldrh w5, [x17, #2]
|
||||
ldrh w6, [x17, #4]
|
||||
ldrh w7, [x17, #6]
|
||||
ldrh w8, [x17, #8]
|
||||
ldrh w9, [x17, #10]
|
||||
ldrh w10, [x17, #12]
|
||||
|
||||
strh w4, [x18, #-14]!
|
||||
strh w5, [x18, #2]
|
||||
strh w6, [x18, #4]
|
||||
strh w7, [x18, #6]
|
||||
strh w8, [x18, #8]
|
||||
strh w9, [x18, #10]
|
||||
strh w10, [x18, #12]
|
||||
|
||||
b.ge cs_backward_loop
|
||||
adds x2, x3, #14
|
||||
b.ne cs_backward_lt14
|
||||
ret
|
||||
|
||||
|
||||
# Support for void Copy::arrayof_conjoint_jshorts(void* from,
|
||||
# void* to,
|
||||
# size_t count)
|
||||
_Copy_arrayof_conjoint_jshorts:
|
||||
hlt 1007
|
||||
|
||||
|
||||
# Support for void Copy::conjoint_jlongs_atomic(jlong* from,
|
||||
# jlong* to,
|
||||
# size_t count)
|
||||
_Copy_conjoint_jlongs_atomic:
|
||||
_Copy_arrayof_conjoint_jlongs:
|
||||
hlt 1009
|
||||
|
||||
|
||||
# Support for void Copy::conjoint_jints_atomic(void* from,
|
||||
# void* to,
|
||||
# size_t count)
|
||||
_Copy_conjoint_jints_atomic:
|
||||
_Copy_arrayof_conjoint_jints:
|
||||
# These and further memory prefetches may hit out of array ranges.
|
||||
# Experiments showed that prefetching of inaccessible memory doesn't result in exceptions.
|
||||
prfm pldl1keep, [x0, #0]
|
||||
prfm pstl1keep, [x1, #0]
|
||||
prfm pldl1keep, [x0, #32]
|
||||
prfm pstl1keep, [x1, #32]
|
||||
|
||||
subs x3, x1, x0
|
||||
# hi condition is met <=> from < to
|
||||
ccmp x2, x3, #0, hi
|
||||
# hi condition is met <=> (from < to) and (to - from < count)
|
||||
b.hi ci_backward
|
||||
|
||||
subs x18, x2, #64
|
||||
b.ge ci_forward_large
|
||||
|
||||
ci_forward_lt_64:
|
||||
# Copy [x0, x0 + x2) to [x1, x1 + x2)
|
||||
|
||||
adr x15, ci_forward_tail_table_base
|
||||
and x16, x2, #~4
|
||||
|
||||
# Calculate address to jump and store it to x15:
|
||||
# Each pair of instructions before ci_forward_tail_table_base copies 8 bytes.
|
||||
# x16 is count of bytes to copy aligned down by 8.
|
||||
# So x16/8 pairs of instructions should be executed.
|
||||
# Each pair takes 8 bytes, so x15 = ci_forward_tail_table_base - (x16/8)*8 = x15 - x16
|
||||
sub x15, x15, x16
|
||||
prfm plil1keep, [x15]
|
||||
|
||||
add x17, x0, x2
|
||||
add x18, x1, x2
|
||||
|
||||
# If x2 = x16 + 4, then copy 4 bytes and x16 bytes after that.
|
||||
# Otherwise x2 = x16, so proceed to copy x16 bytes.
|
||||
tbz x2, #2, ci_forward_lt_64_even
|
||||
ldr w3, [x0]
|
||||
str w3, [x1]
|
||||
ci_forward_lt_64_even:
|
||||
# Copy [x17 - x16, x17) to [x18 - x16, x18)
|
||||
# x16 is aligned by 8 and less than 64
|
||||
|
||||
# Execute (x16/8) ldp-stp pairs; each pair copies 8 bytes
|
||||
br x15
|
||||
|
||||
ldp w3, w4, [x17, #-56]
|
||||
stp w3, w4, [x18, #-56]
|
||||
ldp w5, w6, [x17, #-48]
|
||||
stp w5, w6, [x18, #-48]
|
||||
ldp w7, w8, [x17, #-40]
|
||||
stp w7, w8, [x18, #-40]
|
||||
ldp w9, w10, [x17, #-32]
|
||||
stp w9, w10, [x18, #-32]
|
||||
ldp w11, w12, [x17, #-24]
|
||||
stp w11, w12, [x18, #-24]
|
||||
ldp w13, w14, [x17, #-16]
|
||||
stp w13, w14, [x18, #-16]
|
||||
ldp w15, w16, [x17, #-8]
|
||||
stp w15, w16, [x18, #-8]
|
||||
ci_forward_tail_table_base:
|
||||
ret
|
||||
|
||||
.p2align 6
|
||||
.rept 12
|
||||
nop
|
||||
.endr
|
||||
ci_forward_large:
|
||||
# x18 >= 0;
|
||||
# Copy [x0, x0 + x18 + 64) to [x1, x1 + x18 + 64)
|
||||
|
||||
ldp w3, w4, [x0], #32
|
||||
ldp w5, w6, [x0, #-24]
|
||||
ldp w7, w8, [x0, #-16]
|
||||
ldp w9, w10, [x0, #-8]
|
||||
|
||||
# Before and after each iteration of loop registers w3-w10 contain [x0 - 32, x0),
|
||||
# and x1 is a place to copy this data;
|
||||
# x18 contains number of bytes to be stored minus 64
|
||||
|
||||
# Exactly 16 instructions from p2align, so ci_forward_loop starts from cache line boundary
|
||||
# Checking it explicitly by aligning with "hlt 1000" instructions
|
||||
.p2alignl 6, 0xd4407d00
|
||||
ci_forward_loop:
|
||||
prfm pldl1keep, [x0, #32]
|
||||
prfm pstl1keep, [x1, #32]
|
||||
|
||||
subs x18, x18, #32
|
||||
|
||||
stp w3, w4, [x1, #0]
|
||||
ldp w3, w4, [x0, #0]
|
||||
stp w5, w6, [x1, #8]
|
||||
ldp w5, w6, [x0, #8]
|
||||
stp w7, w8, [x1, #16]
|
||||
ldp w7, w8, [x0, #16]
|
||||
stp w9, w10, [x1, #24]
|
||||
ldp w9, w10, [x0, #24]
|
||||
|
||||
add x1, x1, #32
|
||||
add x0, x0, #32
|
||||
|
||||
b.ge ci_forward_loop
|
||||
|
||||
# 14 instructions from ci_forward_loop, so the loop body hits into one cache line
|
||||
|
||||
ci_forward_loop_end:
|
||||
adds x2, x18, #32
|
||||
|
||||
stp w3, w4, [x1], #32
|
||||
stp w5, w6, [x1, #-24]
|
||||
stp w7, w8, [x1, #-16]
|
||||
stp w9, w10, [x1, #-8]
|
||||
|
||||
# Increased x18 by 32, but stored 32 bytes, so x2 contains exact number of bytes to be stored
|
||||
|
||||
# If this number is not zero, also copy remaining bytes
|
||||
b.ne ci_forward_lt_64
|
||||
ret
|
||||
|
||||
ci_backward:
|
||||
|
||||
# Overlapping case should be the rare one, it is not worth optimizing
|
||||
|
||||
ands x3, x2, #~4
|
||||
# x3 is count aligned down by 2*jintSize
|
||||
add x0, x0, x2
|
||||
add x1, x1, x2
|
||||
sub x3, x3, #8
|
||||
# Skip loop if 0 or 1 jints
|
||||
b.eq ci_backward_loop_end
|
||||
|
||||
# x3 >= 0
|
||||
# Copy [x0 - x3 - 8, x0) to [x1 - x3 - 8, x1) backward
|
||||
ci_backward_loop:
|
||||
subs x3, x3, #8
|
||||
ldp w4, w5, [x0, #-8]!
|
||||
stp w4, w5, [x1, #-8]!
|
||||
b.ge ci_backward_loop
|
||||
|
||||
ci_backward_loop_end:
|
||||
# Copy remaining 0 or 1 jints
|
||||
tbz x2, #2, ci_backward_finish
|
||||
ldr w3, [x0, #-4]
|
||||
str w3, [x1, #-4]
|
||||
|
||||
ci_backward_finish:
|
||||
ret
|
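The conjoint copy routines in the deleted assembly file above dispatch on overlap: a forward copy is used unless the destination starts inside the source range (from < to and to - from < count), in which case they fall back to a backward loop. A simplified element-by-element sketch of that rule (illustrative only; the real code copies in larger chunks with ldp/stp):

#include <cstddef>

static void conjoint_bytes_sketch(const char* from, char* to, size_t count) {
  if (to > from && (size_t)(to - from) < count) {
    // Destination overlaps the tail of the source: copy backward.
    for (size_t i = count; i > 0; i--) {
      to[i - 1] = from[i - 1];
    }
  } else {
    // Disjoint, or to <= from: a forward copy is safe even if the ranges overlap.
    for (size_t i = 0; i < count; i++) {
      to[i] = from[i];
    }
  }
}
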
@ -32,8 +32,7 @@
|
||||
|
||||
// Implementation of class OrderAccess.
|
||||
// - we define the high level barriers below and use the general
|
||||
// implementation in orderAccess.hpp, with customizations
|
||||
// on AARCH64 via the specialized_* template functions
|
||||
// implementation in orderAccess.hpp.
|
||||
|
||||
// Memory Ordering on ARM is weak.
|
||||
//
|
||||
@ -56,9 +55,6 @@
|
||||
// }
|
||||
|
||||
inline static void dmb_sy() {
|
||||
#ifdef AARCH64
|
||||
__asm__ __volatile__ ("dmb sy" : : : "memory");
|
||||
#else
|
||||
if (VM_Version::arm_arch() >= 7) {
|
||||
#ifdef __thumb__
|
||||
__asm__ volatile (
|
||||
@ -73,13 +69,9 @@ inline static void dmb_sy() {
|
||||
"mcr p15, 0, %0, c7, c10, 5"
|
||||
: : "r" (zero) : "memory");
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
inline static void dmb_st() {
|
||||
#ifdef AARCH64
|
||||
__asm__ __volatile__ ("dmb st" : : : "memory");
|
||||
#else
|
||||
if (VM_Version::arm_arch() >= 7) {
|
||||
#ifdef __thumb__
|
||||
__asm__ volatile (
|
||||
@ -94,16 +86,11 @@ inline static void dmb_st() {
|
||||
"mcr p15, 0, %0, c7, c10, 5"
|
||||
: : "r" (zero) : "memory");
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
// Load-Load/Store barrier
|
||||
inline static void dmb_ld() {
|
||||
#ifdef AARCH64
|
||||
__asm__ __volatile__ ("dmb ld" : : : "memory");
|
||||
#else
|
||||
dmb_sy();
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
@ -115,123 +102,4 @@ inline void OrderAccess::storeload() { dmb_sy(); }
|
||||
inline void OrderAccess::release() { dmb_sy(); }
|
||||
inline void OrderAccess::fence() { dmb_sy(); }
|
||||
|
||||
// specializations for Aarch64
|
||||
// TODO-AARCH64: evaluate effectiveness of ldar*/stlr* implementations compared to 32-bit ARM approach
|
||||
|
||||
#ifdef AARCH64
|
||||
|
||||
template<>
|
||||
struct OrderAccess::PlatformOrderedLoad<1, X_ACQUIRE>
|
||||
{
|
||||
template <typename T>
|
||||
T operator()(const volatile T* p) const {
|
||||
volatile T result;
|
||||
__asm__ volatile(
|
||||
"ldarb %w[res], [%[ptr]]"
|
||||
: [res] "=&r" (result)
|
||||
: [ptr] "r" (p)
|
||||
: "memory");
|
||||
return result;
|
||||
}
|
||||
};
|
||||
|
||||
template<>
|
||||
struct OrderAccess::PlatformOrderedLoad<2, X_ACQUIRE>
|
||||
{
|
||||
template <typename T>
|
||||
T operator()(const volatile T* p) const {
|
||||
volatile T result;
|
||||
__asm__ volatile(
|
||||
"ldarh %w[res], [%[ptr]]"
|
||||
: [res] "=&r" (result)
|
||||
: [ptr] "r" (p)
|
||||
: "memory");
|
||||
return result;
|
||||
}
|
||||
};
|
||||
|
||||
template<>
|
||||
struct OrderAccess::PlatformOrderedLoad<4, X_ACQUIRE>
|
||||
{
|
||||
template <typename T>
|
||||
T operator()(const volatile T* p) const {
|
||||
volatile T result;
|
||||
__asm__ volatile(
|
||||
"ldar %w[res], [%[ptr]]"
|
||||
: [res] "=&r" (result)
|
||||
: [ptr] "r" (p)
|
||||
: "memory");
|
||||
return result;
|
||||
}
|
||||
};
|
||||
|
||||
template<>
|
||||
struct OrderAccess::PlatformOrderedLoad<8, X_ACQUIRE>
|
||||
{
|
||||
template <typename T>
|
||||
T operator()(const volatile T* p) const {
|
||||
volatile T result;
|
||||
__asm__ volatile(
|
||||
"ldar %[res], [%[ptr]]"
|
||||
: [res] "=&r" (result)
|
||||
: [ptr] "r" (p)
|
||||
: "memory");
|
||||
return result;
|
||||
}
|
||||
};
|
||||
|
||||
template<>
|
||||
struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
|
||||
{
|
||||
template <typename T>
|
||||
void operator()(T v, volatile T* p) const {
|
||||
__asm__ volatile(
|
||||
"stlrb %w[val], [%[ptr]]"
|
||||
:
|
||||
: [ptr] "r" (p), [val] "r" (v)
|
||||
: "memory");
|
||||
}
|
||||
};
|
||||
|
||||
template<>
|
||||
struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
|
||||
{
|
||||
template <typename T>
|
||||
void operator()(T v, volatile T* p) const {
|
||||
__asm__ volatile(
|
||||
"stlrh %w[val], [%[ptr]]"
|
||||
:
|
||||
: [ptr] "r" (p), [val] "r" (v)
|
||||
: "memory");
|
||||
}
|
||||
};
|
||||
|
||||
template<>
|
||||
struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
|
||||
{
|
||||
template <typename T>
|
||||
void operator()(T v, volatile T* p) const {
|
||||
__asm__ volatile(
|
||||
"stlr %w[val], [%[ptr]]"
|
||||
:
|
||||
: [ptr] "r" (p), [val] "r" (v)
|
||||
: "memory");
|
||||
}
|
||||
};
|
||||
|
||||
template<>
|
||||
struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
|
||||
{
|
||||
template <typename T>
|
||||
void operator()(T v, volatile T* p) const {
|
||||
__asm__ volatile(
|
||||
"stlr %[val], [%[ptr]]"
|
||||
:
|
||||
: [ptr] "r" (p), [val] "r" (v)
|
||||
: "memory");
|
||||
}
|
||||
};
|
||||
|
||||
#endif // AARCH64
|
||||
|
||||
#endif // OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_HPP
|
||||
|
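The ldar/stlr specializations removed above implement acquire loads and release stores. For comparison, the same publish/consume guarantee expressed with standard C++ atomics (illustrative only, not HotSpot code); on AArch64 compilers typically emit ldar for the acquire load and stlr for the release store:

#include <atomic>

struct Payload { int value; };

static std::atomic<Payload*> g_published{nullptr};

void publish(Payload* p) {
  p->value = 42;                                    // plain store to the payload
  g_published.store(p, std::memory_order_release);  // release: payload visible before pointer
}

int consume() {
  Payload* p = g_published.load(std::memory_order_acquire);  // acquire: pairs with the release
  return p ? p->value : -1;                                   // sees 42 once p is non-null
}
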
@ -78,7 +78,7 @@
|
||||
|
||||
// Don't #define SPELL_REG_FP for thumb because it is not safe to use, so this makes sure we never fetch it.
|
||||
#ifndef __thumb__
|
||||
#define SPELL_REG_FP AARCH64_ONLY("x29") NOT_AARCH64("fp")
|
||||
#define SPELL_REG_FP "fp"
|
||||
#endif
|
||||
|
||||
address os::current_stack_pointer() {
|
||||
@ -91,19 +91,6 @@ char* os::non_memory_address_word() {
|
||||
return (char*) -1;
|
||||
}
|
||||
|
||||
void os::initialize_thread(Thread* thr) {
|
||||
// Nothing to do
|
||||
}
|
||||
|
||||
#ifdef AARCH64
|
||||
|
||||
#define arm_pc pc
|
||||
#define arm_sp sp
|
||||
#define arm_fp regs[29]
|
||||
#define arm_r0 regs[0]
|
||||
#define ARM_REGS_IN_CONTEXT 31
|
||||
|
||||
#else
|
||||
|
||||
#if NGREG == 16
|
||||
// These definitions are based on the observation that until
|
||||
@ -119,7 +106,6 @@ void os::initialize_thread(Thread* thr) {
|
||||
|
||||
#define ARM_REGS_IN_CONTEXT 16
|
||||
|
||||
#endif // AARCH64
|
||||
|
||||
address os::Linux::ucontext_get_pc(const ucontext_t* uc) {
|
||||
return (address)uc->uc_mcontext.arm_pc;
|
||||
@ -260,13 +246,11 @@ frame os::current_frame() {
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifndef AARCH64
|
||||
extern "C" address check_vfp_fault_instr;
|
||||
extern "C" address check_vfp3_32_fault_instr;
|
||||
|
||||
address check_vfp_fault_instr = NULL;
|
||||
address check_vfp3_32_fault_instr = NULL;
|
||||
#endif // !AARCH64
|
||||
extern "C" address check_simd_fault_instr;
|
||||
address check_simd_fault_instr = NULL;
|
||||
|
||||
@ -286,8 +270,8 @@ extern "C" int JVM_handle_linux_signal(int sig, siginfo_t* info,
|
||||
|
||||
if (sig == SIGILL &&
|
||||
((info->si_addr == (caddr_t)check_simd_fault_instr)
|
||||
NOT_AARCH64(|| info->si_addr == (caddr_t)check_vfp_fault_instr)
|
||||
NOT_AARCH64(|| info->si_addr == (caddr_t)check_vfp3_32_fault_instr))) {
|
||||
|| info->si_addr == (caddr_t)check_vfp_fault_instr
|
||||
|| info->si_addr == (caddr_t)check_vfp3_32_fault_instr)) {
|
||||
// skip faulty instruction + instruction that sets return value to
|
||||
// success and set return value to failure.
|
||||
os::Linux::ucontext_set_pc(uc, (address)info->si_addr + 8);
|
||||
@ -512,9 +496,6 @@ void os::Linux::set_fpu_control_word(int fpu_control) {
|
||||
}
|
||||
|
||||
void os::setup_fpu() {
|
||||
#ifdef AARCH64
|
||||
__asm__ volatile ("msr fpcr, xzr");
|
||||
#else
|
||||
#if !defined(__SOFTFP__) && defined(__VFP_FP__)
|
||||
// Turn on IEEE-754 compliant VFP mode
|
||||
__asm__ volatile (
|
||||
@ -523,7 +504,6 @@ void os::setup_fpu() {
|
||||
: /* no output */ : /* no input */ : "r0"
|
||||
);
|
||||
#endif
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
bool os::is_allocatable(size_t bytes) {
|
||||
@ -559,14 +539,8 @@ void os::print_context(outputStream *st, const void *context) {
|
||||
st->print_cr(" %-3s = " INTPTR_FORMAT, as_Register(r)->name(), reg_area[r]);
|
||||
}
|
||||
#define U64_FORMAT "0x%016llx"
|
||||
#ifdef AARCH64
|
||||
st->print_cr(" %-3s = " U64_FORMAT, "sp", uc->uc_mcontext.sp);
|
||||
st->print_cr(" %-3s = " U64_FORMAT, "pc", uc->uc_mcontext.pc);
|
||||
st->print_cr(" %-3s = " U64_FORMAT, "pstate", uc->uc_mcontext.pstate);
|
||||
#else
|
||||
// now print flag register
|
||||
st->print_cr(" %-4s = 0x%08lx", "cpsr",uc->uc_mcontext.arm_cpsr);
|
||||
#endif
|
||||
st->cr();
|
||||
|
||||
intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc);
|
||||
@ -595,16 +569,10 @@ void os::print_register_info(outputStream *st, const void *context) {
|
||||
print_location(st, reg_area[r]);
|
||||
st->cr();
|
||||
}
|
||||
#ifdef AARCH64
|
||||
st->print_cr(" %-3s = " U64_FORMAT, "pc", uc->uc_mcontext.pc);
|
||||
print_location(st, uc->uc_mcontext.pc);
|
||||
st->cr();
|
||||
#endif
|
||||
st->cr();
|
||||
}
|
||||
|
||||
|
||||
#ifndef AARCH64
|
||||
|
||||
typedef int64_t cmpxchg_long_func_t(int64_t, int64_t, volatile int64_t*);
|
||||
|
||||
@ -714,7 +682,6 @@ int32_t os::atomic_cmpxchg_bootstrap(int32_t compare_value, int32_t exchange_val
|
||||
return old_value;
|
||||
}
|
||||
|
||||
#endif // !AARCH64
|
||||
|
||||
#ifndef PRODUCT
|
||||
void os::verify_stack_alignment() {
|
||||
|