Andrew Haley 2019-07-11 11:36:56 +01:00
commit 60b005d766
191 changed files with 5360 additions and 2418 deletions

@ -565,3 +565,5 @@ b034d2dee5fc93d42a81b65e58ce3f91e42586ff jdk-13+23
22b3b7983adab54e318f75aeb94471f7a4429c1e jdk-13+25
0692b67f54621991ba7afbf23e55b788f3555e69 jdk-13+26
b7f68ddec66f996ae3aad03291d129ca9f02482d jdk-13+27
1e95931e7d8fa7e3899340a9c7cb28dbea50c10c jdk-13+28
3081f39a3d30d63b112098386ac2bb027c2b7223 jdk-13+29

@ -74,6 +74,8 @@ h4 {
margin: 1.5ex 0pt 1ex 0pt;
}
a { text-decoration: none }
a:link {
color: #4A6782;
}

@ -21,4 +21,4 @@
# or visit www.oracle.com if you need additional information or have any
# questions.
#
tzdata2018g
tzdata2019a

@ -387,6 +387,11 @@ Zone Africa/Cairo 2:05:09 - LMT 1900 Oct
# See Africa/Lagos.
# Eritrea
# See Africa/Nairobi.
# Eswatini (formerly Swaziland)
# See Africa/Johannesburg.
# Ethiopia
# See Africa/Nairobi.
#
@ -870,8 +875,41 @@ Zone Indian/Mauritius 3:50:00 - LMT 1907 # Port Louis
# From Mohamed Essedik Najd (2018-10-26):
# Today, a Moroccan government council approved the perpetual addition
# of 60 minutes to the regular Moroccan timezone.
# From Brian Inglis (2018-10-26):
# http://www.maroc.ma/fr/actualites/le-conseil-de-gouvernement-adopte-un-projet-de-decret-relatif-lheure-legale-stipulant-le
# From Matt Johnson (2018-10-28):
# http://www.sgg.gov.ma/Portals/1/BO/2018/BO_6720-bis_Ar.pdf
#
# From Maamar Abdelkader (2018-11-01):
# We usually move clocks back the previous weekend and come back to the +1
# the weekend after.... The government has not yet announced the decision
# about this temporary change. But it's 99% sure that it will be the case,
# as in previous years. An unofficial survey was done these days, showing
# that 64% of the people asked are OK with moving from +1 to +0 during Ramadan.
# https://leconomiste.com/article/1035870-enquete-l-economiste-sunergia-64-des-marocains-plebiscitent-le-gmt-pendant-ramadan
#
# From Paul Eggert (2018-11-01):
# For now, guess that Morocco will fall back at 03:00 the last Sunday
# before Ramadan, and spring forward at 02:00 the first Sunday after
# Ramadan, as this has been the practice since 2012. To implement this,
# transition dates for 2019 through 2037 were determined by running the
# following program under GNU Emacs 26.1.
# (let ((islamic-year 1440))
# (require 'cal-islam)
# (while (< islamic-year 1460)
# (let ((a (calendar-islamic-to-absolute (list 9 1 islamic-year)))
# (b (calendar-islamic-to-absolute (list 10 1 islamic-year)))
# (sunday 0))
# (while (/= sunday (mod (setq a (1- a)) 7)))
# (while (/= sunday (mod b 7))
# (setq b (1+ b)))
# (setq a (calendar-gregorian-from-absolute a))
# (setq b (calendar-gregorian-from-absolute b))
# (insert
# (format
# (concat "Rule\tMorocco\t%d\tonly\t-\t%s\t%2d\t 3:00\t-1:00\t-\n"
# "Rule\tMorocco\t%d\tonly\t-\t%s\t%2d\t 2:00\t0\t-\n")
# (car (cdr (cdr a))) (calendar-month-name (car a) t) (car (cdr a))
# (car (cdr (cdr b))) (calendar-month-name (car b) t) (car (cdr b)))))
# (setq islamic-year (+ 1 islamic-year))))
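# For comparison, here is a rough equivalent of the program above using
# java.time's Hijrah calendar support. It is only an approximation:
# HijrahChronology defaults to the Umm al-Qura variant rather than the
# arithmetic calendar cal-islam uses, so a few dates may differ by a day.
#   import java.time.DayOfWeek;
#   import java.time.LocalDate;
#   import java.time.chrono.HijrahChronology;
#   import java.time.format.TextStyle;
#   import java.time.temporal.TemporalAdjusters;
#   import java.util.Locale;
#
#   public class MoroccoGuess {
#       public static void main(String[] args) {
#           for (int y = 1440; y < 1460; y++) {   // Islamic years, as above
#               // last Sunday strictly before Ramadan 1: fall back at 03:00
#               LocalDate a = LocalDate.from(HijrahChronology.INSTANCE.date(y, 9, 1))
#                       .with(TemporalAdjusters.previous(DayOfWeek.SUNDAY));
#               // first Sunday on or after Shawwal 1: spring forward at 02:00
#               LocalDate b = LocalDate.from(HijrahChronology.INSTANCE.date(y, 10, 1))
#                       .with(TemporalAdjusters.nextOrSame(DayOfWeek.SUNDAY));
#               System.out.printf("Rule\tMorocco\t%d\tonly\t-\t%s\t%2d\t 3:00\t-1:00\t-%n",
#                       a.getYear(), a.getMonth().getDisplayName(TextStyle.SHORT, Locale.ENGLISH),
#                       a.getDayOfMonth());
#               System.out.printf("Rule\tMorocco\t%d\tonly\t-\t%s\t%2d\t 2:00\t0\t-%n",
#                       b.getYear(), b.getMonth().getDisplayName(TextStyle.SHORT, Locale.ENGLISH),
#                       b.getDayOfMonth());
#           }
#       }
#   }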
# RULE NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Morocco 1939 only - Sep 12 0:00 1:00 -
@ -903,7 +941,7 @@ Rule Morocco 2012 only - Aug 20 2:00 1:00 -
Rule Morocco 2012 only - Sep 30 3:00 0 -
Rule Morocco 2013 only - Jul 7 3:00 0 -
Rule Morocco 2013 only - Aug 10 2:00 1:00 -
Rule Morocco 2013 2018 - Oct lastSun 3:00 0 -
Rule Morocco 2013 2017 - Oct lastSun 3:00 0 -
Rule Morocco 2014 2018 - Mar lastSun 2:00 1:00 -
Rule Morocco 2014 only - Jun 28 3:00 0 -
Rule Morocco 2014 only - Aug 2 2:00 1:00 -
@ -915,13 +953,53 @@ Rule Morocco 2017 only - May 21 3:00 0 -
Rule Morocco 2017 only - Jul 2 2:00 1:00 -
Rule Morocco 2018 only - May 13 3:00 0 -
Rule Morocco 2018 only - Jun 17 2:00 1:00 -
Rule Morocco 2019 only - May 5 3:00 0 -
Rule Morocco 2019 only - Jun 9 2:00 1:00 -
Rule Morocco 2020 only - Apr 19 3:00 0 -
Rule Morocco 2020 only - May 24 2:00 1:00 -
Rule Morocco 2021 only - Apr 11 3:00 0 -
Rule Morocco 2021 only - May 16 2:00 1:00 -
Rule Morocco 2022 only - Mar 27 3:00 0 -
Rule Morocco 2022 only - May 8 2:00 1:00 -
Rule Morocco 2023 only - Mar 19 3:00 0 -
Rule Morocco 2023 only - Apr 23 2:00 1:00 -
Rule Morocco 2024 only - Mar 10 3:00 0 -
Rule Morocco 2024 only - Apr 14 2:00 1:00 -
Rule Morocco 2025 only - Feb 23 3:00 0 -
Rule Morocco 2025 only - Apr 6 2:00 1:00 -
Rule Morocco 2026 only - Feb 15 3:00 0 -
Rule Morocco 2026 only - Mar 22 2:00 1:00 -
Rule Morocco 2027 only - Feb 7 3:00 0 -
Rule Morocco 2027 only - Mar 14 2:00 1:00 -
Rule Morocco 2028 only - Jan 23 3:00 0 -
Rule Morocco 2028 only - Feb 27 2:00 1:00 -
Rule Morocco 2029 only - Jan 14 3:00 0 -
Rule Morocco 2029 only - Feb 18 2:00 1:00 -
Rule Morocco 2029 only - Dec 30 3:00 0 -
Rule Morocco 2030 only - Feb 10 2:00 1:00 -
Rule Morocco 2030 only - Dec 22 3:00 0 -
Rule Morocco 2031 only - Jan 26 2:00 1:00 -
Rule Morocco 2031 only - Dec 14 3:00 0 -
Rule Morocco 2032 only - Jan 18 2:00 1:00 -
Rule Morocco 2032 only - Nov 28 3:00 0 -
Rule Morocco 2033 only - Jan 9 2:00 1:00 -
Rule Morocco 2033 only - Nov 20 3:00 0 -
Rule Morocco 2033 only - Dec 25 2:00 1:00 -
Rule Morocco 2034 only - Nov 5 3:00 0 -
Rule Morocco 2034 only - Dec 17 2:00 1:00 -
Rule Morocco 2035 only - Oct 28 3:00 0 -
Rule Morocco 2035 only - Dec 2 2:00 1:00 -
Rule Morocco 2036 only - Oct 19 3:00 0 -
Rule Morocco 2036 only - Nov 23 2:00 1:00 -
Rule Morocco 2037 only - Oct 4 3:00 0 -
Rule Morocco 2037 only - Nov 15 2:00 1:00 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Casablanca -0:30:20 - LMT 1913 Oct 26
0:00 Morocco +00/+01 1984 Mar 16
1:00 - +01 1986
0:00 Morocco +00/+01 2018 Oct 27
1:00 - +01
0:00 Morocco +00/+01 2018 Oct 28 3:00
0:00 Morocco +00/+01
# Western Sahara
#
@ -936,8 +1014,8 @@ Zone Africa/Casablanca -0:30:20 - LMT 1913 Oct 26
Zone Africa/El_Aaiun -0:52:48 - LMT 1934 Jan # El Aaiún
-1:00 - -01 1976 Apr 14
0:00 Morocco +00/+01 2018 Oct 27
1:00 - +01
0:00 Morocco +00/+01 2018 Oct 28 3:00
0:00 Morocco +00/+01
# Mozambique
#
@ -1094,10 +1172,20 @@ Zone Indian/Reunion 3:41:52 - LMT 1911 Jun # Saint-Denis
# the switch is from 01:00 to 02:00 ... [Decree No. 25/2017]
# http://www.mnec.gov.st/index.php/publicacoes/documentos/file/90-decreto-lei-n-25-2017
# From Vadim Nasardinov (2018-12-29):
# São Tomé and Príncipe is about to do the following on Jan 1, 2019:
# https://www.stp-press.st/2018/12/05/governo-jesus-ja-decidiu-repor-hora-legal-sao-tomense/
#
# From Michael Deckers (2018-12-30):
# https://www.legis-palop.org/download.jsp?idFile=102818
# ... [The legal time of the country, which coincides with universal
# coordinated time, will be restituted at 2 o'clock on day 1 of January, 2019.]
Zone Africa/Sao_Tome 0:26:56 - LMT 1884
-0:36:45 - LMT 1912 Jan 1 00:00u # Lisbon MT
0:00 - GMT 2018 Jan 1 01:00
1:00 - WAT
1:00 - WAT 2019 Jan 1 02:00
0:00 - GMT
# Senegal
# See Africa/Abidjan.
@ -1128,7 +1216,7 @@ Zone Africa/Johannesburg 1:52:00 - LMT 1892 Feb 8
1:30 - SAST 1903 Mar
2:00 SA SAST
Link Africa/Johannesburg Africa/Maseru # Lesotho
Link Africa/Johannesburg Africa/Mbabane # Swaziland
Link Africa/Johannesburg Africa/Mbabane # Eswatini
#
# Marion and Prince Edward Is
# scientific station since 1947
@ -1170,9 +1258,6 @@ Zone Africa/Juba 2:06:28 - LMT 1931
2:00 Sudan CA%sT 2000 Jan 15 12:00
3:00 - EAT
# Swaziland
# See Africa/Johannesburg.
# Tanzania
# See Africa/Nairobi.

@ -609,12 +609,82 @@ Zone Asia/Urumqi 5:50:20 - LMT 1928
# obtained from
# http://www.hko.gov.hk/gts/time/Summertime.htm
# From Arthur David Olson (2009-10-28):
# From Phake Nick (2018-10-27):
# According to Singaporean newspaper
# http://eresources.nlb.gov.sg/newspapers/Digitised/Article/singfreepresswk19041102-1.2.37
# the day that Hong Kong started using GMT+8 should be Oct 30, 1904.
#
# From Paul Eggert (2018-11-17):
# Hong Kong had a time ball near the Marine Police Station, Tsim Sha Tsui.
# "The ball was raised manually each day and dropped at exactly 1pm
# (except on Sundays and Government holidays)."
# Dyson AD. From Time Ball to Atomic Clock. Hong Kong Government. 1983.
# <https://www.hko.gov.hk/publica/gen_pub/timeball_atomic_clock.pdf>
# "From 1904 October 30 the time-ball at Hong Kong has been dropped by order
# of the Governor of the Colony at 17h 0m 0s G.M.T., which is 23m 18s.14 in
# advance of 1h 0m 0s of Hong Kong mean time."
# Hollis HP. Universal Time, Longitudes, and Geodesy. Mon Not R Astron Soc.
# 1905-02-10;65(4):405-6. https://doi.org/10.1093/mnras/65.4.382
#
# From Joseph Myers (2018-11-18):
# An astronomer before 1925 referring to GMT would have been using the old
# astronomical convention where the day started at noon, not midnight.
#
# From Steve Allen (2018-11-17):
# Meteorological Observations made at the Hongkong Observatory in the year 1904
# page 4 <https://books.google.com/books?id=kgw5AQAAMAAJ&pg=RA4-PA4>
# ... the log of drop times in Table II shows that on Sunday 1904-10-30 the
# ball was dropped. So that looks like a special case drop for the sake
# of broadcasting the new local time.
#
# From Phake Nick (2018-11-18):
# According to The Hong Kong Weekly Press, 1904-10-29, p.324, the
# governor of Hong Kong at the time stated that "We are further desired to
# make it known that the change will be effected by firing the gun and by the
# dropping of the Ball at 23min. 18sec. before one."
# From Paul Eggert (2018-11-18):
# See <https://mmis.hkpl.gov.hk> for this; unfortunately Flash is required.
# From Phake Nick (2018-10-26):
# I went to check microfilm records stored at Hong Kong Public Library....
# on September 30 1941, according to Ta Kung Pao (Hong Kong edition), it was
# stated that the fallback would occur on the next day (the 1st) at "03:00 am
# (Hong Kong Time 04:00 am)" and the clock would fall back by half an hour.
# (03:00 probably refers to the time commonly used in mainland China at the
# time, given the paper's background) ... the sunrise/sunset times given by
# the South China Morning Post for October 1st were indeed moved by half an
# hour compared to before. After that, in December, the battle to capture
# Hong Kong started and the library does not seem to have any stored press
# records from that period of time. Some media resumed publication soon after
# that within the same month, but there was not much information about time
# there. Later they started including a radio program guide when they restored
# radio service, explicitly mentioning that it used Tokyo standard time, and
# later added a note saying it is half an hour ahead of the old Hong Kong
# standard time, which also seems to indicate that Hong Kong was not using
# GMT+8 when it was captured by Japan.
#
# Image of related sections on newspaper:
# * 1941-09-30, Ta Kung Pao (Hong Kong), "Winter Time start tomorrow".
# https://i.imgur.com/6waY51Z.jpg (Chinese)
# * 1941-09-29, South China Morning Post, Information on sunrise/sunset
# time and other things for September 30 and October 1.
# https://i.imgur.com/kCiUR78.jpg
# * 1942-02-05. The Hong Kong News, Radio Program Guide.
# https://i.imgur.com/eVvDMzS.jpg
# * 1941-06-14. Hong Kong Daily Press, Daylight Saving from 3am Tomorrow.
# https://i.imgur.com/05KkvtC.png
# * 1941-09-30, Hong Kong Daily Press, Winter Time Warning.
# https://i.imgur.com/dge4kFJ.png
# Also, the liberation day of Hong Kong after WWII, when British rule over
# the territory resumed, was August 30, 1945, which I think should be the
# termination date for the use of JST in the territory....
# From Paul Eggert (2018-11-17):
# Here are the dates given at
# http://www.hko.gov.hk/gts/time/Summertime.htm
# as of 2009-10-28:
# https://www.hko.gov.hk/gts/time/Summertime.htm
# as of 2014-06-19:
# Year Period
# 1941 1 Apr to 30 Sep
# 1941 15 Jun to 30 Sep
# 1942 Whole year
# 1943 Whole year
# 1944 Whole year
@ -625,7 +695,7 @@ Zone Asia/Urumqi 5:50:20 - LMT 1928
# 1949 3 Apr to 30 Oct
# 1950 2 Apr to 29 Oct
# 1951 1 Apr to 28 Oct
# 1952 6 Apr to 25 Oct
# 1952 6 Apr to 2 Nov
# 1953 5 Apr to 1 Nov
# 1954 21 Mar to 31 Oct
# 1955 20 Mar to 6 Nov
@ -654,25 +724,25 @@ Zone Asia/Urumqi 5:50:20 - LMT 1928
# 1978 Nil
# 1979 13 May to 21 Oct
# 1980 to Now Nil
# The page does not give start or end times of day.
# The page does not give a start date for 1942.
# The page does not give an end date for 1945.
# The Japanese occupation of Hong Kong began on 1941-12-25.
# The Japanese surrender of Hong Kong was signed 1945-09-15.
# For lack of anything better, use start of those days as the transition times.
# The page does not give times of day for transitions,
# or dates for the 1942 and 1945 transitions.
# The Japanese occupation of Hong Kong began 1941-12-25.
# The Japanese surrender of Hong Kong was signed 1945-09-16; see:
# Heaver S. The days after the Pacific war ended: unsettling times
# in Hong Kong. Post Magazine. 2016-06-13.
# https://www.scmp.com/magazines/post-magazine/article/1852990/days-after-pacific-war-ended-unsettling-times-hong-kong
# For lack of anything better, use start of those days as the
# transition times.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule HK 1941 only - Apr 1 3:30 1:00 S
Rule HK 1941 only - Sep 30 3:30 0 -
Rule HK 1946 only - Apr 20 3:30 1:00 S
Rule HK 1946 only - Dec 1 3:30 0 -
Rule HK 1947 only - Apr 13 3:30 1:00 S
Rule HK 1947 only - Dec 30 3:30 0 -
Rule HK 1948 only - May 2 3:30 1:00 S
Rule HK 1948 1951 - Oct lastSun 3:30 0 -
Rule HK 1952 only - Oct 25 3:30 0 -
Rule HK 1952 1953 - Nov Sun>=1 3:30 0 -
Rule HK 1949 1953 - Apr Sun>=1 3:30 1:00 S
Rule HK 1953 only - Nov 1 3:30 0 -
Rule HK 1954 1964 - Mar Sun>=18 3:30 1:00 S
Rule HK 1954 only - Oct 31 3:30 0 -
Rule HK 1955 1964 - Nov Sun>=1 3:30 0 -
@ -682,9 +752,11 @@ Rule HK 1973 only - Dec 30 3:30 1:00 S
Rule HK 1979 only - May Sun>=8 3:30 1:00 S
Rule HK 1979 only - Oct Sun>=16 3:30 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Hong_Kong 7:36:42 - LMT 1904 Oct 30
8:00 HK HK%sT 1941 Dec 25
9:00 - JST 1945 Sep 15
Zone Asia/Hong_Kong 7:36:42 - LMT 1904 Oct 30 0:36:42
8:00 - HKT 1941 Jun 15 3:30
8:00 1:00 HKST 1941 Oct 1 4:00
8:30 - HKT 1941 Dec 25
9:00 - JST 1945 Sep 16
8:00 HK HK%sT
###############################################################################
@ -1080,6 +1152,16 @@ Zone Asia/Dili 8:22:20 - LMT 1912 Jan 1
# India
# British astronomer Henry Park Hollis disliked India Standard Time's offset:
# "A new time system has been proposed for India, Further India, and Burmah.
# The scheme suggested is that the times of the meridians 5½ and 6½ hours
# east of Greenwich should be adopted in these territories. No reason is
# given why hourly meridians five hours and six hours east should not be
# chosen; a plan which would bring the time of India into harmony with
# that of almost the whole of the civilised world."
# Hollis HP. Universal Time, Longitudes, and Geodesy. Mon Not R Astron Soc.
# 1905-02-10;65(4):405-6. https://doi.org/10.1093/mnras/65.4.382
# From Ian P. Beacock, in "A brief history of (modern) time", The Atlantic
# https://www.theatlantic.com/technology/archive/2015/12/the-creation-of-modern-time/421419/
# (2015-12-22):
@ -1250,12 +1332,65 @@ Zone Asia/Jayapura 9:22:48 - LMT 1932 Nov
# leap year calculation involved. There has never been any serious
# plan to change that law....
#
# From Paul Eggert (2006-03-22):
# From Paul Eggert (2018-11-30):
# Go with Shanks & Pottenger before Sept. 1991, and with Pournader thereafter.
# I used Ed Reingold's cal-persia in GNU Emacs 21.2 to check Persian dates,
# stopping after 2037 when 32-bit time_t's overflow.
# That cal-persia used Birashk's approximation, which disagrees with the solar
# calendar predictions for the year 2025, so I corrected those dates by hand.
# I used the following code in GNU Emacs 26.1 to generate the "Rule Iran"
# lines from 2008 through 2087. Emacs 26.1 uses Ed Reingold's
# cal-persia implementation of Birashk's approximation, which in the
# 2008-2087 range disagrees with the astronomical Persian calendar
# for Persian years 1404 (Gregorian 2025) and 1437 (Gregorian 2058),
# so the following code special-cases those years. See Table 15.1, page 264, of:
# Edward M. Reingold and Nachum Dershowitz, Calendrical Calculations:
# The Ultimate Edition, Cambridge University Press (2018).
# https://www.cambridge.org/fr/academic/subjects/computer-science/computing-general-interest/calendrical-calculations-ultimate-edition-4th-edition
# Page 258, footnote 2, of this book says there is some dispute over what will
# happen in 2091 (and some other years after that), so this code
# stops in 2087, as 2088 and 2089 agree with the "max" rule below.
# (cl-loop
# initially (require 'cal-persia)
# with first-persian-year = 1387
# with last-persian-year = 1466
# ;; Exceptional years in the above range,
# ;; from Reingold & Dershowitz Table 15.1, page 264:
# with exceptional-persian-years = '(1404 1437)
# with range-start = nil
# for persian-year from first-persian-year to last-persian-year
# do
# (let*
# ((exceptional-year-offset
# (if (member persian-year exceptional-persian-years) 1 0))
# (beg-dst-absolute
# (+ (calendar-persian-to-absolute (list 1 1 persian-year))
# exceptional-year-offset))
# (end-dst-absolute
# (+ (calendar-persian-to-absolute (list 6 30 persian-year))
# exceptional-year-offset))
# (next-year-beg-dst-absolute
# (+ (calendar-persian-to-absolute (list 1 1 (1+ persian-year)))
# (if (member (1+ persian-year) exceptional-persian-years) 1 0)))
# (beg-dst (calendar-gregorian-from-absolute beg-dst-absolute))
# (end-dst (calendar-gregorian-from-absolute end-dst-absolute))
# (next-year-beg-dst (calendar-gregorian-from-absolute
# next-year-beg-dst-absolute))
# (year (calendar-extract-year beg-dst))
# (range-end (if range-start year "only")))
# (setq range-start (or range-start year))
# (when (or (/= (calendar-extract-day beg-dst)
# (calendar-extract-day next-year-beg-dst))
# (= persian-year last-persian-year))
# (insert
# (format
# "Rule\tIran\t%d\t%s\t-\t%s\t%2d\t24:00\t1:00\t-\n"
# range-start range-end
# (calendar-month-name (calendar-extract-month beg-dst) t)
# (calendar-extract-day beg-dst)))
# (insert
# (format
# "Rule\tIran\t%d\t%s\t-\t%s\t%2d\t24:00\t0\t-\n"
# range-start range-end
# (calendar-month-name (calendar-extract-month end-dst) t)
# (calendar-extract-day end-dst)))
# (setq range-start nil))))
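# One way to sanity-check the generated table is to ask java.time for the
# transitions it derives from the compiled zone data (a jshell sketch; the
# output depends on the tzdata version bundled with the JDK):
#   import java.time.Instant;
#   import java.time.ZoneId;
#   import java.time.zone.ZoneOffsetTransition;
#   import java.time.zone.ZoneRules;
#
#   ZoneRules rules = ZoneId.of("Asia/Tehran").getRules();
#   Instant t = Instant.parse("2019-01-01T00:00:00Z");
#   for (int i = 0; i < 4; i++) {
#       ZoneOffsetTransition tr = rules.nextTransition(t);
#       if (tr == null) break;
#       System.out.println(tr);  // e.g. the 2019 gap from +03:30 to +04:30 at Mar 22 00:00
#       t = tr.getInstant();
#   }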
#
# From Oscar van Vlijmen (2005-03-30), writing about future
# discrepancies between cal-persia and the Iranian calendar:
@ -1290,61 +1425,113 @@ Zone Asia/Jayapura 9:22:48 - LMT 1932 Nov
# thirtieth day of Shahrivar.
#
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Iran 1978 1980 - Mar 21 0:00 1:00 -
Rule Iran 1978 only - Oct 21 0:00 0 -
Rule Iran 1979 only - Sep 19 0:00 0 -
Rule Iran 1980 only - Sep 23 0:00 0 -
Rule Iran 1991 only - May 3 0:00 1:00 -
Rule Iran 1992 1995 - Mar 22 0:00 1:00 -
Rule Iran 1991 1995 - Sep 22 0:00 0 -
Rule Iran 1996 only - Mar 21 0:00 1:00 -
Rule Iran 1996 only - Sep 21 0:00 0 -
Rule Iran 1997 1999 - Mar 22 0:00 1:00 -
Rule Iran 1997 1999 - Sep 22 0:00 0 -
Rule Iran 2000 only - Mar 21 0:00 1:00 -
Rule Iran 2000 only - Sep 21 0:00 0 -
Rule Iran 2001 2003 - Mar 22 0:00 1:00 -
Rule Iran 2001 2003 - Sep 22 0:00 0 -
Rule Iran 2004 only - Mar 21 0:00 1:00 -
Rule Iran 2004 only - Sep 21 0:00 0 -
Rule Iran 2005 only - Mar 22 0:00 1:00 -
Rule Iran 2005 only - Sep 22 0:00 0 -
Rule Iran 2008 only - Mar 21 0:00 1:00 -
Rule Iran 2008 only - Sep 21 0:00 0 -
Rule Iran 2009 2011 - Mar 22 0:00 1:00 -
Rule Iran 2009 2011 - Sep 22 0:00 0 -
Rule Iran 2012 only - Mar 21 0:00 1:00 -
Rule Iran 2012 only - Sep 21 0:00 0 -
Rule Iran 2013 2015 - Mar 22 0:00 1:00 -
Rule Iran 2013 2015 - Sep 22 0:00 0 -
Rule Iran 2016 only - Mar 21 0:00 1:00 -
Rule Iran 2016 only - Sep 21 0:00 0 -
Rule Iran 2017 2019 - Mar 22 0:00 1:00 -
Rule Iran 2017 2019 - Sep 22 0:00 0 -
Rule Iran 2020 only - Mar 21 0:00 1:00 -
Rule Iran 2020 only - Sep 21 0:00 0 -
Rule Iran 2021 2023 - Mar 22 0:00 1:00 -
Rule Iran 2021 2023 - Sep 22 0:00 0 -
Rule Iran 2024 only - Mar 21 0:00 1:00 -
Rule Iran 2024 only - Sep 21 0:00 0 -
Rule Iran 2025 2027 - Mar 22 0:00 1:00 -
Rule Iran 2025 2027 - Sep 22 0:00 0 -
Rule Iran 2028 2029 - Mar 21 0:00 1:00 -
Rule Iran 2028 2029 - Sep 21 0:00 0 -
Rule Iran 2030 2031 - Mar 22 0:00 1:00 -
Rule Iran 2030 2031 - Sep 22 0:00 0 -
Rule Iran 2032 2033 - Mar 21 0:00 1:00 -
Rule Iran 2032 2033 - Sep 21 0:00 0 -
Rule Iran 2034 2035 - Mar 22 0:00 1:00 -
Rule Iran 2034 2035 - Sep 22 0:00 0 -
Rule Iran 1978 1980 - Mar 20 24:00 1:00 -
Rule Iran 1978 only - Oct 20 24:00 0 -
Rule Iran 1979 only - Sep 18 24:00 0 -
Rule Iran 1980 only - Sep 22 24:00 0 -
Rule Iran 1991 only - May 2 24:00 1:00 -
Rule Iran 1992 1995 - Mar 21 24:00 1:00 -
Rule Iran 1991 1995 - Sep 21 24:00 0 -
Rule Iran 1996 only - Mar 20 24:00 1:00 -
Rule Iran 1996 only - Sep 20 24:00 0 -
Rule Iran 1997 1999 - Mar 21 24:00 1:00 -
Rule Iran 1997 1999 - Sep 21 24:00 0 -
Rule Iran 2000 only - Mar 20 24:00 1:00 -
Rule Iran 2000 only - Sep 20 24:00 0 -
Rule Iran 2001 2003 - Mar 21 24:00 1:00 -
Rule Iran 2001 2003 - Sep 21 24:00 0 -
Rule Iran 2004 only - Mar 20 24:00 1:00 -
Rule Iran 2004 only - Sep 20 24:00 0 -
Rule Iran 2005 only - Mar 21 24:00 1:00 -
Rule Iran 2005 only - Sep 21 24:00 0 -
Rule Iran 2008 only - Mar 20 24:00 1:00 -
Rule Iran 2008 only - Sep 20 24:00 0 -
Rule Iran 2009 2011 - Mar 21 24:00 1:00 -
Rule Iran 2009 2011 - Sep 21 24:00 0 -
Rule Iran 2012 only - Mar 20 24:00 1:00 -
Rule Iran 2012 only - Sep 20 24:00 0 -
Rule Iran 2013 2015 - Mar 21 24:00 1:00 -
Rule Iran 2013 2015 - Sep 21 24:00 0 -
Rule Iran 2016 only - Mar 20 24:00 1:00 -
Rule Iran 2016 only - Sep 20 24:00 0 -
Rule Iran 2017 2019 - Mar 21 24:00 1:00 -
Rule Iran 2017 2019 - Sep 21 24:00 0 -
Rule Iran 2020 only - Mar 20 24:00 1:00 -
Rule Iran 2020 only - Sep 20 24:00 0 -
Rule Iran 2021 2023 - Mar 21 24:00 1:00 -
Rule Iran 2021 2023 - Sep 21 24:00 0 -
Rule Iran 2024 only - Mar 20 24:00 1:00 -
Rule Iran 2024 only - Sep 20 24:00 0 -
Rule Iran 2025 2027 - Mar 21 24:00 1:00 -
Rule Iran 2025 2027 - Sep 21 24:00 0 -
Rule Iran 2028 2029 - Mar 20 24:00 1:00 -
Rule Iran 2028 2029 - Sep 20 24:00 0 -
Rule Iran 2030 2031 - Mar 21 24:00 1:00 -
Rule Iran 2030 2031 - Sep 21 24:00 0 -
Rule Iran 2032 2033 - Mar 20 24:00 1:00 -
Rule Iran 2032 2033 - Sep 20 24:00 0 -
Rule Iran 2034 2035 - Mar 21 24:00 1:00 -
Rule Iran 2034 2035 - Sep 21 24:00 0 -
Rule Iran 2036 2037 - Mar 20 24:00 1:00 -
Rule Iran 2036 2037 - Sep 20 24:00 0 -
Rule Iran 2038 2039 - Mar 21 24:00 1:00 -
Rule Iran 2038 2039 - Sep 21 24:00 0 -
Rule Iran 2040 2041 - Mar 20 24:00 1:00 -
Rule Iran 2040 2041 - Sep 20 24:00 0 -
Rule Iran 2042 2043 - Mar 21 24:00 1:00 -
Rule Iran 2042 2043 - Sep 21 24:00 0 -
Rule Iran 2044 2045 - Mar 20 24:00 1:00 -
Rule Iran 2044 2045 - Sep 20 24:00 0 -
Rule Iran 2046 2047 - Mar 21 24:00 1:00 -
Rule Iran 2046 2047 - Sep 21 24:00 0 -
Rule Iran 2048 2049 - Mar 20 24:00 1:00 -
Rule Iran 2048 2049 - Sep 20 24:00 0 -
Rule Iran 2050 2051 - Mar 21 24:00 1:00 -
Rule Iran 2050 2051 - Sep 21 24:00 0 -
Rule Iran 2052 2053 - Mar 20 24:00 1:00 -
Rule Iran 2052 2053 - Sep 20 24:00 0 -
Rule Iran 2054 2055 - Mar 21 24:00 1:00 -
Rule Iran 2054 2055 - Sep 21 24:00 0 -
Rule Iran 2056 2057 - Mar 20 24:00 1:00 -
Rule Iran 2056 2057 - Sep 20 24:00 0 -
Rule Iran 2058 2059 - Mar 21 24:00 1:00 -
Rule Iran 2058 2059 - Sep 21 24:00 0 -
Rule Iran 2060 2062 - Mar 20 24:00 1:00 -
Rule Iran 2060 2062 - Sep 20 24:00 0 -
Rule Iran 2063 only - Mar 21 24:00 1:00 -
Rule Iran 2063 only - Sep 21 24:00 0 -
Rule Iran 2064 2066 - Mar 20 24:00 1:00 -
Rule Iran 2064 2066 - Sep 20 24:00 0 -
Rule Iran 2067 only - Mar 21 24:00 1:00 -
Rule Iran 2067 only - Sep 21 24:00 0 -
Rule Iran 2068 2070 - Mar 20 24:00 1:00 -
Rule Iran 2068 2070 - Sep 20 24:00 0 -
Rule Iran 2071 only - Mar 21 24:00 1:00 -
Rule Iran 2071 only - Sep 21 24:00 0 -
Rule Iran 2072 2074 - Mar 20 24:00 1:00 -
Rule Iran 2072 2074 - Sep 20 24:00 0 -
Rule Iran 2075 only - Mar 21 24:00 1:00 -
Rule Iran 2075 only - Sep 21 24:00 0 -
Rule Iran 2076 2078 - Mar 20 24:00 1:00 -
Rule Iran 2076 2078 - Sep 20 24:00 0 -
Rule Iran 2079 only - Mar 21 24:00 1:00 -
Rule Iran 2079 only - Sep 21 24:00 0 -
Rule Iran 2080 2082 - Mar 20 24:00 1:00 -
Rule Iran 2080 2082 - Sep 20 24:00 0 -
Rule Iran 2083 only - Mar 21 24:00 1:00 -
Rule Iran 2083 only - Sep 21 24:00 0 -
Rule Iran 2084 2086 - Mar 20 24:00 1:00 -
Rule Iran 2084 2086 - Sep 20 24:00 0 -
Rule Iran 2087 only - Mar 21 24:00 1:00 -
Rule Iran 2087 only - Sep 21 24:00 0 -
#
# The following rules are approximations starting in the year 2038.
# These are the best post-2037 approximations available, given the
# restrictions of a single rule using a Gregorian-based data format.
# The following rules are approximations starting in the year 2088.
# These are the best post-2088 approximations available, given the
# restrictions of a single rule using ordinary Gregorian dates.
# At some point this table will need to be extended, though quite
# possibly Iran will change the rules first.
Rule Iran 2036 max - Mar 21 0:00 1:00 -
Rule Iran 2036 max - Sep 21 0:00 0 -
Rule Iran 2088 max - Mar 20 24:00 1:00 -
Rule Iran 2088 max - Sep 20 24:00 0 -
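# The date shifts above are notational: "Mar 20 24:00" names the same
# instant as the old "Mar 21 0:00", since 24:00 on one day is midnight at
# the start of the next; the substantive change is that exact rules now
# run through 2087 instead of 2037. A one-line jshell check:
#   import java.time.LocalDate;
#
#   LocalDate.of(2019, 3, 20).plusDays(1).atStartOfDay()
#           .equals(LocalDate.of(2019, 3, 21).atStartOfDay())   // => true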
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Tehran 3:25:44 - LMT 1916
@ -1456,6 +1643,24 @@ Rule Zion 1974 only - Jul 7 0:00 1:00 D
Rule Zion 1974 only - Oct 13 0:00 0 S
Rule Zion 1975 only - Apr 20 0:00 1:00 D
Rule Zion 1975 only - Aug 31 0:00 0 S
# From Alois Treindl (2019-03-06):
# http://www.moin.gov.il/Documents/שעון קיץ/clock-50-years-7-2014.pdf
# From Isaac Starkman (2019-03-06):
# Summer time was in effect in that period in 1980 and 1984; see
# https://www.ynet.co.il/articles/0,7340,L-3951073,00.html
# You can of course read it in translation.
# I checked the local newspapers for those years.
# It started at midnight and ended at 01.00 am.
# From Paul Eggert (2019-03-06):
# Also see this thread about the moin.gov.il URL:
# https://mm.icann.org/pipermail/tz/2018-November/027194.html
Rule Zion 1980 only - Aug 2 0:00 1:00 D
Rule Zion 1980 only - Sep 13 1:00 0 S
Rule Zion 1984 only - May 5 0:00 1:00 D
Rule Zion 1984 only - Aug 25 1:00 0 S
# From Shanks & Pottenger:
Rule Zion 1985 only - Apr 14 0:00 1:00 D
Rule Zion 1985 only - Sep 15 0:00 0 S
Rule Zion 1986 only - May 18 0:00 1:00 D
@ -1714,7 +1919,9 @@ Rule Japan 1950 1951 - May Sat>=1 24:00 1:00 D
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Tokyo 9:18:59 - LMT 1887 Dec 31 15:00u
9:00 Japan J%sT
# Since 1938, all Japanese possessions have been like Asia/Tokyo.
# Since 1938, all Japanese possessions have been like Asia/Tokyo,
# except that Truk (Chuuk), Ponape (Pohnpei), and Jaluit (Kosrae) did not
# switch from +10 to +09 until 1941-04-01; see the 'australasia' file.
# Jordan
#
@ -2004,8 +2211,10 @@ Zone Asia/Amman 2:23:44 - LMT 1931
# and in Byalokoz) lists Ural river (plus 10 versts on its left bank) in
# the third time belt (before 1930 this means +03).
# From Paul Eggert (2016-12-06):
# The tables below reflect Golosunov's remarks, with exceptions as noted.
# From Alexander Konzurovski (2018-12-20):
# Qyzylorda Region (Asia/Qyzylorda) is changing its time zone from
# UTC+6 to UTC+5 effective December 21st, 2018. The legal document is
# located here: http://adilet.zan.kz/rus/docs/P1800000817 (Russian language).
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
#
@ -2019,8 +2228,6 @@ Zone Asia/Almaty 5:07:48 - LMT 1924 May 2 # or Alma-Ata
6:00 RussiaAsia +06/+07 2004 Oct 31 2:00s
6:00 - +06
# Qyzylorda (aka Kyzylorda, Kizilorda, Kzyl-Orda, etc.) (KZ-KZY)
# This currently includes Qostanay (aka Kostanay, Kustanay) (KZ-KUS);
# see comments below.
Zone Asia/Qyzylorda 4:21:52 - LMT 1924 May 2
4:00 - +04 1930 Jun 21
5:00 - +05 1981 Apr 1
@ -2031,21 +2238,22 @@ Zone Asia/Qyzylorda 4:21:52 - LMT 1924 May 2
5:00 RussiaAsia +05/+06 1992 Jan 19 2:00s
6:00 RussiaAsia +06/+07 1992 Mar 29 2:00s
5:00 RussiaAsia +05/+06 2004 Oct 31 2:00s
6:00 - +06
# The following zone is like Asia/Qyzylorda except for being one
# hour earlier from 1991-09-29 to 1992-03-29. The 1991/2 rules for
# Qostanay are unclear partly because of the 1997 Turgai
# reorganization, so this zone is commented out for now.
#Zone Asia/Qostanay 4:14:20 - LMT 1924 May 2
# 4:00 - +04 1930 Jun 21
# 5:00 - +05 1981 Apr 1
# 5:00 1:00 +06 1981 Oct 1
# 6:00 - +06 1982 Apr 1
# 5:00 RussiaAsia +05/+06 1991 Mar 31 2:00s
# 4:00 RussiaAsia +04/+05 1992 Jan 19 2:00s
# 5:00 RussiaAsia +05/+06 2004 Oct 31 2:00s
# 6:00 - +06
6:00 - +06 2018 Dec 21 0:00
5:00 - +05
#
# Qostanay (aka Kostanay, Kustanay) (KZ-KUS)
# The 1991/2 rules are unclear partly because of the 1997 Turgai
# reorganization.
Zone Asia/Qostanay 4:14:28 - LMT 1924 May 2
4:00 - +04 1930 Jun 21
5:00 - +05 1981 Apr 1
5:00 1:00 +06 1981 Oct 1
6:00 - +06 1982 Apr 1
5:00 RussiaAsia +05/+06 1991 Mar 31 2:00s
4:00 RussiaAsia +04/+05 1992 Jan 19 2:00s
5:00 RussiaAsia +05/+06 2004 Oct 31 2:00s
6:00 - +06
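# Because Asia/Qostanay is a newly split zone, code that must also run on
# JDKs whose bundled tzdata predates the split can probe for the ID and
# fall back to Asia/Qyzylorda, which previously covered Qostanay (a sketch):
#   import java.time.ZoneId;
#   import java.time.zone.ZoneRulesException;
#
#   ZoneId zone;
#   try {
#       zone = ZoneId.of("Asia/Qostanay");
#   } catch (ZoneRulesException e) {
#       zone = ZoneId.of("Asia/Qyzylorda");  // pre-split data: Qostanay was folded in here
#   }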
# Aqtöbe (aka Aktobe, formerly Aktyubinsk) (KZ-AKT)
Zone Asia/Aqtobe 3:48:40 - LMT 1924 May 2
4:00 - +04 1930 Jun 21
@ -2139,21 +2347,43 @@ Zone Asia/Bishkek 4:58:24 - LMT 1924 May 2
# started at June 1 in that year. For another example, the article in
# 1988 said that DST started at 2:00 AM in that year.
# From Phake Nick (2018-10-27):
# 1. According to official announcements from the Korean government, the DST
# end dates in South Korea should be
# 1955-09-08 without specifying time
# http://theme.archives.go.kr/next/common/viewEbook.do?singleData=N&archiveEventId=0027977557
# 1956-09-29 without specifying time
# http://theme.archives.go.kr/next/common/viewEbook.do?singleData=N&archiveEventId=0027978341
# 1957-09-21 24 o'clock
# http://theme.archives.go.kr/next/common/viewEbook.do?singleData=N&archiveEventId=0027979690#3
# 1958-09-20 24 o'clock
# http://theme.archives.go.kr/next/common/viewEbook.do?singleData=N&archiveEventId=0027981189
# 1959-09-19 24 o'clock
# http://theme.archives.go.kr/next/common/viewEbook.do?singleData=N&archiveEventId=0027982974#2
# 1960-09-17 24 o'clock
# http://theme.archives.go.kr/next/common/viewEbook.do?singleData=N&archiveEventId=0028044104
# ...
# 2.... https://namu.wiki/w/대한민국%20표준시 ... [says]
# when Korea was using GMT+8:30 as standard time, the international
# aviation/marine/meteorological industry in the country refused to
# follow and continued to use GMT+9:00 for interoperability.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule ROK 1948 only - Jun 1 0:00 1:00 D
Rule ROK 1948 only - Sep 13 0:00 0 S
Rule ROK 1949 only - Apr 3 0:00 1:00 D
Rule ROK 1949 1951 - Sep Sun>=8 0:00 0 S
Rule ROK 1950 only - Apr 1 0:00 1:00 D
Rule ROK 1951 only - May 6 0:00 1:00 D
Rule ROK 1955 only - May 5 0:00 1:00 D
Rule ROK 1955 only - Sep 9 0:00 0 S
Rule ROK 1956 only - May 20 0:00 1:00 D
Rule ROK 1956 only - Sep 30 0:00 0 S
Rule ROK 1957 1960 - May Sun>=1 0:00 1:00 D
Rule ROK 1957 1960 - Sep Sun>=18 0:00 0 S
Rule ROK 1987 1988 - May Sun>=8 2:00 1:00 D
Rule ROK 1987 1988 - Oct Sun>=8 3:00 0 S
Rule ROK 1948 only - Jun 1 0:00 1:00 D
Rule ROK 1948 only - Sep 12 24:00 0 S
Rule ROK 1949 only - Apr 3 0:00 1:00 D
Rule ROK 1949 1951 - Sep Sat>=7 24:00 0 S
Rule ROK 1950 only - Apr 1 0:00 1:00 D
Rule ROK 1951 only - May 6 0:00 1:00 D
Rule ROK 1955 only - May 5 0:00 1:00 D
Rule ROK 1955 only - Sep 8 24:00 0 S
Rule ROK 1956 only - May 20 0:00 1:00 D
Rule ROK 1956 only - Sep 29 24:00 0 S
Rule ROK 1957 1960 - May Sun>=1 0:00 1:00 D
Rule ROK 1957 1960 - Sep Sat>=17 24:00 0 S
Rule ROK 1987 1988 - May Sun>=8 2:00 1:00 D
Rule ROK 1987 1988 - Oct Sun>=8 3:00 0 S
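# The rewritten fall-back lines are equivalent to the old ones: a Saturday
# on or after the 7th at 24:00 is the same instant as a Sunday on or after
# the 8th at 0:00. A jshell sketch of the equivalence for 1949:
#   import java.time.DayOfWeek;
#   import java.time.LocalDate;
#   import java.time.temporal.TemporalAdjusters;
#
#   // old form: "Sep Sun>=8 0:00"
#   LocalDate oldDay = LocalDate.of(1949, 9, 8)
#           .with(TemporalAdjusters.nextOrSame(DayOfWeek.SUNDAY));
#   // new form: "Sep Sat>=7 24:00", i.e. midnight at the end of that Saturday
#   LocalDate newDay = LocalDate.of(1949, 9, 7)
#           .with(TemporalAdjusters.nextOrSame(DayOfWeek.SATURDAY)).plusDays(1);
#   oldDay.equals(newDay)   // => true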
# From Paul Eggert (2016-08-23):
# The Korean Wikipedia entry gives the following sources for UT offsets:
@ -2882,9 +3112,15 @@ Zone Asia/Karachi 4:28:12 - LMT 1907
# the official website, though the decree did not specify the exact
# time of the time shift.
# http://www.palestinecabinet.gov.ps/Website/AR/NDecrees/ViewFile.ashx?ID=e7a42ab7-ee23-435a-b9c8-a4f7e81f3817
# From Even Scharning (2019-03-23):
# DST in Palestine will start on 30 March this year, not 23 March as the time
# zone database predicted.
# https://ramallah.news/post/123610
#
# From Paul Eggert (2018-03-16):
# For 2016 on, predict spring transitions on March's fourth Saturday at 01:00.
# From Tim Parenti (2019-03-23):
# Combining this with the rules observed since 2016, adjust our spring
# transition guess to Mar Sat>=24.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule EgyptAsia 1957 only - May 10 0:00 1:00 S
@ -2915,7 +3151,7 @@ Rule Palestine 2012 only - Sep 21 1:00 0 -
Rule Palestine 2013 only - Sep Fri>=21 0:00 0 -
Rule Palestine 2014 2015 - Oct Fri>=21 0:00 0 -
Rule Palestine 2015 only - Mar lastFri 24:00 1:00 S
Rule Palestine 2016 max - Mar Sat>=22 1:00 1:00 S
Rule Palestine 2016 max - Mar Sat>=24 1:00 1:00 S
Rule Palestine 2016 max - Oct lastSat 1:00 0 -
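# "Sat>=24" selects the first Saturday on or after the 24th; a jshell sketch
# for 2019 confirms it lands on the reported start date:
#   import java.time.DayOfWeek;
#   import java.time.LocalDate;
#   import java.time.temporal.TemporalAdjusters;
#
#   LocalDate.of(2019, 3, 24)
#           .with(TemporalAdjusters.nextOrSame(DayOfWeek.SATURDAY))   // => 2019-03-30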
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
@ -2943,6 +3179,11 @@ Zone Asia/Hebron 2:20:23 - LMT 1900 Oct
# no information
# Philippines
# From Paul Eggert (2018-11-18):
# The Spanish initially used American (west-of-Greenwich) time.
# It is unknown what time Manila kept when the British occupied it from
# 1762-10-06 through 1764-04; for now assume it kept American time.
# On 1844-08-16, Narciso Clavería, governor-general of the
# Philippines, issued a proclamation announcing that 1844-12-30 was to
# be immediately followed by 1845-01-01; see R.H. van Gent's
@ -3028,8 +3269,8 @@ Link Asia/Qatar Asia/Bahrain
# going to run on Higgins Time.' And so, until last year, it did." See:
# Antar E. Dinner at When? Saudi Aramco World, 1969 March/April. 2-3.
# http://archive.aramcoworld.com/issue/196902/dinner.at.when.htm
# newspapers.com says a similar story about Higgins was published in the Port
# Angeles (WA) Evening News, 1965-03-10, page 5, but I lack access to the text.
# Also see: Antar EN. Arabian flying is confusing.
# Port Angeles (WA) Evening News. 1965-03-10. page 3.
#
# The TZ database cannot represent quasi-solar time; airline time is the best
# we can do. The 1946 foreign air news digest of the U.S. Civil Aeronautics
@ -3402,5 +3643,17 @@ Zone Asia/Ho_Chi_Minh 7:06:40 - LMT 1906 Jul 1
8:00 - +08 1975 Jun 13
7:00 - +07
# From Paul Eggert (2019-02-19):
#
# The Ho Chi Minh entry suffices for most purposes as it agrees with all of
# Vietnam since 1975-06-13. Presumably clocks often changed in south Vietnam
# in the early 1970s as locations changed hands during the war; however the
# details are unknown and would likely be too voluminous for this database.
#
# For timestamps in north Vietnam back to 1970 (the tzdb cutoff),
# use Asia/Bangkok; see the VN entries in the file zone1970.tab.
# For timestamps before 1970, see Asia/Hanoi in the file 'backzone'.
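# A jshell sketch of the advice above (offsets as compiled from this data):
#   import java.time.Instant;
#   import java.time.ZoneId;
#
#   Instant t = Instant.parse("1972-01-01T00:00:00Z");
#   // south Vietnam was +08 until 1975-06-13:
#   t.atZone(ZoneId.of("Asia/Ho_Chi_Minh")).getOffset()   // => +08:00
#   // for the north from 1970 on, Asia/Bangkok's +07 matches:
#   t.atZone(ZoneId.of("Asia/Bangkok")).getOffset()       // => +07:00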
# Yemen
# See Asia/Riyadh.

@ -425,10 +425,44 @@ Zone Pacific/Tahiti -9:58:16 - LMT 1912 Oct # Papeete
# it is uninhabited.
# Guam
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
# http://guamlegislature.com/Public_Laws_5th/PL05-025.pdf
# http://documents.guam.gov/wp-content/uploads/E.O.-59-7-Guam-Daylight-Savings-Time-May-6-1959.pdf
Rule Guam 1959 only - Jun 27 2:00 1:00 D
# http://documents.guam.gov/wp-content/uploads/E.O.-61-5-Revocation-of-Daylight-Saving-Time-and-Restoratio.pdf
Rule Guam 1961 only - Jan 29 2:00 0 S
# http://documents.guam.gov/wp-content/uploads/E.O.-67-13-Guam-Daylight-Savings-Time.pdf
Rule Guam 1967 only - Sep 1 2:00 1:00 D
# http://documents.guam.gov/wp-content/uploads/E.O.-69-2-Repeal-of-Guam-Daylight-Saving-Time.pdf
Rule Guam 1969 only - Jan 26 0:01 0 S
# http://documents.guam.gov/wp-content/uploads/E.O.-69-10-Guam-Daylight-Saving-Time.pdf
Rule Guam 1969 only - Jun 22 2:00 1:00 D
Rule Guam 1969 only - Aug 31 2:00 0 S
# http://documents.guam.gov/wp-content/uploads/E.O.-70-10-Guam-Daylight-Saving-Time.pdf
# http://documents.guam.gov/wp-content/uploads/E.O.-70-30-End-of-Guam-Daylight-Saving-Time.pdf
# http://documents.guam.gov/wp-content/uploads/E.O.-71-5-Guam-Daylight-Savings-Time.pdf
Rule Guam 1970 1971 - Apr lastSun 2:00 1:00 D
Rule Guam 1970 1971 - Sep Sun>=1 2:00 0 S
# http://documents.guam.gov/wp-content/uploads/E.O.-73-28.-Guam-Day-light-Saving-Time.pdf
Rule Guam 1973 only - Dec 16 2:00 1:00 D
# http://documents.guam.gov/wp-content/uploads/E.O.-74-7-Guam-Daylight-Savings-Time-Rescinded.pdf
Rule Guam 1974 only - Feb 24 2:00 0 S
# http://documents.guam.gov/wp-content/uploads/E.O.-76-13-Daylight-Savings-Time.pdf
Rule Guam 1976 only - May 26 2:00 1:00 D
# http://documents.guam.gov/wp-content/uploads/E.O.-76-25-Revocation-of-E.O.-76-13.pdf
Rule Guam 1976 only - Aug 22 2:01 0 S
# http://documents.guam.gov/wp-content/uploads/E.O.-77-4-Daylight-Savings-Time.pdf
Rule Guam 1977 only - Apr 24 2:00 1:00 D
# http://documents.guam.gov/wp-content/uploads/E.O.-77-18-Guam-Standard-Time.pdf
Rule Guam 1977 only - Aug 28 2:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Guam -14:21:00 - LMT 1844 Dec 31
9:39:00 - LMT 1901 # Agana
10:00 - GST 2000 Dec 23 # Guam
10:00 - GST 1941 Dec 10 # Guam
9:00 - +09 1944 Jul 31
10:00 Guam G%sT 2000 Dec 23
10:00 - ChST # Chamorro Standard Time
Link Pacific/Guam Pacific/Saipan # N Mariana Is
@ -450,31 +484,56 @@ Zone Pacific/Kiritimati -10:29:20 - LMT 1901
# Marshall Is
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Majuro 11:24:48 - LMT 1901
11:00 - +11 1969 Oct
12:00 - +12
Zone Pacific/Kwajalein 11:09:20 - LMT 1901
11:00 - +11 1969 Oct
-12:00 - -12 1993 Aug 20
12:00 - +12
Zone Pacific/Majuro 11:24:48 - LMT 1901
11:00 - +11 1914 Oct
9:00 - +09 1919 Feb 1
11:00 - +11 1937
10:00 - +10 1941 Apr 1
9:00 - +09 1944 Jan 30
11:00 - +11 1969 Oct
12:00 - +12
Zone Pacific/Kwajalein 11:09:20 - LMT 1901
11:00 - +11 1937
10:00 - +10 1941 Apr 1
9:00 - +09 1944 Feb 6
11:00 - +11 1969 Oct
-12:00 - -12 1993 Aug 20 24:00
12:00 - +12
# Micronesia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Chuuk 10:07:08 - LMT 1901
10:00 - +10
Zone Pacific/Pohnpei 10:32:52 - LMT 1901 # Kolonia
11:00 - +11
Zone Pacific/Kosrae 10:51:56 - LMT 1901
11:00 - +11 1969 Oct
12:00 - +12 1999
11:00 - +11
Zone Pacific/Chuuk -13:52:52 - LMT 1844 Dec 31
10:07:08 - LMT 1901
10:00 - +10 1914 Oct
9:00 - +09 1919 Feb 1
10:00 - +10 1941 Apr 1
9:00 - +09 1945 Aug
10:00 - +10
Zone Pacific/Pohnpei -13:27:08 - LMT 1844 Dec 31 # Kolonia
10:32:52 - LMT 1901
11:00 - +11 1914 Oct
9:00 - +09 1919 Feb 1
11:00 - +11 1937
10:00 - +10 1941 Apr 1
9:00 - +09 1945 Aug
11:00 - +11
Zone Pacific/Kosrae -13:08:04 - LMT 1844 Dec 31
10:51:56 - LMT 1901
11:00 - +11 1914 Oct
9:00 - +09 1919 Feb 1
11:00 - +11 1937
10:00 - +10 1941 Apr 1
9:00 - +09 1945 Aug
11:00 - +11 1969 Oct
12:00 - +12 1999
11:00 - +11
# Nauru
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Nauru 11:07:40 - LMT 1921 Jan 15 # Uaobe
11:30 - +1130 1942 Mar 15
9:00 - +09 1944 Aug 15
11:30 - +1130 1979 May
11:30 - +1130 1942 Aug 29
9:00 - +09 1945 Sep 8
11:30 - +1130 1979 Feb 10 2:00
12:00 - +12
# New Caledonia
@ -575,8 +634,9 @@ Zone Pacific/Norfolk 11:11:52 - LMT 1901 # Kingston
# Palau (Belau)
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Palau 8:57:56 - LMT 1901 # Koror
9:00 - +09
Zone Pacific/Palau -15:02:04 - LMT 1844 Dec 31 # Koror
8:57:56 - LMT 1901
9:00 - +09
# Papua New Guinea
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
@ -838,7 +898,7 @@ Zone Pacific/Wallis 12:15:20 - LMT 1901
# tz@iana.org for general use in the future). For more, please see
# the file CONTRIBUTING in the tz distribution.
# From Paul Eggert (2017-02-10):
# From Paul Eggert (2018-11-18):
#
# Unless otherwise specified, the source for data through 1990 is:
# Thomas G. Shanks and Rique Pottenger, The International Atlas (6th edition),
@ -863,6 +923,7 @@ Zone Pacific/Wallis 12:15:20 - LMT 1901
# A reliable and entertaining source about time zones is
# Derek Howse, Greenwich time and longitude, Philip Wilson Publishers (1997).
#
# I invented the abbreviation marked "*".
# The following abbreviations are from other sources.
# Corrections are welcome!
# std dst
@ -870,7 +931,7 @@ Zone Pacific/Wallis 12:15:20 - LMT 1901
# 8:00 AWST AWDT Western Australia
# 9:30 ACST ACDT Central Australia
# 10:00 AEST AEDT Eastern Australia
# 10:00 GST Guam through 2000
# 10:00 GST GDT* Guam through 2000
# 10:00 ChST Chamorro
# 11:30 NZMT NZST New Zealand through 1945
# 12:00 NZST NZDT New Zealand 1946-present
@ -1569,28 +1630,70 @@ Zone Pacific/Wallis 12:15:20 - LMT 1901
# Kwajalein
# In comp.risks 14.87 (26 August 1993), Peter Neumann writes:
# I wonder what happened in Kwajalein, where there was NO Friday,
# 1993-08-20. Thursday night at midnight Kwajalein switched sides with
# respect to the International Date Line, to rejoin its fellow islands,
# going from 11:59 p.m. Thursday to 12:00 m. Saturday in a blink.
# From an AP article (1993-08-22):
# "The nearly 3,000 Americans living on this remote Pacific atoll have a good
# excuse for not remembering Saturday night: there wasn't one. Residents were
# going to bed Friday night and waking up Sunday morning because at midnight
# -- 8 A.M. Eastern daylight time on Saturday -- Kwajalein was jumping from
# one side of the international date line to the other."
# "In Marshall Islands, Friday is followed by Sunday", NY Times. 1993-08-22.
# https://www.nytimes.com/1993/08/22/world/in-marshall-islands-friday-is-followed-by-sunday.html
# From Phake Nick (2018-10-27):
# <https://wiki.suikawiki.org/n/南洋群島の標準時> ... pointed out that
# currently tzdata says Pacific/Kwajalein switched from GMT+11 to GMT-12 in
# October 1969 without explanation; however, a 1993 article from the NYT says
# it synchronized its day with the US mainland about 40 years earlier, and
# thus the switch should have occurred around the 1950s instead.
#
# From Paul Eggert (2018-11-18):
# The NYT (actually, AP) article is vague and possibly wrong about this.
# The article says the earlier switch was "40 years ago when the United States
# Army established a missile test range here". However, the Kwajalein Test
# Center was established on 1960-10-01 and was run by the US Navy. It was
# transferred to the US Army on 1964-07-01. See "Seize the High Ground"
# <https://history.army.mil/html/books/070/70-88-1/cmhPub_70-88-1.pdf>.
# Given that Shanks was right on the money about the 1993 change, I'm inclined
# to take Shanks's word for the 1969 change unless we find better evidence.
# N Mariana Is, Guam
# From Phake Nick (2018-10-27):
# Guam Island was briefly annexed by Japan during ... year 1941-1944 ...
# however there is no detailed information about what time it used during
# that period. It would probably be reasonable to assume Guam used GMT+9
# during that period, like the surrounding area.
# From Paul Eggert (2018-11-18):
# Howse writes (p 153) "The Spaniards, on the other hand, reached the
# Philippines and the Ladrones from America," and implies that the Ladrones
# (now called the Marianas) kept American date for quite some time.
# For now, we assume the Ladrones switched at the same time as the Philippines;
# see Asia/Manila.
#
# Use 1941-12-10 and 1944-07-31 for Guam WWII transitions, as the rough start
# and end of Japanese control of Agana. We don't know whether the Northern
# Marianas followed Guam's DST rules from 1959 through 1977; for now, assume
# they did as that avoids the need for a separate zone due to our 1970 cutoff.
#
# US Public Law 106-564 (2000-12-23) made UT +10 the official standard time,
# under the name "Chamorro Standard Time". There is no official abbreviation,
# but Congressman Robert A. Underwood, author of the bill that became law,
# wrote in a press release (2000-12-27) that he will seek the use of "ChST".
# See also the commentary for Micronesia.
# Micronesia
# Marshall Is
# See the commentary for Micronesia.
# Micronesia (and nearby)
# From Paul Eggert (2018-11-18):
# Like the Ladrones (see Guam commentary), assume the Spanish East Indies
# kept American time until the Philippines switched at the end of 1844.
# Alan Eugene Davis writes (1996-03-16),
# "I am certain, having lived there for the past decade, that 'Truk'
@ -1606,6 +1709,95 @@ Zone Pacific/Wallis 12:15:20 - LMT 1901
# that Truk and Yap are UT +10, and Ponape and Kosrae are +11.
# We don't know when Kosrae switched from +12; assume January 1 for now.
# From Phake Nick (2018-10-27):
#
# From a Japanese wiki site https://wiki.suikawiki.org/n/南洋群島の標準時
# ...
# For "Southern Islands" (modern region of Mariana + Palau + Federation of
# Micronesia + Marshall Islands):
#
# A 1906 Japanese magazine showed the Caroline Islands and Mariana Islands,
# which were occupied by Germany at the time, as GMT+10, together with the
# likes of German New Guinea. However, there is a marking saying it had not
# been implemented (yet). No further information after that was found.
#
# Japan invaded those islands in 1914, and records show that they were
# instructed to use JST at the time.
#
# A January 1915 telecommunication record on Jaluit Atoll shows they used
# the meridian of 170E as standard time (GMT+11:20), which is close to the
# longitude of the atoll.
# A February 1915 record says the 170E standard time was to be used until
# February 9 noon, and after February 9 noon they were to use JST.
# However, these are times used within the Japanese military at the time and
# probably do not reflect the time used by local residents (that is, if they
# kept their own time back then).
#
# In January 1919 the occupying force issued a command that split the area
# into three different time zones along the meridians of 135E, 150E, and 165E
# (JST+0, +1, +2), and the command was to become effective from February 1 of
# the same year. Although the command still applied only to the occupying
# force itself, later publications described the time as the standard time
# for the occupied area, and thus it can probably be seen as such.
# * Area that use meridian of 135E: Palau and Yap civil administration area
# (Southern Islands Western Standard Time)
# * Area that use meridian of 150E: Truk (Chuuk) and Saipan civil
# administration area (Southern Islands Central Standard Time)
# * Area that use meridian of 165E: Ponape (Pohnpei) and Jaluit civil
# administration area (Southern Islands Eastern Standard Time).
# * In the next few years the Japanese occupation of those islands was
# formalized via a League of Nations Mandate (the South Pacific Mandate) and
# a formal governance structure was established; these districts [became
# subprefectures] and the time zone classification was inherited as the
# standard time of the area.
# * Saipan subprefecture included the Mariana Islands (excluding Guam, which
# was occupied by America at the time), Palau and Yap subprefectures ruled
# the Western Caroline Islands with 137E longitude as the border, Truk and
# Ponape subprefectures ruled the Eastern Caroline Islands with 154E as the
# border, Ponape subprefecture also ruled the part of the Marshall Islands
# west of 164E starting from (1918?), and Jaluit subprefecture ruled the
# rest of the Marshall Islands.
#
# And then in 1937, an announcement was made to change the time in the
# area into two time zones:
# * Area that use meridian of 135E: area administered by Palau, Yap and
# Saipan subprefecture (Southern Islands Western Standard Time)
# * Area that use meridian of 150E: area administered by Truk (Chuuk),
# Ponape (Pohnpei) and Jaluit subprefecture (Southern Islands Eastern
# Standard Time)
#
# Another announcement issued in 1941 said that on April 1 of that year, the
# standard time of the Southern Islands would be changed to use the meridian
# of 135E (GMT+9), thus abolishing time zone differences within the area.
#
# Then the Pacific theater of WWII started and Japan slowly lost control of
# the islands. The web page I linked above contains no information about
# this period of time....
#
# After the end of WWII, in February 1946, a document written by (former?)
# Japanese military personnel describes a 3-hour time difference between
# Caroline Islands/Wake Island time and Chungking time, which would mean
# the time being used there at the time was GMT+10.
#
# After that, the area became the Trust Territory of the Pacific Islands
# under American administration from 1947. The site lists some
# American/international books/maps/publications about the time used in the
# area during this period, but they do not seem to be reliable, so it would
# be best if someone knew where more reliable information could be found.
#
#
# From Paul Eggert (2018-11-18):
#
# For the above, use vague dates like "1914" and "1945" for transitions that
# plausibly exist but for which the details are not known. The information
# for Wake is too sketchy to act on.
#
# The 1906 GMT+10 info about German-controlled islands might never have been
# put into effect, so omit it from the data for now.
#
# The Jaluit info governs Kwajalein.
# Midway
@ -1623,6 +1815,29 @@ Zone Pacific/Wallis 12:15:20 - LMT 1901
# started DST on June 3. Possibly DST was observed other years
# in Midway, but we have no record of it.
# Nauru
# From Phake Nick (2018-10-31):
# Currently, the tz database says Nauru used LMT until 1921, and then
# switched to GMT+11:30 for the next two decades.
# However, a number of time zone maps published in America/Japan back then
# showed its time zone as GMT+11, per https://wiki.suikawiki.org/n/ナウルの標準時
# And it would also be nice if the 1921 transition date could be sourced.
# ...
# The "Nauru Standard Time Act 1978 Time Change"
# http://ronlaw.gov.nr/nauru_lpms/files/gazettes/4b23a17d2030150404db7a5fa5872f52.pdf#page=3
# based on "Nauru Standard Time Act 1978 Time Change"
# http://www.paclii.org/nr/legis/num_act/nsta1978207/ defined that "Nauru
# Alternative Time" (GMT+12) should be in effect from 1979 Feb.
#
# From Paul Eggert (2018-11-19):
# The 1921-01-15 introduction of standard time is in Shanks; it is also in
# "Standard Time Throughout the World", US National Bureau of Standards (1935),
# page 3, which does not give the UT offset. In response to a comment by
# Phake Nick I set the Nauru time of occupation by Japan to
# 1942-08-29/1945-09-08 by using dates from:
# https://en.wikipedia.org/wiki/Japanese_occupation_of_Nauru
# Norfolk
# From Alexander Krivenyshev (2015-09-23):
@ -1638,6 +1853,9 @@ Zone Pacific/Wallis 12:15:20 - LMT 1901
# other than in 1974/5. See:
# https://www.timeanddate.com/time/australia/norfolk-island.html
# Palau
# See commentary for Micronesia.
# Pitcairn
# From Rives McDow (1999-11-08):
@ -1802,6 +2020,9 @@ Zone Pacific/Wallis 12:15:20 - LMT 1901
# From Paul Eggert (2003-03-23):
# We have no other report of DST in Wake Island, so omit this info for now.
# See also the commentary for Micronesia.
###############################################################################
# The International Date Line

@ -100,6 +100,7 @@ Link Pacific/Easter Chile/EasterIsland
Link America/Havana Cuba
Link Africa/Cairo Egypt
Link Europe/Dublin Eire
Link Etc/UTC Etc/UCT
Link Europe/London Europe/Belfast
Link Europe/Chisinau Europe/Tiraspol
Link Europe/London GB
@ -134,7 +135,7 @@ Link Asia/Taipei ROC
Link Asia/Seoul ROK
Link Asia/Singapore Singapore
Link Europe/Istanbul Turkey
Link Etc/UCT UCT
Link Etc/UTC UCT
Link America/Anchorage US/Alaska
Link America/Adak US/Aleutian
Link America/Phoenix US/Arizona

@ -42,7 +42,6 @@
Zone Etc/GMT 0 - GMT
Zone Etc/UTC 0 - UTC
Zone Etc/UCT 0 - UCT
# The following link uses older naming conventions,
# but it belongs here, not in the file 'backward',

@ -1878,7 +1878,7 @@ Zone Europe/Luxembourg 0:24:36 - LMT 1904 Jun
1:00 Belgium CE%sT 1977
1:00 EU CE%sT
# Macedonia
# North Macedonia
# See Europe/Belgrade.
# Malta
@ -3382,7 +3382,7 @@ Zone Europe/Belgrade 1:22:00 - LMT 1884
Link Europe/Belgrade Europe/Ljubljana # Slovenia
Link Europe/Belgrade Europe/Podgorica # Montenegro
Link Europe/Belgrade Europe/Sarajevo # Bosnia and Herzegovina
Link Europe/Belgrade Europe/Skopje # Macedonia
Link Europe/Belgrade Europe/Skopje # North Macedonia
Link Europe/Belgrade Europe/Zagreb # Croatia
# Slovakia

@ -32,8 +32,8 @@
# All text uses UTF-8 encoding. The columns of the table are as follows:
#
# 1. ISO 3166-1 alpha-2 country code, current as of
# ISO 3166-1 N905 (2016-11-15). See: Updates on ISO 3166-1
# http://isotc.iso.org/livelink/livelink/Open/16944257
# ISO 3166-1 N976 (2018-11-06). See: Updates on ISO 3166-1
# https://isotc.iso.org/livelink/livelink/Open/16944257
# 2. The usual English name for the coded region,
# chosen so that alphabetic sorting of subsets produces helpful lists.
# This is not the same as the English name in the ISO 3166 tables.
@ -189,7 +189,7 @@ ME Montenegro
MF St Martin (French)
MG Madagascar
MH Marshall Islands
MK Macedonia
MK North Macedonia
ML Mali
MM Myanmar (Burma)
MN Mongolia
@ -258,7 +258,7 @@ ST Sao Tome & Principe
SV El Salvador
SX St Maarten (Dutch)
SY Syria
SZ Swaziland
SZ Eswatini (Swaziland)
TC Turks & Caicos Is
TD Chad
TF French Southern & Antarctic Lands

@ -42,9 +42,12 @@
# See: Levine J. Coordinated Universal Time and the leap second.
# URSI Radio Sci Bull. 2016;89(4):30-6. doi:10.23919/URSIRSB.2016.7909995
# <https://ieeexplore.ieee.org/document/7909995>.
# There were no leap seconds before 1972, because the official mechanism
# accounting for the discrepancy between atomic time and the earth's rotation
# did not exist.
# did not exist. The first ("1 Jan 1972") data line in leap-seconds.list
# does not denote a leap second; it denotes the start of the current definition
# of UTC.
# The correction (+ or -) is made at the given time, so lines
# will typically look like:
@ -83,7 +86,7 @@ Leap 2016 Dec 31 23:59:60 + S
# POSIX timestamps for the data in this file:
#updated 1467936000
#expires 1561680000
#expires 1577491200
# Updated through IERS Bulletin C56
# File expires on: 28 June 2019
# Updated through IERS Bulletin C57
# File expires on: 28 December 2019
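# Since the #updated/#expires values are POSIX timestamps, the expiry date
# can be double-checked with a one-line jshell snippet:
#   import java.time.Instant;
#
#   Instant.ofEpochSecond(1577491200L)   // => 2019-12-28T00:00:00Z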

@ -622,6 +622,26 @@ Zone America/Los_Angeles -7:52:58 - LMT 1883 Nov 18 12:07:02
# between AKST and AKDT from now on....
# https://www.krbd.org/2015/10/30/annette-island-times-they-are-a-changing/
# From Ryan Stanley (2018-11-06):
# The Metlakatla community in Alaska has decided not to change its
# clock back an hour starting on November 4th, 2018 (day before yesterday).
# They will be gmtoff=-28800 year-round.
# https://www.facebook.com/141055983004923/photos/pb.141055983004923.-2207520000.1541465673./569081370202380/
# From Paul Eggert (2018-12-16):
# In a 2018-12-11 special election, Metlakatla voted to go back to
# Alaska time (including daylight saving time) starting next year.
# https://www.krbd.org/2018/12/12/metlakatla-to-follow-alaska-standard-time-allow-liquor-sales/
#
# From Ryan Stanley (2019-01-11):
# The community will be changing back on the 20th of this month...
# From Tim Parenti (2019-01-11):
# Per an announcement on the Metlakatla community's official Facebook page, the
# "fall back" will be on Sunday 2019-01-20 at 02:00:
# https://www.facebook.com/141055983004923/photos/607150969728753/
# So they won't be waiting for Alaska to join them on 2019-03-10, but will
# rather change their clocks twice in seven weeks.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Juneau 15:02:19 - LMT 1867 Oct 19 15:33:32
-8:57:41 - LMT 1900 Aug 20 12:00
@ -648,6 +668,8 @@ Zone America/Metlakatla 15:13:42 - LMT 1867 Oct 19 15:44:55
-8:00 - PST 1969
-8:00 US P%sT 1983 Oct 30 2:00
-8:00 - PST 2015 Nov 1 2:00
-9:00 US AK%sT 2018 Nov 4 2:00
-8:00 - PST 2019 Jan 20 2:00
-9:00 US AK%sT
Zone America/Yakutat 14:41:05 - LMT 1867 Oct 19 15:12:18
-9:18:55 - LMT 1900 Aug 20 12:00
@ -808,6 +830,22 @@ Zone America/Boise -7:44:49 - LMT 1883 Nov 18 12:15:11
# For a map of Indiana's time zone regions, see:
# https://en.wikipedia.org/wiki/Time_in_Indiana
#
# From Paul Eggert (2018-11-30):
# A brief but entertaining history of time in Indiana describes a 1949 debate
# in the Indiana House where city legislators (who favored "fast time")
# tussled with farm legislators (who didn't) over a bill to outlaw DST:
# "Lacking enough votes, the city faction tries to filibuster until time runs
# out on the session at midnight, but rural champion Rep. Herbert Copeland,
# R-Madison, leans over the gallery railing and forces the official clock
# back to 9 p.m., breaking it in the process. The clock sticks on 9 as the
# debate rages on into the night. The filibuster finally dies out and the
# bill passes, while outside the chamber, clocks read 3:30 a.m. In the end,
# it doesn't matter which side won. The law has no enforcement powers and
# is simply ignored by fast-time communities."
# How Indiana went from 'God's time' to split zones and daylight-saving.
# Indianapolis Star. 2018-11-27 14:58 -05.
# https://www.indystar.com/story/news/politics/2018/11/27/indianapolis-indiana-time-zone-history-central-eastern-daylight-savings-time/2126300002/
#
# From Paul Eggert (2007-08-17):
# Since 1970, most of Indiana has been like America/Indiana/Indianapolis,
# with the following exceptions:

@ -262,6 +262,7 @@ KW +2920+04759 Asia/Kuwait
KY +1918-08123 America/Cayman
KZ +4315+07657 Asia/Almaty Kazakhstan (most areas)
KZ +4448+06528 Asia/Qyzylorda Qyzylorda/Kyzylorda/Kzyl-Orda
KZ +5312+06337 Asia/Qostanay Qostanay/Kostanay/Kustanay
KZ +5017+05710 Asia/Aqtobe Aqtobe/Aktobe
KZ +4431+05016 Asia/Aqtau Mangghystau/Mankistau
KZ +4707+05156 Asia/Atyrau Atyrau/Atirau/Gur'yev
@ -355,9 +356,9 @@ RS +4450+02030 Europe/Belgrade
RU +5443+02030 Europe/Kaliningrad MSK-01 - Kaliningrad
RU +554521+0373704 Europe/Moscow MSK+00 - Moscow area
RU +4457+03406 Europe/Simferopol MSK+00 - Crimea
RU +4844+04425 Europe/Volgograd MSK+00 - Volgograd
RU +5836+04939 Europe/Kirov MSK+00 - Kirov
RU +4621+04803 Europe/Astrakhan MSK+01 - Astrakhan
RU +4844+04425 Europe/Volgograd MSK+01 - Volgograd
RU +5134+04602 Europe/Saratov MSK+01 - Saratov
RU +5420+04824 Europe/Ulyanovsk MSK+01 - Ulyanovsk
RU +5312+05009 Europe/Samara MSK+01 - Samara, Udmurtia

@ -108,7 +108,7 @@ public class CLDRConverter {
private static final ResourceBundle.Control defCon =
ResourceBundle.Control.getControl(ResourceBundle.Control.FORMAT_DEFAULT);
private static final String[] AVAILABLE_TZIDS = TimeZone.getAvailableIDs();
private static Set<String> AVAILABLE_TZIDS;
private static String zoneNameTempFile;
private static String tzDataDir;
private static final Map<String, String> canonicalTZMap = new HashMap<>();
@ -730,7 +730,7 @@ public class CLDRConverter {
});
}
Arrays.stream(AVAILABLE_TZIDS).forEach(tzid -> {
getAvailableZoneIds().stream().forEach(tzid -> {
// If the tzid is deprecated, get the data for the replacement id
String tzKey = Optional.ofNullable((String)handlerSupplMeta.get(tzid))
.orElse(tzid);
@ -1074,8 +1074,20 @@ public class CLDRConverter {
StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);
}
// This method assumes handlerMetaZones is already initialized
private static Set<String> getAvailableZoneIds() {
assert handlerMetaZones != null;
if (AVAILABLE_TZIDS == null) {
AVAILABLE_TZIDS = new HashSet<>(ZoneId.getAvailableZoneIds());
AVAILABLE_TZIDS.addAll(handlerMetaZones.keySet());
AVAILABLE_TZIDS.remove(MetaZonesParseHandler.NO_METAZONE_KEY);
}
return AVAILABLE_TZIDS;
}
private static Stream<String> zidMapEntry() {
return ZoneId.getAvailableZoneIds().stream()
return getAvailableZoneIds().stream()
.map(id -> {
String canonId = canonicalTZMap.getOrDefault(id, id);
String meta = handlerMetaZones.get(canonId);

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,8 +30,8 @@ import java.io.*;
class AbstractCommandNode extends AbstractNamedNode {
void document(PrintWriter writer) {
writer.println("<h5 id=\"" + context.whereC + "\">" + name +
" Command (" + nameNode.value() + ")</h5>");
writer.println("<h3 id=\"" + context.whereC + "\">" + name +
" Command (" + nameNode.value() + ")</h3>");
writer.println(comment());
writer.println("<dl>");
for (Node node : components) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -62,8 +62,8 @@ abstract class AbstractNamedNode extends Node {
}
void document(PrintWriter writer) {
writer.println("<h4 id=\"" + name + "\">" + name +
" Command Set</h4>");
writer.println("<h2 id=\"" + name + "\">" + name +
" Command Set</h2>");
for (Node node : components) {
node.document(writer);
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,9 +38,9 @@ class CommandSetNode extends AbstractNamedNode {
}
void document(PrintWriter writer) {
writer.println("<h4 id=\"" + context.whereC + "\">" + name +
writer.println("<h2 id=\"" + context.whereC + "\">" + name +
" Command Set (" +
nameNode.value() + ")</h4>");
nameNode.value() + ")</h2>");
writer.println(comment());
for (Node node : components) {
node.document(writer);

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -54,8 +54,8 @@ class ConstantSetNode extends AbstractNamedNode {
}
void document(PrintWriter writer) {
writer.println("<h4 id=\"" + context.whereC + "\">" + name +
" Constants</h4>");
writer.println("<h2 id=\"" + context.whereC + "\">" + name +
" Constants</h2>");
writer.println(comment());
writer.println("<table><tr>");
writer.println("<th style=\"width: 20%\"><th style=\"width: 5%\"><th style=\"width: 65%\">");

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,11 +52,16 @@ class RootNode extends AbstractNamedNode {
writer.println("</style>");
writer.println("</head>");
writer.println("<body>");
writer.println("<ul role=\"navigation\">");
writer.println("<div class=\"centered\" role=\"banner\">");
writer.println("<h1 id=\"Protocol Details\">Java Debug Wire Protocol Details</h1>");
writer.println("</div>");
writer.println("<nav>");
writer.println("<ul>");
for (Node node : components) {
node.documentIndex(writer);
}
writer.println("</ul>");
writer.println("</nav>");
writer.println("<div role=\"main\">");
for (Node node : components) {
node.document(writer);

@ -5543,7 +5543,7 @@ instruct maxF_reg(legRegF dst, legRegF a, legRegF b, legRegF tmp, legRegF atmp,
ins_pipe( pipe_slow );
%}
instruct maxF_reduction_reg(regF dst, regF a, regF b, regF xmmt, rRegI tmp, rFlagsReg cr) %{
instruct maxF_reduction_reg(legRegF dst, legRegF a, legRegF b, legRegF xmmt, rRegI tmp, rFlagsReg cr) %{
predicate(UseAVX > 0 && n->is_reduction());
match(Set dst (MaxF a b));
effect(USE a, USE b, TEMP xmmt, TEMP tmp, KILL cr);
@ -5579,7 +5579,7 @@ instruct maxD_reg(legRegD dst, legRegD a, legRegD b, legRegD tmp, legRegD atmp,
ins_pipe( pipe_slow );
%}
instruct maxD_reduction_reg(regD dst, regD a, regD b, regD xmmt, rRegL tmp, rFlagsReg cr) %{
instruct maxD_reduction_reg(legRegD dst, legRegD a, legRegD b, legRegD xmmt, rRegL tmp, rFlagsReg cr) %{
predicate(UseAVX > 0 && n->is_reduction());
match(Set dst (MaxD a b));
effect(USE a, USE b, TEMP xmmt, TEMP tmp, KILL cr);
@ -5615,7 +5615,7 @@ instruct minF_reg(legRegF dst, legRegF a, legRegF b, legRegF tmp, legRegF atmp,
ins_pipe( pipe_slow );
%}
instruct minF_reduction_reg(regF dst, regF a, regF b, regF xmmt, rRegI tmp, rFlagsReg cr) %{
instruct minF_reduction_reg(legRegF dst, legRegF a, legRegF b, legRegF xmmt, rRegI tmp, rFlagsReg cr) %{
predicate(UseAVX > 0 && n->is_reduction());
match(Set dst (MinF a b));
effect(USE a, USE b, TEMP xmmt, TEMP tmp, KILL cr);
@ -5651,7 +5651,7 @@ instruct minD_reg(legRegD dst, legRegD a, legRegD b, legRegD tmp, legRegD atmp,
ins_pipe( pipe_slow );
%}
instruct minD_reduction_reg(regD dst, regD a, regD b, regD xmmt, rRegL tmp, rFlagsReg cr) %{
instruct minD_reduction_reg(legRegD dst, legRegD a, legRegD b, legRegD xmmt, rRegL tmp, rFlagsReg cr) %{
predicate(UseAVX > 0 && n->is_reduction());
match(Set dst (MinD a b));
effect(USE a, USE b, TEMP xmmt, TEMP tmp, KILL cr);

@ -266,6 +266,19 @@ bool ClassLoaderData::ChunkedHandleList::owner_of(oop* oop_handle) {
}
#endif // PRODUCT
void ClassLoaderData::clear_claim(int claim) {
for (;;) {
int old_claim = Atomic::load(&_claim);
if ((old_claim & claim) == 0) {
return;
}
int new_claim = old_claim & ~claim;
if (Atomic::cmpxchg(new_claim, &_claim, old_claim) == old_claim) {
return;
}
}
}
bool ClassLoaderData::try_claim(int claim) {
for (;;) {
int old_claim = Atomic::load(&_claim);

@ -206,16 +206,17 @@ class ClassLoaderData : public CHeapObj<mtClass> {
// The "claim" is typically used to check if oops_do needs to be applied on
// the CLD or not. Most GCs only perform strong marking during the marking phase.
enum {
_claim_none = 0,
_claim_finalizable = 2,
_claim_strong = 3
enum Claim {
_claim_none = 0,
_claim_finalizable = 2,
_claim_strong = 3,
_claim_other = 4
};
void clear_claim() { _claim = 0; }
void clear_claim(int claim);
bool claimed() const { return _claim != 0; }
bool claimed(int claim) const { return (_claim & claim) == claim; }
bool try_claim(int claim);
int get_claim() const { return _claim; }
void set_claim(int claim) { _claim = claim; }
// Computes if the CLD is alive or not. This is safe to call in concurrent
// contexts.
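A minimal sketch of how these claim bits compose (editorial illustration, not part of the patch; needs_visit is a hypothetical helper): _claim_strong (3) includes the _claim_finalizable bit (2), so a strongly claimed CLD is already claimed for finalizable marking, while the new _claim_other bit (4) is independent of both.
// Hypothetical helper, assuming the Claim enum above.
static bool needs_visit(int current_claim, int requested_claim) {
  // Visit only if some requested bit is not yet claimed; e.g.
  // current = _claim_strong already covers _claim_finalizable,
  // but not the independent _claim_other bit.
  return (current_claim & requested_claim) != requested_claim;
}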

@ -64,6 +64,11 @@ void ClassLoaderDataGraph::clear_claimed_marks() {
}
}
void ClassLoaderDataGraph::clear_claimed_marks(int claim) {
for (ClassLoaderData* cld = OrderAccess::load_acquire(&_head); cld != NULL; cld = cld->next()) {
cld->clear_claim(claim);
}
}
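Usage sketch, assuming the API above (it mirrors what the ZHeapIterator destructor does later in this change): a heap walker that claims CLDs with _claim_other clears only that bit when it finishes, leaving concurrent _claim_strong/_claim_finalizable marks untouched.
// Teardown of a hypothetical heap walker using the _claim_other bit.
ClassLoaderDataGraph::clear_claimed_marks(ClassLoaderData::_claim_other);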
// Class iterator used by the compiler. It gets some number of classes at
// a safepoint to decay invocation counters on the methods.
class ClassLoaderDataGraphKlassIteratorStatic {
@ -471,7 +476,7 @@ GrowableArray<ClassLoaderData*>* ClassLoaderDataGraph::new_clds() {
// The CLDs in [_head, _saved_head] were all added during last call to remember_new_clds(true);
ClassLoaderData* curr = _head;
while (curr != _saved_head) {
if (!curr->claimed()) {
if (!curr->claimed(ClassLoaderData::_claim_strong)) {
array->push(curr);
LogTarget(Debug, class, loader, data) lt;
if (lt.is_enabled()) {

@ -68,6 +68,7 @@ class ClassLoaderDataGraph : public AllStatic {
static void clean_module_and_package_info();
static void purge();
static void clear_claimed_marks();
static void clear_claimed_marks(int claim);
// Iteration through CLDG inside a safepoint; GC support
static void cld_do(CLDClosure* cl);
static void cld_unloading_do(CLDClosure* cl);

@ -35,6 +35,7 @@
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"
@ -414,14 +415,6 @@ OopStorage::Block::block_for_ptr(const OopStorage* owner, const oop* ptr) {
oop* OopStorage::allocate() {
MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
// Note: Without this we might never perform cleanup. As it is,
// cleanup is only requested here, when completing a concurrent
// iteration, or when someone else entirely wakes up the service
// thread, which isn't ideal. But we can't notify in release().
if (reduce_deferred_updates()) {
notify_needs_cleanup();
}
Block* block = block_for_allocation();
if (block == NULL) return NULL; // Block allocation failed.
assert(!block->is_full(), "invariant");
@ -474,23 +467,20 @@ bool OopStorage::try_add_block() {
OopStorage::Block* OopStorage::block_for_allocation() {
assert_lock_strong(_allocation_mutex);
while (true) {
// Use the first block in _allocation_list for the allocation.
Block* block = _allocation_list.head();
if (block != NULL) {
return block;
} else if (reduce_deferred_updates()) {
MutexUnlocker ul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
notify_needs_cleanup();
// Might have added a block to the _allocation_list, so retry.
} else if (try_add_block()) {
block = _allocation_list.head();
assert(block != NULL, "invariant");
return block;
} else if (reduce_deferred_updates()) { // Once more before failure.
MutexUnlocker ul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
notify_needs_cleanup();
} else {
// Successfully added a new block to the list, so retry.
assert(_allocation_list.chead() != NULL, "invariant");
} else if (_allocation_list.chead() != NULL) {
// Trying to add a block failed, but some other thread added to the
// list while we'd dropped the lock over the new block allocation.
} else if (!reduce_deferred_updates()) { // Once more before failure.
// Attempt to add a block failed, no other thread added a block,
// and no deferred update added a block, then allocation failed.
log_debug(oopstorage, blocks)("%s: failed block allocation", name());
@ -635,7 +625,14 @@ void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) {
if (fetched == head) break; // Successful update.
head = fetched; // Retry with updated head.
}
owner->record_needs_cleanup();
// Only request cleanup for to-empty transitions, not for from-full.
// There isn't any rush to process from-full transitions. Allocation
// will reduce deferrals before allocating new blocks, so may process
// some. And the service thread will drain the entire deferred list
// if there are any pending to-empty transitions.
if (releasing == old_allocated) {
owner->record_needs_cleanup();
}
log_debug(oopstorage, blocks)("%s: deferred update " PTR_FORMAT,
_owner->name(), p2i(this));
}
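The guard above can be restated as a predicate on the transition (editorial sketch; becomes_empty is a hypothetical name): a release makes the block empty exactly when the bits being released equal the previously allocated bits.
// Hypothetical restatement of the to-empty test used above.
static bool becomes_empty(uintx releasing, uintx old_allocated) {
  // Every currently allocated entry is being released in this call.
  return releasing == old_allocated;
}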
@ -684,7 +681,6 @@ bool OopStorage::reduce_deferred_updates() {
if (is_empty_bitmask(allocated)) {
_allocation_list.unlink(*block);
_allocation_list.push_back(*block);
notify_needs_cleanup();
}
log_debug(oopstorage, blocks)("%s: processed deferred update " PTR_FORMAT,
@ -740,11 +736,6 @@ const char* dup_name(const char* name) {
return dup;
}
// Possible values for OopStorage::_needs_cleanup.
const uint needs_cleanup_none = 0; // No cleanup needed.
const uint needs_cleanup_marked = 1; // Requested, but no notification made.
const uint needs_cleanup_notified = 2; // Requested and Service thread notified.
const size_t initial_active_array_size = 8;
OopStorage::OopStorage(const char* name,
@ -758,7 +749,7 @@ OopStorage::OopStorage(const char* name,
_active_mutex(active_mutex),
_allocation_count(0),
_concurrent_iteration_count(0),
_needs_cleanup(needs_cleanup_none)
_needs_cleanup(false)
{
_active_array->increment_refcount();
assert(_active_mutex->rank() < _allocation_mutex->rank(),
@ -796,40 +787,89 @@ OopStorage::~OopStorage() {
FREE_C_HEAP_ARRAY(char, _name);
}
// Called by service thread to check for pending work.
bool OopStorage::needs_delete_empty_blocks() const {
return Atomic::load(&_needs_cleanup) != needs_cleanup_none;
// Managing service thread notifications.
//
// We don't want cleanup work to linger indefinitely, but we also don't want
// to run the service thread too often. We're also very limited in what we
// can do in a release operation, where cleanup work is created.
//
// When a release operation changes a block's state to empty, it records the
// need for cleanup in both the associated storage object and in the global
// request state. A safepoint cleanup task notifies the service thread when
// there may be cleanup work for any storage object, based on the global
// request state. But that notification is deferred if the service thread
// has run recently, and we also avoid duplicate notifications. The service
// thread updates the timestamp and resets the state flags on every iteration.
// Global cleanup request state.
static volatile bool needs_cleanup_requested = false;
// Flag for avoiding duplicate notifications.
static bool needs_cleanup_triggered = false;
// Time after which a notification can be made.
static jlong cleanup_trigger_permit_time = 0;
// Minimum time since last service thread check before notification is
// permitted. The value of 500ms was an arbitrary choice; frequent, but not
// too frequent.
const jlong cleanup_trigger_defer_period = 500 * NANOSECS_PER_MILLISEC;
void OopStorage::trigger_cleanup_if_needed() {
MonitorLocker ml(Service_lock, Monitor::_no_safepoint_check_flag);
if (Atomic::load(&needs_cleanup_requested) &&
!needs_cleanup_triggered &&
(os::javaTimeNanos() > cleanup_trigger_permit_time)) {
needs_cleanup_triggered = true;
ml.notify_all();
}
}
bool OopStorage::has_cleanup_work_and_reset() {
assert_lock_strong(Service_lock);
cleanup_trigger_permit_time =
os::javaTimeNanos() + cleanup_trigger_defer_period;
needs_cleanup_triggered = false;
// Set the request flag false and return its old value.
// Needs to be atomic to avoid dropping a concurrent request.
// Can't use Atomic::xchg, which may not support bool.
return Atomic::cmpxchg(false, &needs_cleanup_requested, true);
}
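A sketch of the consumer side, tying the two hooks together (assumption: the real loop lives in serviceThread.cpp and multiplexes other subtasks; this fragment is illustrative only):
// Illustrative service-thread fragment, not the actual serviceThread.cpp code.
for (;;) {
  bool oopstorage_work;
  {
    MonitorLocker ml(Service_lock, Monitor::_no_safepoint_check_flag);
    // Resets the request flag and re-arms the defer period.
    oopstorage_work = OopStorage::has_cleanup_work_and_reset();
    if (!oopstorage_work) {
      ml.wait(); // Woken by trigger_cleanup_if_needed() at safepoint cleanup.
      continue;
    }
  }
  // Outside Service_lock: have each storage object drain deferred updates
  // and delete empty blocks via delete_empty_blocks().
}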
// Record that cleanup is needed, without notifying the Service thread.
// Used by release(), where we can't lock even Service_lock.
void OopStorage::record_needs_cleanup() {
Atomic::cmpxchg(needs_cleanup_marked, &_needs_cleanup, needs_cleanup_none);
}
// Record that cleanup is needed, and notify the Service thread.
void OopStorage::notify_needs_cleanup() {
// Avoid re-notification if already notified.
const uint notified = needs_cleanup_notified;
if (Atomic::xchg(notified, &_needs_cleanup) != notified) {
MonitorLocker ml(Service_lock, Monitor::_no_safepoint_check_flag);
ml.notify_all();
}
// Set local flag first, else service thread could wake up and miss
// the request. This order may instead (rarely) unnecessarily notify.
OrderAccess::release_store(&_needs_cleanup, true);
OrderAccess::release_store_fence(&needs_cleanup_requested, true);
}
bool OopStorage::delete_empty_blocks() {
// Service thread might have oopstorage work, but not for this object.
// Check for deferred updates even though that's not a service thread
// trigger; since we're here, we might as well process them.
if (!OrderAccess::load_acquire(&_needs_cleanup) &&
(OrderAccess::load_acquire(&_deferred_updates) == NULL)) {
return false;
}
MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
// Clear the request before processing.
Atomic::store(needs_cleanup_none, &_needs_cleanup);
OrderAccess::fence();
OrderAccess::release_store_fence(&_needs_cleanup, false);
// Other threads could be adding to the empty block count or the
// deferred update list while we're working. Set an upper bound on
// how many updates we'll process and blocks we'll try to release,
// so other threads can't cause an unbounded stay in this function.
size_t limit = block_count();
if (limit == 0) return false; // Empty storage; nothing at all to do.
// We add a bit of slop because the reduce_deferred_updates clause
// can cause blocks to be double counted. If there are few blocks
// and many of them are deferred and empty, we might hit the limit
// and spin the caller without doing very much work. Otherwise,
// we don't normally hit the limit anyway, instead running out of
// work to do.
size_t limit = block_count() + 10;
for (size_t i = 0; i < limit; ++i) {
// Process deferred updates, which might make empty blocks available.
@ -946,8 +986,8 @@ OopStorage::BasicParState::~BasicParState() {
_storage->relinquish_block_array(_active_array);
update_concurrent_iteration_count(-1);
if (_concurrent) {
// We may have deferred some work.
const_cast<OopStorage*>(_storage)->notify_needs_cleanup();
// We may have deferred some cleanup work.
const_cast<OopStorage*>(_storage)->record_needs_cleanup();
}
}

@ -152,18 +152,26 @@ public:
template<bool concurrent, bool is_const> class ParState;
// Service thread cleanup support.
// Stops deleting if there is an in-progress concurrent iteration.
// Locks both the _allocation_mutex and the _active_mutex, and may
// safepoint. Deletion may be throttled, with only some available
// work performed, in order to allow other Service thread subtasks
// to run. Returns true if there may be more work to do, false if
// nothing to do.
// Called by the service thread to process any pending cleanups for this
// storage object. Drains the _deferred_updates list, and deletes empty
// blocks. Stops deleting if there is an in-progress concurrent
// iteration. Locks both the _allocation_mutex and the _active_mutex, and
// may safepoint. Deletion may be throttled, with only some available
// work performed, in order to allow other Service thread subtasks to run.
// Returns true if there may be more work to do, false if nothing to do.
bool delete_empty_blocks();
// Service thread cleanup support.
// Called by the service thread (while holding Service_lock) to test
// whether a call to delete_empty_blocks should be made.
bool needs_delete_empty_blocks() const;
// Called by safepoint cleanup to notify the service thread (via
// Service_lock) that there may be some OopStorage objects with pending
// cleanups to process.
static void trigger_cleanup_if_needed();
// Called by the service thread (while holding Service_lock) to test
// for pending cleanup requests and reset the request state to allow
// recognition of new requests. Returns true if there was a pending
// request.
static bool has_cleanup_work_and_reset();
// Debugging and logging support.
const char* name() const;
@ -232,7 +240,7 @@ AIX_ONLY(private:)
// mutable because this gets set even for const iteration.
mutable int _concurrent_iteration_count;
volatile uint _needs_cleanup;
volatile bool _needs_cleanup;
bool try_add_block();
Block* block_for_allocation();
@ -240,7 +248,6 @@ AIX_ONLY(private:)
Block* find_block_or_null(const oop* ptr) const;
void delete_empty_block(const Block& block);
bool reduce_deferred_updates();
void notify_needs_cleanup();
AIX_ONLY(public:) // xlC 12 on AIX doesn't implement C++ DR45.
void record_needs_cleanup();
AIX_ONLY(private:)

@ -1694,7 +1694,28 @@ void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
// it would be a simple check, which is supposed to be fast. This is also
// safe to do even without degeneration, as CSet iterator is at beginning
// in preparation for evacuation anyway.
collection_set()->clear_current_index();
//
// Before doing that, we need to make sure we never had any cset-pinned
// regions. This may happen if allocation failure happened when evacuating
// the about-to-be-pinned object, oom-evac protocol left the object in
// the collection set, and then the pin reached the cset region. If we continue
// the cycle here, we would trash the cset and alive objects in it. To avoid
// it, we fail degeneration right away and slide into Full GC to recover.
{
collection_set()->clear_current_index();
ShenandoahHeapRegion* r;
while ((r = collection_set()->next()) != NULL) {
if (r->is_pinned()) {
cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
op_degenerated_fail();
return;
}
}
collection_set()->clear_current_index();
}
op_stw_evac();
if (cancelled_gc()) {

@ -128,14 +128,19 @@ void ZBarrierSetC2::enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) co
}
}
static bool load_require_barrier(LoadNode* load) { return ((load->barrier_data() & RequireBarrier) != 0); }
static bool load_has_weak_barrier(LoadNode* load) { return ((load->barrier_data() & WeakBarrier) != 0); }
static bool load_has_expanded_barrier(LoadNode* load) { return ((load->barrier_data() & ExpandedBarrier) != 0); }
const uint NoBarrier = 0;
const uint RequireBarrier = 1;
const uint WeakBarrier = 2;
const uint ExpandedBarrier = 4;
static bool load_require_barrier(LoadNode* load) { return (load->barrier_data() & RequireBarrier) == RequireBarrier; }
static bool load_has_weak_barrier(LoadNode* load) { return (load->barrier_data() & WeakBarrier) == WeakBarrier; }
static bool load_has_expanded_barrier(LoadNode* load) { return (load->barrier_data() & ExpandedBarrier) == ExpandedBarrier; }
static void load_set_expanded_barrier(LoadNode* load) { return load->set_barrier_data(ExpandedBarrier); }
static void load_set_barrier(LoadNode* load, bool weak) {
if (weak) {
load->set_barrier_data(WeakBarrier);
load->set_barrier_data(RequireBarrier | WeakBarrier);
} else {
load->set_barrier_data(RequireBarrier);
}
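The point of the change (editorial note): the old BarrierInfo enum encoded WeakBarrier as 3, inclusive of RequireBarrier, so a mask test for the weak bit also matched strong barriers. The new constants are independent bits that must be combined explicitly, as a quick sketch:
// Sketch of the new flag semantics, assuming the constants above.
const uint weak_data   = RequireBarrier | WeakBarrier; // weak load barrier
const uint strong_data = RequireBarrier;               // strong load barrier
// (weak_data & WeakBarrier) == WeakBarrier, while
// (strong_data & WeakBarrier) == 0 -- the weak-bit test no longer
// matches strong barriers, unlike the old inclusive encoding.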
@ -1228,7 +1233,6 @@ static void insert_barrier_before_unsafe(PhaseIdealLoop* phase, LoadStoreNode* o
Compile *C = phase->C;
PhaseIterGVN &igvn = phase->igvn();
LoadStoreNode* zclone = NULL;
bool is_weak = false;
Node *in_ctrl = old_node->in(MemNode::Control);
Node *in_mem = old_node->in(MemNode::Memory);
@ -1248,7 +1252,6 @@ static void insert_barrier_before_unsafe(PhaseIdealLoop* phase, LoadStoreNode* o
if (can_simplify_cas(old_node)) {
break;
}
is_weak = true;
zclone = new ZWeakCompareAndSwapPNode(in_ctrl, in_mem, in_adr, in_val, old_node->in(LoadStoreConditionalNode::ExpectedIn),
((CompareAndSwapNode*)old_node)->order());
adr_type = TypePtr::BOTTOM;
@ -1279,7 +1282,7 @@ static void insert_barrier_before_unsafe(PhaseIdealLoop* phase, LoadStoreNode* o
igvn.register_new_node_with_optimizer(load);
igvn.replace_node(old_node, zclone);
Node *barrier = new LoadBarrierNode(C, NULL, in_mem, load, in_adr, is_weak);
Node *barrier = new LoadBarrierNode(C, NULL, in_mem, load, in_adr, false /* weak */);
Node *barrier_val = new ProjNode(barrier, LoadBarrierNode::Oop);
Node *barrier_ctrl = new ProjNode(barrier, LoadBarrierNode::Control);

@ -149,13 +149,6 @@ public:
LoadBarrierNode* load_barrier_node(int idx) const;
};
enum BarrierInfo {
NoBarrier = 0,
RequireBarrier = 1,
WeakBarrier = 3, // Inclusive with RequireBarrier
ExpandedBarrier = 4
};
class ZBarrierSetC2 : public BarrierSetC2 {
private:
ZBarrierSetC2State* state() const;

@ -92,6 +92,11 @@ void ZArguments::initialize() {
// same reason we need fixup_partial_loads
FLAG_SET_DEFAULT(VerifyBeforeIteration, false);
if (VerifyBeforeGC || VerifyDuringGC || VerifyAfterGC) {
FLAG_SET_DEFAULT(ZVerifyRoots, true);
FLAG_SET_DEFAULT(ZVerifyObjects, true);
}
// Verification of stacks not (yet) supported, for the same reason
// we need fixup_partial_loads
DEBUG_ONLY(FLAG_SET_DEFAULT(VerifyStack, false));

@ -232,11 +232,11 @@ GrowableArray<MemoryPool*> ZCollectedHeap::memory_pools() {
}
void ZCollectedHeap::object_iterate(ObjectClosure* cl) {
_heap.object_iterate(cl, true /* visit_referents */);
_heap.object_iterate(cl, true /* visit_weaks */);
}
void ZCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
_heap.object_iterate(cl, true /* visit_referents */);
_heap.object_iterate(cl, true /* visit_weaks */);
}
HeapWord* ZCollectedHeap::block_start(const void* addr) const {

@ -31,6 +31,7 @@
#include "gc/z/zMessagePort.inline.hpp"
#include "gc/z/zServiceability.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zVerify.hpp"
#include "logging/log.hpp"
#include "memory/universe.hpp"
#include "runtime/vmOperations.hpp"
@ -86,6 +87,9 @@ public:
GCIdMark gc_id_mark(_gc_id);
IsGCActiveMark gc_active_mark;
// Verify roots
ZVerify::roots_strong();
// Execute operation
_success = do_operation();
@ -301,8 +305,14 @@ void ZDriver::concurrent_reset_relocation_set() {
void ZDriver::pause_verify() {
if (VerifyBeforeGC || VerifyDuringGC || VerifyAfterGC) {
// Full verification
VM_Verify op;
VMThread::execute(&op);
} else if (ZVerifyRoots || ZVerifyObjects) {
// Limited verification
VM_ZVerifyOperation op;
VMThread::execute(&op);
}
}

@ -41,6 +41,7 @@
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "gc/z/zVerify.hpp"
#include "gc/z/zVirtualMemory.inline.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
@ -340,6 +341,9 @@ bool ZHeap::mark_end() {
// Enter mark completed phase
ZGlobalPhase = ZPhaseMarkCompleted;
// Verify after mark
ZVerify::after_mark();
// Update statistics
ZStatSample(ZSamplerHeapUsedAfterMark, used());
ZStatHeap::set_at_mark_end(capacity(), allocated(), used());
@ -468,11 +472,11 @@ void ZHeap::relocate() {
used(), used_high(), used_low());
}
void ZHeap::object_iterate(ObjectClosure* cl, bool visit_referents) {
void ZHeap::object_iterate(ObjectClosure* cl, bool visit_weaks) {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
ZHeapIterator iter;
iter.objects_do(cl, visit_referents);
iter.objects_do(cl, visit_weaks);
}
void ZHeap::serviceability_initialize() {
@ -518,40 +522,11 @@ void ZHeap::print_extended_on(outputStream* st) const {
st->cr();
}
class ZVerifyRootsTask : public ZTask {
private:
ZStatTimerDisable _disable;
ZRootsIterator _strong_roots;
ZWeakRootsIterator _weak_roots;
public:
ZVerifyRootsTask() :
ZTask("ZVerifyRootsTask"),
_disable(),
_strong_roots(),
_weak_roots() {}
virtual void work() {
ZStatTimerDisable disable;
ZVerifyOopClosure cl;
_strong_roots.oops_do(&cl);
_weak_roots.oops_do(&cl);
}
};
void ZHeap::verify() {
// Heap verification can only be done between mark end and
// relocate start. This is the only window where all oops are
// good and the whole heap is in a consistent state.
guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");
{
ZVerifyRootsTask task;
_workers.run_parallel(&task);
}
{
ZVerifyObjectClosure cl;
object_iterate(&cl, false /* visit_referents */);
}
ZVerify::after_weak_processing();
}

@ -161,7 +161,7 @@ public:
void relocate();
// Iteration
void object_iterate(ObjectClosure* cl, bool visit_referents);
void object_iterate(ObjectClosure* cl, bool visit_weaks);
// Serviceability
void serviceability_initialize();

@ -22,6 +22,8 @@
*/
#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zGranuleMap.inline.hpp"
@ -83,7 +85,7 @@ public:
};
template <bool VisitReferents>
class ZHeapIteratorOopClosure : public BasicOopIterateClosure {
class ZHeapIteratorOopClosure : public ClaimMetadataVisitingOopIterateClosure {
private:
ZHeapIterator* const _iter;
const oop _base;
@ -98,6 +100,7 @@ private:
public:
ZHeapIteratorOopClosure(ZHeapIterator* iter, oop base) :
ClaimMetadataVisitingOopIterateClosure(ClassLoaderData::_claim_other),
_iter(iter),
_base(base) {}
@ -130,6 +133,7 @@ ZHeapIterator::~ZHeapIterator() {
for (ZHeapIteratorBitMap* map; iter.next(&map);) {
delete map;
}
ClassLoaderDataGraph::clear_claimed_marks(ClassLoaderData::_claim_other);
}
static size_t object_index_max() {
@ -184,15 +188,23 @@ void ZHeapIterator::push_fields(oop obj) {
obj->oop_iterate(&cl);
}
template <bool VisitReferents>
class ZHeapIterateConcurrentRootsIterator : public ZConcurrentRootsIterator {
public:
ZHeapIterateConcurrentRootsIterator() :
ZConcurrentRootsIterator(ClassLoaderData::_claim_other) {}
};
template <bool VisitWeaks>
void ZHeapIterator::objects_do(ObjectClosure* cl) {
ZStatTimerDisable disable;
// Push roots to visit
push_roots<ZRootsIterator, false /* Concurrent */, false /* Weak */>();
push_roots<ZConcurrentRootsIterator, true /* Concurrent */, false /* Weak */>();
push_roots<ZWeakRootsIterator, false /* Concurrent */, true /* Weak */>();
push_roots<ZConcurrentWeakRootsIterator, true /* Concurrent */, true /* Weak */>();
push_roots<ZRootsIterator, false /* Concurrent */, false /* Weak */>();
push_roots<ZHeapIterateConcurrentRootsIterator, true /* Concurrent */, false /* Weak */>();
if (VisitWeaks) {
push_roots<ZWeakRootsIterator, false /* Concurrent */, true /* Weak */>();
push_roots<ZConcurrentWeakRootsIterator, true /* Concurrent */, true /* Weak */>();
}
// Drain stack
while (!_visit_stack.is_empty()) {
@ -202,14 +214,14 @@ void ZHeapIterator::objects_do(ObjectClosure* cl) {
cl->do_object(obj);
// Push fields to visit
push_fields<VisitReferents>(obj);
push_fields<VisitWeaks>(obj);
}
}
void ZHeapIterator::objects_do(ObjectClosure* cl, bool visit_referents) {
if (visit_referents) {
objects_do<true /* VisitReferents */>(cl);
void ZHeapIterator::objects_do(ObjectClosure* cl, bool visit_weaks) {
if (visit_weaks) {
objects_do<true /* VisitWeaks */>(cl);
} else {
objects_do<false /* VisitReferents */>(cl);
objects_do<false /* VisitWeaks */>(cl);
}
}

@ -54,7 +54,7 @@ public:
ZHeapIterator();
~ZHeapIterator();
void objects_do(ObjectClosure* cl, bool visit_referents);
void objects_do(ObjectClosure* cl, bool visit_weaks);
};
#endif // SHARE_GC_Z_ZHEAPITERATOR_HPP

@ -22,6 +22,7 @@
*/
#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zMarkCache.inline.hpp"
@ -632,14 +633,23 @@ public:
class ZMarkConcurrentRootsTask : public ZTask {
private:
SuspendibleThreadSetJoiner _sts_joiner;
ZConcurrentRootsIterator _roots;
ZMarkConcurrentRootsIteratorClosure _cl;
public:
ZMarkConcurrentRootsTask(ZMark* mark) :
ZTask("ZMarkConcurrentRootsTask"),
_roots(true /* marking */),
_cl() {}
_sts_joiner(true /* active */),
_roots(ClassLoaderData::_claim_strong),
_cl() {
ClassLoaderDataGraph_lock->lock();
ClassLoaderDataGraph::clear_claimed_marks();
}
~ZMarkConcurrentRootsTask() {
ClassLoaderDataGraph_lock->unlock();
}
virtual void work() {
_roots.oops_do(&_cl);

@ -1,60 +0,0 @@
/*
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zHeap.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zOop.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
void ZVerifyOopClosure::do_oop(oop* p) {
guarantee(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");
guarantee(!ZResurrection::is_blocked(), "Invalid phase");
const oop o = RawAccess<>::oop_load(p);
if (o != NULL) {
const uintptr_t addr = ZOop::to_address(o);
const uintptr_t good_addr = ZAddress::good(addr);
guarantee(ZAddress::is_good(addr) || ZAddress::is_finalizable_good(addr),
"Bad oop " PTR_FORMAT " found at " PTR_FORMAT ", expected " PTR_FORMAT,
addr, p2i(p), good_addr);
guarantee(oopDesc::is_oop(ZOop::from_address(good_addr)),
"Bad object " PTR_FORMAT " found at " PTR_FORMAT,
addr, p2i(p));
}
}
void ZVerifyOopClosure::do_oop(narrowOop* p) {
ShouldNotReachHere();
}
void ZVerifyObjectClosure::do_object(oop o) {
ZVerifyOopClosure cl;
o->oop_iterate(&cl);
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -46,16 +46,13 @@ public:
};
template <bool finalizable>
class ZMarkBarrierOopClosure : public MetadataVisitingOopIterateClosure {
class ZMarkBarrierOopClosure : public ClaimMetadataVisitingOopIterateClosure {
public:
ZMarkBarrierOopClosure();
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
virtual void do_klass(Klass* k);
virtual void do_cld(ClassLoaderData* cld);
#ifdef ASSERT
virtual bool should_verify_oops() {
return false;
@ -80,26 +77,4 @@ public:
virtual void do_oop(narrowOop* p);
};
class ZVerifyOopClosure : public ZRootsIteratorClosure, public BasicOopIterateClosure {
public:
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
virtual ReferenceIterationMode reference_iteration_mode() {
return DO_FIELDS;
}
#ifdef ASSERT
// Verification handled by the closure itself
virtual bool should_verify_oops() {
return false;
}
#endif
};
class ZVerifyObjectClosure : public ObjectClosure {
public:
virtual void do_object(oop o);
};
#endif // SHARE_GC_Z_ZOOPCLOSURES_HPP

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -55,7 +55,12 @@ inline void ZNMethodOopClosure::do_oop(narrowOop* p) {
template <bool finalizable>
inline ZMarkBarrierOopClosure<finalizable>::ZMarkBarrierOopClosure() :
MetadataVisitingOopIterateClosure(finalizable ? NULL : ZHeap::heap()->reference_discoverer()) {}
ClaimMetadataVisitingOopIterateClosure(finalizable
? ClassLoaderData::_claim_finalizable
: ClassLoaderData::_claim_strong,
finalizable
? NULL
: ZHeap::heap()->reference_discoverer()) {}
template <bool finalizable>
inline void ZMarkBarrierOopClosure<finalizable>::do_oop(oop* p) {
@ -67,18 +72,6 @@ inline void ZMarkBarrierOopClosure<finalizable>::do_oop(narrowOop* p) {
ShouldNotReachHere();
}
template <bool finalizable>
inline void ZMarkBarrierOopClosure<finalizable>::do_klass(Klass* k) {
ClassLoaderData* const cld = k->class_loader_data();
ZMarkBarrierOopClosure<finalizable>::do_cld(cld);
}
template <bool finalizable>
inline void ZMarkBarrierOopClosure<finalizable>::do_cld(ClassLoaderData* cld) {
const int claim = finalizable ? ClassLoaderData::_claim_finalizable : ClassLoaderData::_claim_strong;
cld->oops_do(this, claim);
}
inline bool ZPhantomIsAliveObjectClosure::do_object_b(oop o) {
return ZBarrier::is_alive_barrier_on_phantom_oop(o);
}

@ -263,24 +263,16 @@ void ZRootsIterator::oops_do(ZRootsIteratorClosure* cl, bool visit_jvmti_weak_ex
}
}
ZConcurrentRootsIterator::ZConcurrentRootsIterator(bool marking) :
_marking(marking),
_sts_joiner(marking /* active */),
ZConcurrentRootsIterator::ZConcurrentRootsIterator(int cld_claim) :
_jni_handles_iter(JNIHandles::global_handles()),
_cld_claim(cld_claim),
_jni_handles(this),
_class_loader_data_graph(this) {
ZStatTimer timer(ZSubPhaseConcurrentRootsSetup);
if (_marking) {
ClassLoaderDataGraph_lock->lock();
ClassLoaderDataGraph::clear_claimed_marks();
}
}
ZConcurrentRootsIterator::~ZConcurrentRootsIterator() {
ZStatTimer timer(ZSubPhaseConcurrentRootsTeardown);
if (_marking) {
ClassLoaderDataGraph_lock->unlock();
}
}
void ZConcurrentRootsIterator::do_jni_handles(ZRootsIteratorClosure* cl) {
@ -290,13 +282,8 @@ void ZConcurrentRootsIterator::do_jni_handles(ZRootsIteratorClosure* cl) {
void ZConcurrentRootsIterator::do_class_loader_data_graph(ZRootsIteratorClosure* cl) {
ZStatTimer timer(ZSubPhaseConcurrentRootsClassLoaderDataGraph);
if (_marking) {
CLDToOopClosure cld_cl(cl, ClassLoaderData::_claim_strong);
ClassLoaderDataGraph::always_strong_cld_do(&cld_cl);
} else {
CLDToOopClosure cld_cl(cl, ClassLoaderData::_claim_none);
ClassLoaderDataGraph::cld_do(&cld_cl);
}
CLDToOopClosure cld_cl(cl, _cld_claim);
ClassLoaderDataGraph::always_strong_cld_do(&cld_cl);
}
void ZConcurrentRootsIterator::oops_do(ZRootsIteratorClosure* cl) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -111,9 +111,8 @@ public:
class ZConcurrentRootsIterator {
private:
const bool _marking;
SuspendibleThreadSetJoiner _sts_joiner;
ZOopStorageIterator _jni_handles_iter;
int _cld_claim;
void do_jni_handles(ZRootsIteratorClosure* cl);
void do_class_loader_data_graph(ZRootsIteratorClosure* cl);
@ -122,7 +121,7 @@ private:
ZParallelOopsDo<ZConcurrentRootsIterator, &ZConcurrentRootsIterator::do_class_loader_data_graph> _class_loader_data_graph;
public:
ZConcurrentRootsIterator(bool marking = false);
ZConcurrentRootsIterator(int cld_claim);
~ZConcurrentRootsIterator();
void oops_do(ZRootsIteratorClosure* cl);

@ -0,0 +1,187 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "gc/z/zAddress.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zOop.hpp"
#include "gc/z/zResurrection.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zVerify.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/oop.inline.hpp"
#define BAD_OOP_REPORT(addr) \
"Bad oop " PTR_FORMAT " found at " PTR_FORMAT ", expected " PTR_FORMAT, \
addr, p2i(p), ZAddress::good(addr)
class ZVerifyRootsClosure : public ZRootsIteratorClosure {
public:
virtual void do_oop(oop* p) {
uintptr_t value = ZOop::to_address(*p);
if (value == 0) {
return;
}
guarantee(!ZAddress::is_finalizable(value), BAD_OOP_REPORT(value));
guarantee(ZAddress::is_good(value), BAD_OOP_REPORT(value));
guarantee(oopDesc::is_oop(ZOop::from_address(value)), BAD_OOP_REPORT(value));
}
virtual void do_oop(narrowOop*) { ShouldNotReachHere(); }
};
template <bool VisitReferents>
class ZVerifyOopClosure : public ClaimMetadataVisitingOopIterateClosure, public ZRootsIteratorClosure {
public:
ZVerifyOopClosure() :
ClaimMetadataVisitingOopIterateClosure(ClassLoaderData::_claim_other) {}
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
virtual ReferenceIterationMode reference_iteration_mode() {
return VisitReferents ? DO_FIELDS : DO_FIELDS_EXCEPT_REFERENT;
}
#ifdef ASSERT
// Verification handled by the closure itself
virtual bool should_verify_oops() {
return false;
}
#endif
};
class ZVerifyObjectClosure : public ObjectClosure {
private:
bool _visit_referents;
public:
ZVerifyObjectClosure(bool visit_referents) : _visit_referents(visit_referents) {}
virtual void do_object(oop o);
};
template <typename RootsIterator>
void ZVerify::roots_impl() {
if (ZVerifyRoots) {
ZVerifyRootsClosure cl;
RootsIterator iter;
iter.oops_do(&cl);
}
}
void ZVerify::roots_strong() {
roots_impl<ZRootsIterator>();
}
class ZVerifyConcurrentRootsIterator : public ZConcurrentRootsIterator {
public:
ZVerifyConcurrentRootsIterator()
: ZConcurrentRootsIterator(ClassLoaderData::_claim_none) {}
};
void ZVerify::roots_concurrent() {
roots_impl<ZVerifyConcurrentRootsIterator>();
}
void ZVerify::roots_weak() {
assert(!ZResurrection::is_blocked(), "Invalid phase");
roots_impl<ZWeakRootsIterator>();
}
void ZVerify::roots(bool verify_weaks) {
roots_strong();
roots_concurrent();
if (verify_weaks) {
roots_weak();
roots_concurrent_weak();
}
}
void ZVerify::objects(bool verify_weaks) {
if (ZVerifyObjects) {
ZVerifyObjectClosure cl(verify_weaks);
ZHeap::heap()->object_iterate(&cl, verify_weaks);
}
}
void ZVerify::roots_concurrent_weak() {
assert(!ZResurrection::is_blocked(), "Invalid phase");
roots_impl<ZConcurrentWeakRootsIterator>();
}
void ZVerify::roots_and_objects(bool verify_weaks) {
ZStatTimerDisable _disable;
roots(verify_weaks);
objects(verify_weaks);
}
void ZVerify::after_mark() {
// Only verify strong roots and references.
roots_and_objects(false /* verify_weaks */);
}
void ZVerify::after_weak_processing() {
// Also verify weaks - all should have been processed at this point.
roots_and_objects(true /* verify_weaks */);
}
template <bool VisitReferents>
void ZVerifyOopClosure<VisitReferents>::do_oop(oop* p) {
guarantee(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");
guarantee(!ZResurrection::is_blocked(), "Invalid phase");
const oop o = RawAccess<>::oop_load(p);
if (o == NULL) {
return;
}
const uintptr_t addr = ZOop::to_address(o);
if (VisitReferents) {
guarantee(ZAddress::is_good(addr) || ZAddress::is_finalizable_good(addr), BAD_OOP_REPORT(addr));
} else {
// Should not encounter finalizable oops through strong-only paths. Assumes only strong roots are visited.
guarantee(ZAddress::is_good(addr), BAD_OOP_REPORT(addr));
}
const uintptr_t good_addr = ZAddress::good(addr);
guarantee(oopDesc::is_oop(ZOop::from_address(good_addr)), BAD_OOP_REPORT(addr));
}
void ZVerifyObjectClosure::do_object(oop o) {
if (_visit_referents) {
ZVerifyOopClosure<true /* VisitReferents */> cl;
o->oop_iterate((OopIterateClosure*)&cl);
} else {
ZVerifyOopClosure<false /* VisitReferents */> cl;
o->oop_iterate(&cl);
}
}

@ -0,0 +1,74 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZVERIFY_HPP
#define SHARE_GC_Z_ZVERIFY_HPP
#include "memory/allocation.hpp"
class ZVerify : public AllStatic {
private:
template <typename RootsIterator>
static void roots_impl();
static void roots(bool verify_weaks);
static void roots_weak();
static void roots_concurrent();
static void roots_concurrent_weak();
static void objects(bool verify_weaks);
static void roots_and_objects(bool visit_weaks);
public:
// Verify strong (non-concurrent) roots. Should always be good.
static void roots_strong();
// Verify all strong roots and references after marking.
static void after_mark();
// Verify strong and weak roots and references.
static void after_weak_processing();
};
class VM_ZVerifyOperation : public VM_Operation {
public:
virtual bool needs_inactive_gc_locker() const {
// An inactive GC locker is needed in operations where we change the bad
// mask or move objects. Changing the bad mask will invalidate all oops,
// which makes it conceptually the same thing as moving all objects.
return false;
}
virtual void doit() {
ZVerify::after_weak_processing();
}
bool success() const {
return true;
}
virtual VMOp_Type type() const { return VMOp_ZVerify; }
};
#endif // SHARE_GC_Z_ZVERIFY_HPP

@ -76,6 +76,12 @@
diagnostic(bool, ZVerifyViews, false, \
"Verify heap view accesses") \
\
diagnostic(bool, ZVerifyRoots, trueInDebug, \
"Verify roots") \
\
diagnostic(bool, ZVerifyObjects, false, \
"Verify objects") \
\
diagnostic(bool, ZVerifyMarking, false, \
"Verify marking stacks") \
\
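Usage note, a sketch under the assumption that the usual unlock flags apply (these are diagnostic options, and ZGC itself is experimental in this release): the limited verification taken by pause_verify() can be enabled without the much more expensive full VerifyBeforeGC/VerifyDuringGC/VerifyAfterGC verification, e.g.
java -XX:+UnlockExperimentalVMOptions -XX:+UseZGC \
     -XX:+UnlockDiagnosticVMOptions -XX:+ZVerifyRoots -XX:+ZVerifyObjects ...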

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,7 @@
#include "interpreter/templateInterpreter.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/timerTrace.hpp"
@ -283,9 +284,13 @@ static inline void copy_table(address* from, address* to, int size) {
void TemplateInterpreter::notice_safepoints() {
if (!_notice_safepoints) {
log_debug(interpreter, safepoint)("switching active_table to safept_table.");
// switch to safepoint dispatch table
_notice_safepoints = true;
copy_table((address*)&_safept_table, (address*)&_active_table, sizeof(_active_table) / sizeof(address));
} else {
log_debug(interpreter, safepoint)("active_table is already safept_table; "
"notice_safepoints() call is no-op.");
}
}
@ -297,10 +302,17 @@ void TemplateInterpreter::notice_safepoints() {
void TemplateInterpreter::ignore_safepoints() {
if (_notice_safepoints) {
if (!JvmtiExport::should_post_single_step()) {
log_debug(interpreter, safepoint)("switching active_table to normal_table.");
// switch to normal dispatch table
_notice_safepoints = false;
copy_table((address*)&_normal_table, (address*)&_active_table, sizeof(_active_table) / sizeof(address));
} else {
log_debug(interpreter, safepoint)("single stepping is still active; "
"ignoring ignore_safepoints() call.");
}
} else {
log_debug(interpreter, safepoint)("active_table is already normal_table; "
"ignore_safepoints() call is no-op.");
}
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -99,7 +99,6 @@ void BFSClosure::log_dfs_fallback() const {
}
void BFSClosure::process() {
process_root_set();
process_queue();
}
@ -138,7 +137,6 @@ void BFSClosure::closure_impl(const oop* reference, const oop pointee) {
// if we are processing the initial root set, don't add to queue
if (_current_parent != NULL) {
assert(_current_parent->distance_to_root() == _current_frontier_level, "invariant");
_edge_queue->add(_current_parent, reference);
}
@ -151,20 +149,8 @@ void BFSClosure::closure_impl(const oop* reference, const oop pointee) {
void BFSClosure::add_chain(const oop* reference, const oop pointee) {
assert(pointee != NULL, "invariant");
assert(NULL == pointee->mark(), "invariant");
const size_t length = _current_parent == NULL ? 1 : _current_parent->distance_to_root() + 2;
ResourceMark rm;
Edge* const chain = NEW_RESOURCE_ARRAY(Edge, length);
size_t idx = 0;
chain[idx++] = Edge(NULL, reference);
// aggregate from breadth-first search
const Edge* current = _current_parent;
while (current != NULL) {
chain[idx++] = Edge(NULL, current->reference());
current = current->parent();
}
assert(length == idx, "invariant");
_edge_store->add_chain(chain, length);
Edge leak_edge(_current_parent, reference);
_edge_store->put_chain(&leak_edge, _current_parent == NULL ? 1 : _current_frontier_level + 2);
}
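The length passed to put_chain() can be restated as a helper (editorial sketch; bfs_chain_length is a hypothetical name): the leak edge alone when there is no parent, otherwise the parent's distance to root (the current frontier level) plus the parent edge plus the leak edge itself.
// Hypothetical restatement of the chain length computed above.
static size_t bfs_chain_length(const Edge* parent, size_t frontier_level) {
  return parent == NULL ? 1 : frontier_level + 2;
}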
void BFSClosure::dfs_fallback() {
@ -241,3 +227,12 @@ void BFSClosure::do_oop(narrowOop* ref) {
closure_impl(UnifiedOop::encode(ref), pointee);
}
}
void BFSClosure::do_root(const oop* ref) {
assert(ref != NULL, "invariant");
assert(is_aligned(ref, HeapWordSize), "invariant");
assert(*ref != NULL, "invariant");
if (!_edge_queue->is_full()) {
_edge_queue->add(NULL, ref);
}
}

@ -26,7 +26,6 @@
#define SHARE_JFR_LEAKPROFILER_CHAINS_BFSCLOSURE_HPP
#include "memory/iterator.hpp"
#include "oops/oop.hpp"
class BitSet;
class Edge;
@ -65,6 +64,7 @@ class BFSClosure : public BasicOopIterateClosure {
public:
BFSClosure(EdgeQueue* edge_queue, EdgeStore* edge_store, BitSet* mark_bits);
void process();
void do_root(const oop* ref);
virtual void do_oop(oop* ref);
virtual void do_oop(narrowOop* ref);

@ -47,7 +47,7 @@ class BitSet : public CHeapObj<mtTracing> {
BitMap::idx_t mark_obj(const HeapWord* addr) {
const BitMap::idx_t bit = addr_to_bit(addr);
_bits.par_set_bit(bit);
_bits.set_bit(bit);
return bit;
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,14 +23,14 @@
*/
#include "precompiled.hpp"
#include "jfr/leakprofiler/chains/bitset.hpp"
#include "jfr/leakprofiler/chains/dfsClosure.hpp"
#include "jfr/leakprofiler/chains/edge.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/utilities/granularTimer.hpp"
#include "jfr/leakprofiler/chains/bitset.hpp"
#include "jfr/leakprofiler/utilities/unifiedOop.hpp"
#include "jfr/leakprofiler/utilities/rootType.hpp"
#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
#include "jfr/leakprofiler/utilities/granularTimer.hpp"
#include "jfr/leakprofiler/utilities/rootType.hpp"
#include "jfr/leakprofiler/utilities/unifiedOop.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
@ -88,15 +88,15 @@ void DFSClosure::find_leaks_from_root_set(EdgeStore* edge_store,
// Mark root set, to avoid going sideways
_max_depth = 1;
_ignore_root_set = false;
DFSClosure dfs1;
RootSetClosure::process_roots(&dfs1);
DFSClosure dfs;
RootSetClosure<DFSClosure> rs(&dfs);
rs.process();
// Depth-first search
_max_depth = max_dfs_depth;
_ignore_root_set = true;
assert(_start_edge == NULL, "invariant");
DFSClosure dfs2;
RootSetClosure::process_roots(&dfs2);
rs.process();
}
void DFSClosure::closure_impl(const oop* reference, const oop pointee) {
@ -133,30 +133,29 @@ void DFSClosure::closure_impl(const oop* reference, const oop pointee) {
}
void DFSClosure::add_chain() {
const size_t length = _start_edge == NULL ? _depth + 1 :
_start_edge->distance_to_root() + 1 + _depth + 1;
const size_t array_length = _depth + 2;
ResourceMark rm;
Edge* const chain = NEW_RESOURCE_ARRAY(Edge, length);
Edge* const chain = NEW_RESOURCE_ARRAY(Edge, array_length);
size_t idx = 0;
// aggregate from depth-first search
const DFSClosure* c = this;
while (c != NULL) {
chain[idx++] = Edge(NULL, c->reference());
const size_t next = idx + 1;
chain[idx++] = Edge(&chain[next], c->reference());
c = c->parent();
}
assert(idx == _depth + 1, "invariant");
assert(_depth + 1 == idx, "invariant");
assert(array_length == idx + 1, "invariant");
// aggregate from breadth-first search
const Edge* current = _start_edge;
while (current != NULL) {
chain[idx++] = Edge(NULL, current->reference());
current = current->parent();
if (_start_edge != NULL) {
chain[idx++] = *_start_edge;
} else {
chain[idx - 1] = Edge(NULL, chain[idx - 1].reference());
}
assert(idx == length, "invariant");
_edge_store->add_chain(chain, length);
_edge_store->put_chain(chain, idx + (_start_edge != NULL ? _start_edge->distance_to_root() : 0));
}
void DFSClosure::do_oop(oop* ref) {
@ -176,3 +175,11 @@ void DFSClosure::do_oop(narrowOop* ref) {
closure_impl(UnifiedOop::encode(ref), pointee);
}
}
void DFSClosure::do_root(const oop* ref) {
assert(ref != NULL, "invariant");
assert(is_aligned(ref, HeapWordSize), "invariant");
const oop pointee = *ref;
assert(pointee != NULL, "invariant");
closure_impl(ref, pointee);
}

@ -26,7 +26,6 @@
#define SHARE_JFR_LEAKPROFILER_CHAINS_DFSCLOSURE_HPP
#include "memory/iterator.hpp"
#include "oops/oop.hpp"
class BitSet;
class Edge;
@ -34,7 +33,7 @@ class EdgeStore;
class EdgeQueue;
// Class responsible for iterating the heap depth-first
class DFSClosure: public BasicOopIterateClosure {
class DFSClosure : public BasicOopIterateClosure {
private:
static EdgeStore* _edge_store;
static BitSet* _mark_bits;
@ -57,6 +56,7 @@ class DFSClosure: public BasicOopIterateClosure {
public:
static void find_leaks_from_edge(EdgeStore* edge_store, BitSet* mark_bits, const Edge* start_edge);
static void find_leaks_from_root_set(EdgeStore* edge_store, BitSet* mark_bits);
void do_root(const oop* ref);
virtual void do_oop(oop* ref);
virtual void do_oop(narrowOop* ref);

@ -29,7 +29,7 @@
#include "oops/oopsHierarchy.hpp"
class Edge {
private:
protected:
const Edge* _parent;
const oop* _reference;
public:

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,37 +27,17 @@
#include "jfr/leakprofiler/chains/edgeUtils.hpp"
#include "oops/oop.inline.hpp"
RoutableEdge::RoutableEdge() : Edge() {}
RoutableEdge::RoutableEdge(const Edge* parent, const oop* reference) : Edge(parent, reference),
_skip_edge(NULL),
_skip_length(0),
_processed(false) {}
StoredEdge::StoredEdge() : Edge() {}
StoredEdge::StoredEdge(const Edge* parent, const oop* reference) : Edge(parent, reference), _gc_root_id(0), _skip_length(0) {}
RoutableEdge::RoutableEdge(const Edge& edge) : Edge(edge),
_skip_edge(NULL),
_skip_length(0),
_processed(false) {}
StoredEdge::StoredEdge(const Edge& edge) : Edge(edge), _gc_root_id(0), _skip_length(0) {}
RoutableEdge::RoutableEdge(const RoutableEdge& edge) : Edge(edge),
_skip_edge(edge._skip_edge),
_skip_length(edge._skip_length),
_processed(edge._processed) {}
StoredEdge::StoredEdge(const StoredEdge& edge) : Edge(edge), _gc_root_id(edge._gc_root_id), _skip_length(edge._skip_length) {}
void RoutableEdge::operator=(const RoutableEdge& edge) {
void StoredEdge::operator=(const StoredEdge& edge) {
Edge::operator=(edge);
_skip_edge = edge._skip_edge;
_gc_root_id = edge._gc_root_id;
_skip_length = edge._skip_length;
_processed = edge._processed;
}
size_t RoutableEdge::logical_distance_to_root() const {
size_t depth = 0;
const RoutableEdge* current = logical_parent();
while (current != NULL) {
depth++;
current = current->logical_parent();
}
return depth;
}
traceid EdgeStore::_edge_id_counter = 0;
@ -69,79 +49,12 @@ EdgeStore::EdgeStore() : _edges(NULL) {
EdgeStore::~EdgeStore() {
assert(_edges != NULL, "invariant");
delete _edges;
_edges = NULL;
}
const Edge* EdgeStore::get_edge(const Edge* edge) const {
assert(edge != NULL, "invariant");
EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference());
return entry != NULL ? entry->literal_addr() : NULL;
}
const Edge* EdgeStore::put(const Edge* edge) {
assert(edge != NULL, "invariant");
const RoutableEdge e = *edge;
assert(NULL == _edges->lookup_only(e, (uintptr_t)e.reference()), "invariant");
EdgeEntry& entry = _edges->put(e, (uintptr_t)e.reference());
return entry.literal_addr();
}
traceid EdgeStore::get_id(const Edge* edge) const {
assert(edge != NULL, "invariant");
EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference());
assert(entry != NULL, "invariant");
return entry->id();
}
traceid EdgeStore::get_root_id(const Edge* edge) const {
assert(edge != NULL, "invariant");
const Edge* root = EdgeUtils::root(*edge);
assert(root != NULL, "invariant");
return get_id(root);
}
void EdgeStore::add_chain(const Edge* chain, size_t length) {
assert(chain != NULL, "invariant");
assert(length > 0, "invariant");
size_t bottom_index = length - 1;
const size_t top_index = 0;
const Edge* stored_parent_edge = NULL;
// determine level of shared ancestry
for (; bottom_index > top_index; --bottom_index) {
const Edge* stored_edge = get_edge(&chain[bottom_index]);
if (stored_edge != NULL) {
stored_parent_edge = stored_edge;
continue;
}
break;
}
// insertion of new Edges
for (int i = (int)bottom_index; i >= (int)top_index; --i) {
Edge edge(stored_parent_edge, chain[i].reference());
stored_parent_edge = put(&edge);
}
const oop sample_object = stored_parent_edge->pointee();
assert(sample_object != NULL, "invariant");
assert(NULL == sample_object->mark(), "invariant");
// Install the "top" edge of the chain into the sample object mark oop.
// This associates the sample object with its navigable reference chain.
sample_object->set_mark(markOop(stored_parent_edge));
}
bool EdgeStore::is_empty() const {
return !_edges->has_entries();
}
size_t EdgeStore::number_of_entries() const {
return _edges->cardinality();
}
void EdgeStore::assign_id(EdgeEntry* entry) {
assert(entry != NULL, "invariant");
assert(entry->id() == 0, "invariant");
@ -153,3 +66,254 @@ bool EdgeStore::equals(const Edge& query, uintptr_t hash, const EdgeEntry* entry
assert(entry->hash() == hash, "invariant");
return true;
}
#ifdef ASSERT
bool EdgeStore::contains(const oop* reference) const {
return get(reference) != NULL;
}
#endif
StoredEdge* EdgeStore::get(const oop* reference) const {
assert(reference != NULL, "invariant");
const StoredEdge e(NULL, reference);
EdgeEntry* const entry = _edges->lookup_only(e, (uintptr_t)reference);
return entry != NULL ? entry->literal_addr() : NULL;
}
StoredEdge* EdgeStore::put(const oop* reference) {
assert(reference != NULL, "invariant");
const StoredEdge e(NULL, reference);
assert(NULL == _edges->lookup_only(e, (uintptr_t)reference), "invariant");
EdgeEntry& entry = _edges->put(e, (uintptr_t)reference);
return entry.literal_addr();
}
traceid EdgeStore::get_id(const Edge* edge) const {
assert(edge != NULL, "invariant");
EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference());
assert(entry != NULL, "invariant");
return entry->id();
}
traceid EdgeStore::gc_root_id(const Edge* edge) const {
assert(edge != NULL, "invariant");
const traceid gc_root_id = static_cast<const StoredEdge*>(edge)->gc_root_id();
if (gc_root_id != 0) {
return gc_root_id;
}
// not cached
assert(edge != NULL, "invariant");
const Edge* const root = EdgeUtils::root(*edge);
assert(root != NULL, "invariant");
assert(root->parent() == NULL, "invariant");
return get_id(root);
}
static const Edge* get_skip_ancestor(const Edge** current, size_t distance_to_root, size_t* skip_length) {
assert(distance_to_root >= EdgeUtils::root_context, "invariant");
assert(*skip_length == 0, "invariant");
*skip_length = distance_to_root - (EdgeUtils::root_context - 1);
const Edge* const target = EdgeUtils::ancestor(**current, *skip_length);
assert(target != NULL, "invariant");
assert(target->distance_to_root() + 1 == EdgeUtils::root_context, "invariant");
return target;
}
bool EdgeStore::put_skip_edge(StoredEdge** previous, const Edge** current, size_t distance_to_root) {
assert(*previous != NULL, "invariant");
assert((*previous)->parent() == NULL, "invariant");
assert(*current != NULL, "invariant");
assert((*current)->distance_to_root() == distance_to_root, "invariant");
if (distance_to_root < EdgeUtils::root_context) {
// nothing to skip
return false;
}
size_t skip_length = 0;
const Edge* const skip_ancestor = get_skip_ancestor(current, distance_to_root, &skip_length);
assert(skip_ancestor != NULL, "invariant");
(*previous)->set_skip_length(skip_length);
// lookup target
StoredEdge* stored_target = get(skip_ancestor->reference());
if (stored_target != NULL) {
(*previous)->set_parent(stored_target);
// linked to existing, complete
return true;
}
assert(stored_target == NULL, "invariant");
stored_target = put(skip_ancestor->reference());
assert(stored_target != NULL, "invariant");
(*previous)->set_parent(stored_target);
*previous = stored_target;
*current = skip_ancestor->parent();
return false;
}
static void link_edge(const StoredEdge* current_stored, StoredEdge** previous) {
assert(current_stored != NULL, "invariant");
assert(*previous != NULL, "invariant");
assert((*previous)->parent() == NULL, "invariant");
(*previous)->set_parent(current_stored);
}
static const StoredEdge* find_closest_skip_edge(const StoredEdge* edge, size_t* distance) {
assert(edge != NULL, "invariant");
assert(distance != NULL, "invariant");
const StoredEdge* current = edge;
*distance = 1;
while (current != NULL && !current->is_skip_edge()) {
++(*distance);
current = current->parent();
}
return current;
}
void EdgeStore::link_with_existing_chain(const StoredEdge* current_stored, StoredEdge** previous, size_t previous_length) {
assert(current_stored != NULL, "invariant");
assert((*previous)->parent() == NULL, "invariant");
size_t distance_to_skip_edge; // including the skip edge itself
const StoredEdge* const closest_skip_edge = find_closest_skip_edge(current_stored, &distance_to_skip_edge);
if (closest_skip_edge == NULL) {
// no skip edge found implies the chain reaches a root
if (distance_to_skip_edge + previous_length <= EdgeUtils::max_ref_chain_depth) {
link_edge(current_stored, previous);
return;
}
assert(current_stored->distance_to_root() == distance_to_skip_edge - 2, "invariant");
put_skip_edge(previous, reinterpret_cast<const Edge**>(&current_stored), distance_to_skip_edge - 2);
return;
}
assert(closest_skip_edge->is_skip_edge(), "invariant");
if (distance_to_skip_edge + previous_length <= EdgeUtils::leak_context) {
link_edge(current_stored, previous);
return;
}
// create a new skip edge with derived information from closest skip edge
(*previous)->set_skip_length(distance_to_skip_edge + closest_skip_edge->skip_length());
(*previous)->set_parent(closest_skip_edge->parent());
}
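// Worked example (illustrative only, not part of this change): if an
// existing chain is joined 7 edges away from a skip edge that itself skips
// 200 edges, the edge being linked becomes a new skip edge with
// skip_length 7 + 200 = 207 and is parented directly to the parent of the
// existing skip edge, preserving the logical distance to the root context.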
StoredEdge* EdgeStore::link_new_edge(StoredEdge** previous, const Edge** current) {
assert(*previous != NULL, "invariant");
assert((*previous)->parent() == NULL, "invariant");
assert(*current != NULL, "invariant");
assert(!contains((*current)->reference()), "invariant");
StoredEdge* const stored_edge = put((*current)->reference());
assert(stored_edge != NULL, "invariant");
link_edge(stored_edge, previous);
return stored_edge;
}
bool EdgeStore::put_edges(StoredEdge** previous, const Edge** current, size_t limit) {
assert(*previous != NULL, "invariant");
assert(*current != NULL, "invariant");
size_t depth = 1;
while (*current != NULL && depth < limit) {
StoredEdge* stored_edge = get((*current)->reference());
if (stored_edge != NULL) {
link_with_existing_chain(stored_edge, previous, depth);
return true;
}
stored_edge = link_new_edge(previous, current);
assert((*previous)->parent() != NULL, "invariant");
*previous = stored_edge;
*current = (*current)->parent();
++depth;
}
return NULL == *current;
}
// Install the immediate edge into the mark word of the leak candidate object
StoredEdge* EdgeStore::associate_leak_context_with_candidate(const Edge* edge) {
assert(edge != NULL, "invariant");
assert(!contains(edge->reference()), "invariant");
StoredEdge* const leak_context_edge = put(edge->reference());
oop sample_object = edge->pointee();
assert(sample_object != NULL, "invariant");
assert(NULL == sample_object->mark(), "invariant");
sample_object->set_mark(markOop(leak_context_edge));
return leak_context_edge;
}
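// Retrieval counterpart (illustrative sketch, not part of this change; a
// real version would need runtime/safepoint.hpp): once installed, the leak
// context edge is recovered from the sample object's mark word, which is
// only stable at a safepoint. EventEmitter::write_event() performs this load.
static const Edge* sketch_leak_context_edge(const oop sample_object) {
  assert(SafepointSynchronize::is_at_safepoint(), "mark words are stable only at a safepoint");
  return (const Edge*)sample_object->mark();
}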
/*
* The purpose of put_chain() is to reify the edge sequence
* discovered during heap traversal with a normalized logical copy.
* This copy consists of two sub-sequences and a connecting link (skip edge).
*
* "current" can be thought of as the cursor (search) edge, it is not in the edge store.
* "previous" is always an edge in the edge store.
* The leak context edge is the edge adjacent to the leak candidate object, always an edge in the edge store.
*/
void EdgeStore::put_chain(const Edge* chain, size_t length) {
assert(chain != NULL, "invariant");
assert(chain->distance_to_root() + 1 == length, "invariant");
StoredEdge* const leak_context_edge = associate_leak_context_with_candidate(chain);
assert(leak_context_edge != NULL, "invariant");
assert(leak_context_edge->parent() == NULL, "invariant");
if (1 == length) {
return;
}
const Edge* current = chain->parent();
assert(current != NULL, "invariant");
StoredEdge* previous = leak_context_edge;
// a leak context is the sequence of (limited) edges reachable from the leak candidate
if (put_edges(&previous, &current, EdgeUtils::leak_context)) {
// complete
assert(previous != NULL, "invariant");
put_chain_epilogue(leak_context_edge, EdgeUtils::root(*previous));
return;
}
const size_t distance_to_root = length > EdgeUtils::leak_context ? length - 1 - EdgeUtils::leak_context : length - 1;
assert(current->distance_to_root() == distance_to_root, "invariant");
// a skip edge is the logical link
// connecting the leak context sequence with the root context sequence
if (put_skip_edge(&previous, &current, distance_to_root)) {
// complete
assert(previous != NULL, "invariant");
assert(previous->is_skip_edge(), "invariant");
assert(previous->parent() != NULL, "invariant");
put_chain_epilogue(leak_context_edge, EdgeUtils::root(*previous->parent()));
return;
}
assert(current->distance_to_root() < EdgeUtils::root_context, "invariant");
// a root context is the sequence of (limited) edges reachable from the root
put_edges(&previous, &current, EdgeUtils::root_context);
assert(previous != NULL, "invariant");
put_chain_epilogue(leak_context_edge, EdgeUtils::root(*previous));
}
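// Worked example (illustrative only, not part of this change): for a
// discovered chain of length 400, the leak context edge has
// distance_to_root 399. After the leak context sub-sequence of
// EdgeUtils::leak_context (100) edges is stored, the cursor sits at
// distance_to_root 299, so get_skip_ancestor() computes
//   skip_length = 299 - (root_context - 1) = 200
// and the skip target lands at distance_to_root 99, leaving exactly
// EdgeUtils::root_context (100) edges for the root sub-sequence. The
// normalized copy thus stores 200 edges, with the boundary edge carrying
// skip_length 200, instead of all 400.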
void EdgeStore::put_chain_epilogue(StoredEdge* leak_context_edge, const Edge* root) const {
assert(leak_context_edge != NULL, "invariant");
assert(root != NULL, "invariant");
store_gc_root_id_in_leak_context_edge(leak_context_edge, root);
assert(leak_context_edge->distance_to_root() + 1 <= EdgeUtils::max_ref_chain_depth, "invariant");
}
// To avoid another traversal to resolve the root edge id later,
// cache it in the immediate leak context edge for fast retrieval.
void EdgeStore::store_gc_root_id_in_leak_context_edge(StoredEdge* leak_context_edge, const Edge* root) const {
assert(leak_context_edge != NULL, "invariant");
assert(leak_context_edge->gc_root_id() == 0, "invariant");
assert(root != NULL, "invariant");
assert(root->parent() == NULL, "invariant");
assert(root->distance_to_root() == 0, "invariant");
const StoredEdge* const stored_root = static_cast<const StoredEdge*>(root);
traceid root_id = stored_root->gc_root_id();
if (root_id == 0) {
root_id = get_id(root);
stored_root->set_gc_root_id(root_id);
}
assert(root_id != 0, "invariant");
leak_context_edge->set_gc_root_id(root_id);
assert(leak_context_edge->gc_root_id() == stored_root->gc_root_id(), "invariant");
}

@ -25,64 +25,40 @@
#ifndef SHARE_JFR_LEAKPROFILER_CHAINS_EDGESTORE_HPP
#define SHARE_JFR_LEAKPROFILER_CHAINS_EDGESTORE_HPP
#include "jfr/utilities/jfrHashtable.hpp"
#include "jfr/leakprofiler/chains/edge.hpp"
#include "jfr/utilities/jfrHashtable.hpp"
#include "memory/allocation.hpp"
typedef u8 traceid;
class RoutableEdge : public Edge {
class StoredEdge : public Edge {
private:
mutable const RoutableEdge* _skip_edge;
mutable size_t _skip_length;
mutable bool _processed;
mutable traceid _gc_root_id;
size_t _skip_length;
public:
RoutableEdge();
RoutableEdge(const Edge* parent, const oop* reference);
RoutableEdge(const Edge& edge);
RoutableEdge(const RoutableEdge& edge);
void operator=(const RoutableEdge& edge);
StoredEdge();
StoredEdge(const Edge* parent, const oop* reference);
StoredEdge(const Edge& edge);
StoredEdge(const StoredEdge& edge);
void operator=(const StoredEdge& edge);
const RoutableEdge* skip_edge() const { return _skip_edge; }
traceid gc_root_id() const { return _gc_root_id; }
void set_gc_root_id(traceid root_id) const { _gc_root_id = root_id; }
bool is_skip_edge() const { return _skip_length != 0; }
size_t skip_length() const { return _skip_length; }
void set_skip_length(size_t length) { _skip_length = length; }
bool is_skip_edge() const { return _skip_edge != NULL; }
bool processed() const { return _processed; }
bool is_sentinel() const {
return _skip_edge == NULL && _skip_length == 1;
void set_parent(const Edge* edge) { this->_parent = edge; }
StoredEdge* parent() const {
return const_cast<StoredEdge*>(static_cast<const StoredEdge*>(Edge::parent()));
}
void set_skip_edge(const RoutableEdge* edge) const {
assert(!is_skip_edge(), "invariant");
assert(edge != this, "invariant");
_skip_edge = edge;
}
void set_skip_length(size_t length) const {
_skip_length = length;
}
void set_processed() const {
assert(!_processed, "invariant");
_processed = true;
}
// true navigation according to physical tree representation
const RoutableEdge* physical_parent() const {
return static_cast<const RoutableEdge*>(parent());
}
// logical navigation taking skip levels into account
const RoutableEdge* logical_parent() const {
return is_skip_edge() ? skip_edge() : physical_parent();
}
size_t logical_distance_to_root() const;
};
class EdgeStore : public CHeapObj<mtTracing> {
typedef HashTableHost<RoutableEdge, traceid, Entry, EdgeStore> EdgeHashTable;
typedef HashTableHost<StoredEdge, traceid, Entry, EdgeStore> EdgeHashTable;
typedef EdgeHashTable::HashEntry EdgeEntry;
template <typename,
typename,
@ -90,6 +66,9 @@ class EdgeStore : public CHeapObj<mtTracing> {
typename,
size_t>
friend class HashTableHost;
friend class EventEmitter;
friend class ObjectSampleWriter;
friend class ObjectSampleCheckpoint;
private:
static traceid _edge_id_counter;
EdgeHashTable* _edges;
@ -98,22 +77,31 @@ class EdgeStore : public CHeapObj<mtTracing> {
void assign_id(EdgeEntry* entry);
bool equals(const Edge& query, uintptr_t hash, const EdgeEntry* entry);
const Edge* get_edge(const Edge* edge) const;
const Edge* put(const Edge* edge);
StoredEdge* get(const oop* reference) const;
StoredEdge* put(const oop* reference);
traceid gc_root_id(const Edge* edge) const;
bool put_edges(StoredEdge** previous, const Edge** current, size_t length);
bool put_skip_edge(StoredEdge** previous, const Edge** current, size_t distance_to_root);
void put_chain_epilogue(StoredEdge* leak_context_edge, const Edge* root) const;
StoredEdge* associate_leak_context_with_candidate(const Edge* edge);
void store_gc_root_id_in_leak_context_edge(StoredEdge* leak_context_edge, const Edge* root) const;
StoredEdge* link_new_edge(StoredEdge** previous, const Edge** current);
void link_with_existing_chain(const StoredEdge* current_stored, StoredEdge** previous, size_t previous_length);
template <typename T>
void iterate(T& functor) const { _edges->iterate_value<T>(functor); }
DEBUG_ONLY(bool contains(const oop* reference) const;)
public:
EdgeStore();
~EdgeStore();
void add_chain(const Edge* chain, size_t length);
bool is_empty() const;
size_t number_of_entries() const;
traceid get_id(const Edge* edge) const;
traceid get_root_id(const Edge* edge) const;
template <typename T>
void iterate_edges(T& functor) const { _edges->iterate_value<T>(functor); }
void put_chain(const Edge* chain, size_t length);
};
#endif // SHARE_JFR_LEAKPROFILER_CHAINS_EDGESTORE_HPP

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,11 +38,7 @@ bool EdgeUtils::is_leak_edge(const Edge& edge) {
return (const Edge*)edge.pointee()->mark() == &edge;
}
bool EdgeUtils::is_root(const Edge& edge) {
return edge.is_root();
}
static int field_offset(const Edge& edge) {
static int field_offset(const StoredEdge& edge) {
assert(!edge.is_root(), "invariant");
const oop ref_owner = edge.reference_owner();
assert(ref_owner != NULL, "invariant");
@ -56,7 +52,7 @@ static int field_offset(const Edge& edge) {
return offset;
}
static const InstanceKlass* field_type(const Edge& edge) {
static const InstanceKlass* field_type(const StoredEdge& edge) {
assert(!edge.is_root() || !EdgeUtils::is_array_element(edge), "invariant");
return (const InstanceKlass*)edge.reference_owner_klass();
}
@ -138,175 +134,18 @@ const Edge* EdgeUtils::root(const Edge& edge) {
current = parent;
parent = current->parent();
}
assert(current != NULL, "invariant");
return current;
}
// The number of references associated with the leak node;
// it can be viewed as the leak node "context".
// Used to provide leak context for a "capped/skipped" reference chain.
static const size_t leak_context = 100;
// The number of references associated with the root node;
// it can be viewed as the root node "context".
// Used to provide root context for a "capped/skipped" reference chain.
static const size_t root_context = 100;
// A limit on the reference chain depth to be serialized.
static const size_t max_ref_chain_depth = leak_context + root_context;
const RoutableEdge* skip_to(const RoutableEdge& edge, size_t skip_length) {
const RoutableEdge* current = &edge;
const RoutableEdge* parent = current->physical_parent();
const Edge* EdgeUtils::ancestor(const Edge& edge, size_t distance) {
const Edge* current = &edge;
const Edge* parent = current->parent();
size_t seek = 0;
while (parent != NULL && seek != skip_length) {
while (parent != NULL && seek != distance) {
seek++;
current = parent;
parent = parent->physical_parent();
parent = parent->parent();
}
return current;
}
#ifdef ASSERT
static void validate_skip_target(const RoutableEdge* skip_target) {
assert(skip_target != NULL, "invariant");
assert(skip_target->distance_to_root() + 1 == root_context, "invariant");
assert(skip_target->is_sentinel(), "invariant");
}
static void validate_new_skip_edge(const RoutableEdge* new_skip_edge, const RoutableEdge* last_skip_edge, size_t adjustment) {
assert(new_skip_edge != NULL, "invariant");
assert(new_skip_edge->is_skip_edge(), "invariant");
if (last_skip_edge != NULL) {
const RoutableEdge* const target = skip_to(*new_skip_edge->logical_parent(), adjustment);
validate_skip_target(target->logical_parent());
return;
}
assert(last_skip_edge == NULL, "invariant");
// only one level of logical indirection
validate_skip_target(new_skip_edge->logical_parent());
}
#endif // ASSERT
static void install_logical_route(const RoutableEdge* new_skip_edge, size_t skip_target_distance) {
assert(new_skip_edge != NULL, "invariant");
assert(!new_skip_edge->is_skip_edge(), "invariant");
assert(!new_skip_edge->processed(), "invariant");
const RoutableEdge* const skip_target = skip_to(*new_skip_edge, skip_target_distance);
assert(skip_target != NULL, "invariant");
new_skip_edge->set_skip_edge(skip_target);
new_skip_edge->set_skip_length(skip_target_distance);
assert(new_skip_edge->is_skip_edge(), "invariant");
assert(new_skip_edge->logical_parent() == skip_target, "invariant");
}
static const RoutableEdge* find_last_skip_edge(const RoutableEdge& edge, size_t& distance) {
assert(distance == 0, "invariant");
const RoutableEdge* current = &edge;
while (current != NULL) {
if (current->is_skip_edge() && current->skip_edge()->is_sentinel()) {
return current;
}
current = current->physical_parent();
++distance;
}
return current;
}
static void collapse_overlapping_chain(const RoutableEdge& edge,
const RoutableEdge* first_processed_edge,
size_t first_processed_distance) {
assert(first_processed_edge != NULL, "invariant");
// first_processed_edge is already processed / written
assert(first_processed_edge->processed(), "invariant");
assert(first_processed_distance + 1 <= leak_context, "invariant");
// from this first processed edge, attempt to fetch the last skip edge
size_t last_skip_edge_distance = 0;
const RoutableEdge* const last_skip_edge = find_last_skip_edge(*first_processed_edge, last_skip_edge_distance);
const size_t distance_discovered = first_processed_distance + last_skip_edge_distance + 1;
if (distance_discovered <= leak_context || (last_skip_edge == NULL && distance_discovered <= max_ref_chain_depth)) {
// complete chain can be accommodated without modification
return;
}
// backtrack one edge from existing processed edge
const RoutableEdge* const new_skip_edge = skip_to(edge, first_processed_distance - 1);
assert(new_skip_edge != NULL, "invariant");
assert(!new_skip_edge->processed(), "invariant");
assert(new_skip_edge->parent() == first_processed_edge, "invariant");
size_t adjustment = 0;
if (last_skip_edge != NULL) {
assert(leak_context - 1 > first_processed_distance - 1, "invariant");
adjustment = leak_context - first_processed_distance - 1;
assert(last_skip_edge_distance + 1 > adjustment, "invariant");
install_logical_route(new_skip_edge, last_skip_edge_distance + 1 - adjustment);
} else {
install_logical_route(new_skip_edge, last_skip_edge_distance + 1 - root_context);
new_skip_edge->logical_parent()->set_skip_length(1); // sentinel
}
DEBUG_ONLY(validate_new_skip_edge(new_skip_edge, last_skip_edge, adjustment);)
}
static void collapse_non_overlapping_chain(const RoutableEdge& edge,
const RoutableEdge* first_processed_edge,
size_t first_processed_distance) {
assert(first_processed_edge != NULL, "invariant");
assert(!first_processed_edge->processed(), "invariant");
// this implies that the first "processed" edge is the leak context relative "leaf"
assert(first_processed_distance + 1 == leak_context, "invariant");
const size_t distance_to_root = edge.distance_to_root();
if (distance_to_root + 1 <= max_ref_chain_depth) {
// complete chain can be accommodated without constructing a skip edge
return;
}
install_logical_route(first_processed_edge, distance_to_root + 1 - first_processed_distance - root_context);
first_processed_edge->logical_parent()->set_skip_length(1); // sentinel
DEBUG_ONLY(validate_new_skip_edge(first_processed_edge, NULL, 0);)
}
static const RoutableEdge* processed_edge(const RoutableEdge& edge, size_t& distance) {
assert(distance == 0, "invariant");
const RoutableEdge* current = &edge;
while (current != NULL && distance < leak_context - 1) {
if (current->processed()) {
return current;
}
current = current->physical_parent();
++distance;
}
assert(distance <= leak_context - 1, "invariant");
return current;
}
/*
* Some vocabulary:
* -----------
* "Context" is an interval in the chain, it is associcated with an edge and it signifies a number of connected edges.
* "Processed / written" means an edge that has already been serialized.
* "Skip edge" is an edge that contains additional information for logical routing purposes.
* "Skip target" is an edge used as a destination for a skip edge
*/
void EdgeUtils::collapse_chain(const RoutableEdge& edge) {
assert(is_leak_edge(edge), "invariant");
// attempt to locate an already processed edge inside current leak context (if any)
size_t first_processed_distance = 0;
const RoutableEdge* const first_processed_edge = processed_edge(edge, first_processed_distance);
if (first_processed_edge == NULL) {
return;
}
if (first_processed_edge->processed()) {
collapse_overlapping_chain(edge, first_processed_edge, first_processed_distance);
} else {
collapse_non_overlapping_chain(edge, first_processed_edge, first_processed_distance);
}
assert(edge.logical_distance_to_root() + 1 <= max_ref_chain_depth, "invariant");
}

@ -28,15 +28,17 @@
#include "memory/allocation.hpp"
class Edge;
class RoutableEdge;
class Symbol;
class EdgeUtils : public AllStatic {
public:
static bool is_leak_edge(const Edge& edge);
static const size_t leak_context = 100;
static const size_t root_context = 100;
static const size_t max_ref_chain_depth = leak_context + root_context;
static bool is_leak_edge(const Edge& edge);
static const Edge* root(const Edge& edge);
static bool is_root(const Edge& edge);
static const Edge* ancestor(const Edge& edge, size_t distance);
static bool is_array_element(const Edge& edge);
static int array_index(const Edge& edge);
@ -44,8 +46,6 @@ class EdgeUtils : public AllStatic {
static const Symbol* field_name_symbol(const Edge& edge);
static jshort field_modifiers(const Edge& edge);
static void collapse_chain(const RoutableEdge& edge);
};
#endif // SHARE_JFR_LEAKPROFILER_CHAINS_EDGEUTILS_HPP

@ -0,0 +1,132 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/leakprofiler/chains/bfsClosure.hpp"
#include "jfr/leakprofiler/chains/bitset.hpp"
#include "jfr/leakprofiler/chains/dfsClosure.hpp"
#include "jfr/leakprofiler/chains/edge.hpp"
#include "jfr/leakprofiler/chains/edgeQueue.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
#include "jfr/leakprofiler/chains/pathToGcRootsOperation.hpp"
#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp"
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
#include "jfr/leakprofiler/sampling/objectSample.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/leakprofiler/utilities/granularTimer.hpp"
#include "logging/log.hpp"
#include "memory/universe.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/globalDefinitions.hpp"
PathToGcRootsOperation::PathToGcRootsOperation(ObjectSampler* sampler, EdgeStore* edge_store, int64_t cutoff, bool emit_all) :
_sampler(sampler), _edge_store(edge_store), _cutoff_ticks(cutoff), _emit_all(emit_all) {}
/* The EdgeQueue is backed by directly managed virtual memory.
* We will attempt to dimension an initial reservation
* in proportion to the size of the heap (represented by heap_region).
* Initial memory reservation: 5% of the heap OR at least 32 MB
* Commit ratio: 1 : 10 (subject to allocation granularities)
*/
static size_t edge_queue_memory_reservation(const MemRegion& heap_region) {
const size_t memory_reservation_bytes = MAX2(heap_region.byte_size() / 20, 32*M);
assert(memory_reservation_bytes >= (size_t)32*M, "invariant");
return memory_reservation_bytes;
}
static size_t edge_queue_memory_commit_size(size_t memory_reservation_bytes) {
const size_t memory_commit_block_size_bytes = memory_reservation_bytes / 10;
assert(memory_commit_block_size_bytes >= (size_t)3*M, "invariant");
return memory_commit_block_size_bytes;
}
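// Worked example (illustrative only, not part of this change): for a
// hypothetical 8 GB heap,
//   reservation  = MAX2(8 G / 20, 32 M) = ~410 M
//   commit block = reservation / 10     = ~41 M
// so the queue grows in ~41 M commit steps within a ~410 M reservation.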
static void log_edge_queue_summary(const EdgeQueue& edge_queue) {
log_trace(jfr, system)("EdgeQueue reserved size total: " SIZE_FORMAT " [KB]", edge_queue.reserved_size() / K);
log_trace(jfr, system)("EdgeQueue edges total: " SIZE_FORMAT, edge_queue.top());
log_trace(jfr, system)("EdgeQueue liveset total: " SIZE_FORMAT " [KB]", edge_queue.live_set() / K);
if (edge_queue.reserved_size() > 0) {
log_trace(jfr, system)("EdgeQueue commit reserve ratio: %f\n",
((double)edge_queue.live_set() / (double)edge_queue.reserved_size()));
}
}
void PathToGcRootsOperation::doit() {
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
assert(_cutoff_ticks > 0, "invariant");
// The bitset used for marking is dimensioned as a function of the heap size
const MemRegion heap_region = Universe::heap()->reserved_region();
BitSet mark_bits(heap_region);
// The edge queue is dimensioned as a fraction of the heap size
const size_t edge_queue_reservation_size = edge_queue_memory_reservation(heap_region);
EdgeQueue edge_queue(edge_queue_reservation_size, edge_queue_memory_commit_size(edge_queue_reservation_size));
// The initialize() routines will attempt to reserve and allocate backing storage memory.
// Failure to accommodate will render root chain processing impossible.
// As a fallback on failure, just write out the existing samples, flat, without chains.
if (!(mark_bits.initialize() && edge_queue.initialize())) {
log_warning(jfr)("Unable to allocate memory for root chain processing");
return;
}
// Save the original markWord for the potential leak objects,
// to be restored on function exit
ObjectSampleMarker marker;
if (ObjectSampleCheckpoint::mark(_sampler, marker, _emit_all) == 0) {
// no valid samples to process
return;
}
// Necessary condition for attempting a root set iteration
Universe::heap()->ensure_parsability(false);
BFSClosure bfs(&edge_queue, _edge_store, &mark_bits);
RootSetClosure<BFSClosure> roots(&bfs);
GranularTimer::start(_cutoff_ticks, 1000000);
roots.process();
if (edge_queue.is_full()) {
// Pathological case where roots don't fit in queue
// Do a depth-first search, but mark roots first
// to avoid walking sideways over roots
DFSClosure::find_leaks_from_root_set(_edge_store, &mark_bits);
} else {
bfs.process();
}
GranularTimer::stop();
log_edge_queue_summary(edge_queue);
// Emit old objects including their reference chains as events
EventEmitter emitter(GranularTimer::start_time(), GranularTimer::end_time());
emitter.write_events(_sampler, _edge_store, _emit_all);
}

@ -0,0 +1,46 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_JFR_LEAKPROFILER_CHAINS_PATHTOGCROOTSOPERATION_HPP
#define SHARE_JFR_LEAKPROFILER_CHAINS_PATHTOGCROOTSOPERATION_HPP
#include "jfr/leakprofiler/utilities/vmOperation.hpp"
class EdgeStore;
class ObjectSampler;
// Safepoint operation for finding paths to gc roots
class PathToGcRootsOperation : public OldObjectVMOperation {
private:
ObjectSampler* _sampler;
EdgeStore* const _edge_store;
const int64_t _cutoff_ticks;
const bool _emit_all;
public:
PathToGcRootsOperation(ObjectSampler* sampler, EdgeStore* edge_store, int64_t cutoff, bool emit_all);
virtual void doit();
};
#endif // SHARE_JFR_LEAKPROFILER_CHAINS_PATHTOGCROOTSOPERATION_HPP

@ -28,11 +28,14 @@
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "jfr/leakprofiler/chains/bfsClosure.hpp"
#include "jfr/leakprofiler/chains/dfsClosure.hpp"
#include "jfr/leakprofiler/chains/edgeQueue.hpp"
#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
#include "jfr/leakprofiler/utilities/unifiedOop.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/synchronizer.hpp"
@ -43,11 +46,11 @@
#include "jvmci/jvmci.hpp"
#endif
RootSetClosure::RootSetClosure(EdgeQueue* edge_queue) :
_edge_queue(edge_queue) {
}
template <typename Delegate>
RootSetClosure<Delegate>::RootSetClosure(Delegate* delegate) : _delegate(delegate) {}
void RootSetClosure::do_oop(oop* ref) {
template <typename Delegate>
void RootSetClosure<Delegate>::do_oop(oop* ref) {
assert(ref != NULL, "invariant");
// We discard unaligned root references because
// our reference tagging scheme will use
@ -61,49 +64,40 @@ void RootSetClosure::do_oop(oop* ref) {
}
assert(is_aligned(ref, HeapWordSize), "invariant");
const oop pointee = *ref;
if (pointee != NULL) {
closure_impl(ref, pointee);
if (*ref != NULL) {
_delegate->do_root(ref);
}
}
void RootSetClosure::do_oop(narrowOop* ref) {
template <typename Delegate>
void RootSetClosure<Delegate>::do_oop(narrowOop* ref) {
assert(ref != NULL, "invariant");
assert(is_aligned(ref, sizeof(narrowOop)), "invariant");
const oop pointee = RawAccess<>::oop_load(ref);
if (pointee != NULL) {
closure_impl(UnifiedOop::encode(ref), pointee);
_delegate->do_root(UnifiedOop::encode(ref));
}
}
void RootSetClosure::closure_impl(const oop* reference, const oop pointee) {
if (!_edge_queue->is_full()) {
_edge_queue->add(NULL, reference);
}
}
class RootSetClosureMarkScope : public MarkScope {};
void RootSetClosure::add_to_queue(EdgeQueue* edge_queue) {
RootSetClosure rs(edge_queue);
process_roots(&rs);
}
class RootSetClosureMarkScope : public MarkScope {
};
void RootSetClosure::process_roots(OopClosure* closure) {
template <typename Delegate>
void RootSetClosure<Delegate>::process() {
RootSetClosureMarkScope mark_scope;
CLDToOopClosure cldt_closure(closure, ClassLoaderData::_claim_none);
CLDToOopClosure cldt_closure(this, ClassLoaderData::_claim_none);
ClassLoaderDataGraph::always_strong_cld_do(&cldt_closure);
CodeBlobToOopClosure blobs(closure, false);
Threads::oops_do(closure, &blobs);
ObjectSynchronizer::oops_do(closure);
Universe::oops_do(closure);
JNIHandles::oops_do(closure);
JvmtiExport::oops_do(closure);
SystemDictionary::oops_do(closure);
Management::oops_do(closure);
StringTable::oops_do(closure);
AOTLoader::oops_do(closure);
JVMCI_ONLY(JVMCI::oops_do(closure);)
CodeBlobToOopClosure blobs(this, false);
Threads::oops_do(this, &blobs);
ObjectSynchronizer::oops_do(this);
Universe::oops_do(this);
JNIHandles::oops_do(this);
JvmtiExport::oops_do(this);
SystemDictionary::oops_do(this);
Management::oops_do(this);
StringTable::oops_do(this);
AOTLoader::oops_do(this);
JVMCI_ONLY(JVMCI::oops_do(this);)
}
template class RootSetClosure<BFSClosure>;
template class RootSetClosure<DFSClosure>;
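// Sketch of the implicit Delegate contract (illustrative, not part of this
// change): any type providing do_root(const oop*) can act as a delegate,
// provided a matching explicit instantiation is added alongside the two
// above. The counting delegate below is hypothetical.
class SketchCountingDelegate {
 private:
  size_t _roots;
 public:
  SketchCountingDelegate() : _roots(0) {}
  void do_root(const oop* ref) {
    assert(ref != NULL, "invariant");
    ++_roots; // a real delegate would enqueue or traverse the reference
  }
  size_t roots() const { return _roots; }
};
// Usage at a safepoint mirrors the BFS/DFS call sites:
//   SketchCountingDelegate counter;
//   RootSetClosure<SketchCountingDelegate> roots(&counter);
//   roots.process();
// template class RootSetClosure<SketchCountingDelegate>; // would be required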

@ -26,18 +26,14 @@
#define SHARE_JFR_LEAKPROFILER_CHAINS_ROOTSETCLOSURE_HPP
#include "memory/iterator.hpp"
#include "oops/oop.hpp"
class EdgeQueue;
template <typename Delegate>
class RootSetClosure: public BasicOopIterateClosure {
private:
RootSetClosure(EdgeQueue* edge_queue);
EdgeQueue* _edge_queue;
void closure_impl(const oop* reference, const oop pointee);
Delegate* const _delegate;
public:
static void add_to_queue(EdgeQueue* edge_queue);
static void process_roots(OopClosure* closure);
RootSetClosure(Delegate* delegate);
void process();
virtual void do_oop(oop* reference);
virtual void do_oop(narrowOop* reference);

@ -0,0 +1,148 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/chains/pathToGcRootsOperation.hpp"
#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp"
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
#include "jfr/leakprofiler/sampling/objectSample.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
EventEmitter::EventEmitter(const JfrTicks& start_time, const JfrTicks& end_time) :
_start_time(start_time),
_end_time(end_time),
_thread(Thread::current()),
_jfr_thread_local(_thread->jfr_thread_local()),
_thread_id(_thread->jfr_thread_local()->thread_id()) {}
EventEmitter::~EventEmitter() {
// restore / reset thread local stack trace and thread id
_jfr_thread_local->set_thread_id(_thread_id);
_jfr_thread_local->clear_cached_stack_trace();
}
void EventEmitter::emit(ObjectSampler* sampler, int64_t cutoff_ticks, bool emit_all) {
assert(sampler != NULL, "invariant");
ResourceMark rm;
EdgeStore edge_store;
if (cutoff_ticks <= 0) {
// no reference chains
JfrTicks time_stamp = JfrTicks::now();
EventEmitter emitter(time_stamp, time_stamp);
emitter.write_events(sampler, &edge_store, emit_all);
return;
}
// events emitted with reference chains require a safepoint operation
PathToGcRootsOperation op(sampler, &edge_store, cutoff_ticks, emit_all);
VMThread::execute(&op);
}
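// Usage sketch (hypothetical call site, illustrative only): emit all current
// samples flat, then with reference chains subject to a cutoff:
//   EventEmitter::emit(ObjectSampler::sampler(), 0, true);            // no chains
//   EventEmitter::emit(ObjectSampler::sampler(), cutoff_ticks, true); // chains, via safepoint op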
size_t EventEmitter::write_events(ObjectSampler* object_sampler, EdgeStore* edge_store, bool emit_all) {
assert(_thread == Thread::current(), "invariant");
assert(_thread->jfr_thread_local() == _jfr_thread_local, "invariant");
assert(object_sampler != NULL, "invariant");
assert(edge_store != NULL, "invariant");
const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
size_t count = 0;
const ObjectSample* current = object_sampler->first();
while (current != NULL) {
ObjectSample* prev = current->prev();
if (current->is_alive_and_older_than(last_sweep)) {
write_event(current, edge_store);
++count;
}
current = prev;
}
if (count > 0) {
// serialize associated checkpoints and potential chains
ObjectSampleCheckpoint::write(object_sampler, edge_store, emit_all, _thread);
}
return count;
}
static int array_size(const oop object) {
assert(object != NULL, "invariant");
if (object->is_array()) {
return arrayOop(object)->length();
}
return min_jint;
}
void EventEmitter::write_event(const ObjectSample* sample, EdgeStore* edge_store) {
assert(sample != NULL, "invariant");
assert(!sample->is_dead(), "invariant");
assert(edge_store != NULL, "invariant");
assert(_jfr_thread_local != NULL, "invariant");
const oop* object_addr = sample->object_addr();
traceid gc_root_id = 0;
const Edge* edge = NULL;
if (SafepointSynchronize::is_at_safepoint()) {
edge = (const Edge*)(*object_addr)->mark();
}
if (edge == NULL) {
// In order to dump out a representation of the object,
// even though it was not reachable or its chain was too long,
// we need to register a top level edge for this object.
edge = edge_store->put(object_addr);
} else {
gc_root_id = edge_store->gc_root_id(edge);
}
assert(edge != NULL, "invariant");
const traceid object_id = edge_store->get_id(edge);
assert(object_id != 0, "invariant");
EventOldObjectSample e(UNTIMED);
e.set_starttime(_start_time);
e.set_endtime(_end_time);
e.set_allocationTime(sample->allocation_time());
e.set_lastKnownHeapUsage(sample->heap_used_at_last_gc());
e.set_object(object_id);
e.set_arrayElements(array_size(edge->pointee()));
e.set_root(gc_root_id);
// Temporarily assign both the stack trace id and the thread id
// to the thread-local data structure of the emitter thread (for the duration
// of the commit() call). This trick provides a means to override
// the event generation mechanism by injecting externally provided ids.
// At this particular location, it allows us to emit an old object event
// supplying information from where the actual sampling occurred.
_jfr_thread_local->set_cached_stack_trace_id(sample->stack_trace_id());
assert(sample->has_thread(), "invariant");
_jfr_thread_local->set_thread_id(sample->thread_id());
e.commit();
}
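// The save/inject/restore pattern above, isolated as an RAII sketch
// (illustrative only, not part of this change; the real save and restore
// happen in the EventEmitter constructor and destructor):
class SketchThreadIdOverride {
 private:
  JfrThreadLocal* const _tl;
  const traceid _saved_id;
 public:
  SketchThreadIdOverride(JfrThreadLocal* tl, traceid injected_id) :
    _tl(tl), _saved_id(tl->thread_id()) {
    _tl->set_thread_id(injected_id); // commit() now attributes to the injected id
  }
  ~SketchThreadIdOverride() {
    _tl->set_thread_id(_saved_id); // restore the emitter thread's own id
  }
};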

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,50 +22,37 @@
*
*/
#ifndef SHARE_JFR_LEAKPROFILER_EMITEVENTOPERATION_HPP
#define SHARE_JFR_LEAKPROFILER_EMITEVENTOPERATION_HPP
#ifndef SHARE_JFR_LEAKPROFILER_CHECKPOINT_EVENTEMITTER_HPP
#define SHARE_JFR_LEAKPROFILER_CHECKPOINT_EVENTEMITTER_HPP
#include "runtime/vmOperations.hpp"
#include "memory/allocation.hpp"
#include "jfr/utilities/jfrTime.hpp"
typedef u8 traceid;
class BFSClosure;
class EdgeStore;
class EdgeQueue;
class JfrThreadData;
class JfrThreadLocal;
class ObjectSample;
class ObjectSampler;
class Thread;
class VMThread;
// Safepoint operation for emitting object sample events
class EmitEventOperation : public VM_Operation {
class EventEmitter : public CHeapObj<mtTracing> {
friend class LeakProfiler;
friend class PathToGcRootsOperation;
private:
jlong _cutoff_ticks;
bool _emit_all;
VMThread* _vm_thread;
JfrThreadLocal* _vm_thread_local;
ObjectSampler* _object_sampler;
const JfrTicks& _start_time;
const JfrTicks& _end_time;
Thread* _thread;
JfrThreadLocal* _jfr_thread_local;
traceid _thread_id;
EventEmitter(const JfrTicks& start_time, const JfrTicks& end_time);
~EventEmitter();
void write_event(const ObjectSample* sample, EdgeStore* edge_store);
int write_events(EdgeStore* edge_store);
size_t write_events(ObjectSampler* sampler, EdgeStore* store, bool emit_all);
public:
EmitEventOperation(jlong cutoff_ticks, bool emit_all) :
_cutoff_ticks(cutoff_ticks),
_emit_all(emit_all),
_vm_thread(NULL),
_vm_thread_local(NULL),
_object_sampler(NULL) {
}
VMOp_Type type() const {
return VMOp_GC_HeapInspection;
}
Mode evaluation_mode() const {
return _safepoint;
}
virtual void doit();
static void emit(ObjectSampler* sampler, int64_t cutoff_ticks, bool emit_all);
};
#endif // SHARE_JFR_LEAKPROFILER_EMITEVENTOPERATION_HPP
#endif // SHARE_JFR_LEAKPROFILER_CHECKPOINT_EVENTEMITTER_HPP

@ -181,21 +181,18 @@ class SampleMark {
}
};
void ObjectSampleCheckpoint::install(JfrCheckpointWriter& writer, bool class_unload, bool resume) {
assert(class_unload ? SafepointSynchronize::is_at_safepoint() : LeakProfiler::is_suspended(), "invariant");
void ObjectSampleCheckpoint::install(JfrCheckpointWriter& writer, bool class_unload) {
if (!writer.has_data()) {
if (!class_unload) {
LeakProfiler::resume();
}
assert(LeakProfiler::is_running(), "invariant");
return;
}
assert(writer.has_data(), "invariant");
const JfrCheckpointBlobHandle h_cp = writer.checkpoint_blob();
const ObjectSampler* const object_sampler = LeakProfiler::object_sampler();
// Class unload implies a safepoint.
// If not class unload, the object sampler is locked, because it was claimed exclusively earlier.
// Therefore, direct access to the object sampler instance is safe.
const ObjectSampler* const object_sampler = ObjectSampler::sampler();
assert(object_sampler != NULL, "invariant");
ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
@ -203,80 +200,71 @@ void ObjectSampleCheckpoint::install(JfrCheckpointWriter& writer, bool class_unl
CheckpointInstall install(h_cp);
if (class_unload) {
if (last != NULL) {
// all samples need the class unload information
do_samples(last, NULL, install);
}
assert(LeakProfiler::is_running(), "invariant");
// all samples need class unload information
do_samples(last, NULL, install);
return;
}
// only new samples since last resolved checkpoint
if (last != last_resolved) {
do_samples(last, last_resolved, install);
if (resume) {
const_cast<ObjectSampler*>(object_sampler)->set_last_resolved(last);
}
}
assert(LeakProfiler::is_suspended(), "invariant");
if (resume) {
LeakProfiler::resume();
assert(LeakProfiler::is_running(), "invariant");
const_cast<ObjectSampler*>(object_sampler)->set_last_resolved(last);
}
}
void ObjectSampleCheckpoint::write(const EdgeStore* edge_store, bool emit_all, Thread* thread) {
void ObjectSampleCheckpoint::write(ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread) {
assert(sampler != NULL, "invariant");
assert(edge_store != NULL, "invariant");
assert(thread != NULL, "invariant");
static bool types_registered = false;
if (!types_registered) {
JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTSYSTEM, false, true, new RootSystemType());
JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTTYPE, false, true, new RootType());
types_registered = true;
}
const ObjectSampler* const object_sampler = LeakProfiler::object_sampler();
assert(object_sampler != NULL, "invariant");
const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
const jlong last_sweep = emit_all ? max_jlong : sampler->last_sweep().value();
ObjectSample* const last = const_cast<ObjectSample*>(sampler->last());
{
JfrCheckpointWriter writer(false, false, thread);
CheckpointWrite checkpoint_write(writer, last_sweep);
do_samples(last, NULL, checkpoint_write);
}
CheckpointStateReset state_reset(last_sweep);
do_samples(last, NULL, state_reset);
if (!edge_store->is_empty()) {
// java object and chain representations
JfrCheckpointWriter writer(false, true, thread);
ObjectSampleWriter osw(writer, edge_store);
edge_store->iterate_edges(osw);
edge_store->iterate(osw);
}
}
WriteObjectSampleStacktrace::WriteObjectSampleStacktrace(JfrStackTraceRepository& repo) :
_stack_trace_repo(repo) {
int ObjectSampleCheckpoint::mark(ObjectSampler* object_sampler, ObjectSampleMarker& marker, bool emit_all) {
assert(object_sampler != NULL, "invariant");
ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
if (last == NULL) {
return 0;
}
const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
SampleMark mark(marker, last_sweep);
do_samples(last, NULL, mark);
return mark.count();
}
WriteObjectSampleStacktrace::WriteObjectSampleStacktrace(ObjectSampler* sampler, JfrStackTraceRepository& repo) :
_sampler(sampler), _stack_trace_repo(repo) {}
bool WriteObjectSampleStacktrace::process() {
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
if (!LeakProfiler::is_running()) {
return true;
}
// Suspend the LeakProfiler subsystem
// to ensure stable samples even
// after we return from the safepoint.
LeakProfiler::suspend();
assert(!LeakProfiler::is_running(), "invariant");
assert(LeakProfiler::is_suspended(), "invariant");
assert(LeakProfiler::is_running(), "invariant");
assert(_sampler != NULL, "invariant");
const ObjectSampler* object_sampler = LeakProfiler::object_sampler();
assert(object_sampler != NULL, "invariant");
assert(LeakProfiler::is_suspended(), "invariant");
ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
const ObjectSample* const last_resolved = object_sampler->last_resolved();
ObjectSample* const last = const_cast<ObjectSample*>(_sampler->last());
const ObjectSample* const last_resolved = _sampler->last_resolved();
if (last == last_resolved) {
assert(LeakProfiler::is_suspended(), "invariant");
return true;
}
@ -294,27 +282,13 @@ bool WriteObjectSampleStacktrace::process() {
}
if (count == 0) {
writer.set_context(ctx);
assert(LeakProfiler::is_suspended(), "invariant");
return true;
}
assert(count > 0, "invariant");
writer.write_count((u4)count, count_offset);
JfrStackTraceRepository::write_metadata(writer);
ObjectSampleCheckpoint::install(writer, false, false);
assert(LeakProfiler::is_suspended(), "invariant");
// install the stacktrace checkpoint information to the candidates
ObjectSampleCheckpoint::install(writer, false);
return true;
}
int ObjectSampleCheckpoint::mark(ObjectSampleMarker& marker, bool emit_all) {
const ObjectSampler* object_sampler = LeakProfiler::object_sampler();
assert(object_sampler != NULL, "invariant");
ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
if (last == NULL) {
return 0;
}
const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
SampleMark mark(marker, last_sweep);
do_samples(last, NULL, mark);
return mark.count();
}

@ -26,25 +26,26 @@
#define SHARE_JFR_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLECHECKPOINT_HPP
#include "memory/allocation.hpp"
#include "utilities/exceptions.hpp"
class EdgeStore;
class JfrStackTraceRepository;
class JfrCheckpointWriter;
class JfrStackTraceRepository;
class ObjectSampleMarker;
class ObjectSampler;
class ObjectSampleCheckpoint : AllStatic {
public:
static void install(JfrCheckpointWriter& writer, bool class_unload, bool resume);
static void write(const EdgeStore* edge_store, bool emit_all, Thread* thread);
static int mark(ObjectSampleMarker& marker, bool emit_all);
static void install(JfrCheckpointWriter& writer, bool class_unload);
static void write(ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread);
static int mark(ObjectSampler* sampler, ObjectSampleMarker& marker, bool emit_all);
};
class WriteObjectSampleStacktrace : public StackObj {
private:
ObjectSampler* const _sampler;
JfrStackTraceRepository& _stack_trace_repo;
public:
WriteObjectSampleStacktrace(JfrStackTraceRepository& repo);
WriteObjectSampleStacktrace(ObjectSampler* sampler, JfrStackTraceRepository& repo);
bool process();
};

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -350,7 +350,7 @@ int __write_root_description_info__(JfrCheckpointWriter* writer, JfrArtifactSet*
return 1;
}
static traceid get_root_description_info_id(const Edge& edge, traceid id) {
static traceid get_gc_root_description_info_id(const Edge& edge, traceid id) {
assert(edge.is_root(), "invariant");
if (EdgeUtils::is_leak_edge(edge)) {
return 0;
@ -518,7 +518,7 @@ static void write_root_descriptors(JfrCheckpointWriter& writer) {
}
}
static void add_old_object_sample_info(const Edge* current, traceid id) {
static void add_old_object_sample_info(const StoredEdge* current, traceid id) {
assert(current != NULL, "invariant");
if (sample_infos == NULL) {
sample_infos = new SampleInfo();
@ -528,11 +528,11 @@ static void add_old_object_sample_info(const Edge* current, traceid id) {
assert(oosi != NULL, "invariant");
oosi->_id = id;
oosi->_data._object = current->pointee();
oosi->_data._reference_id = current->is_root() ? (traceid)0 : id;
oosi->_data._reference_id = current->parent() == NULL ? (traceid)0 : id;
sample_infos->store(oosi);
}
static void add_reference_info(const RoutableEdge* current, traceid id, traceid parent_id) {
static void add_reference_info(const StoredEdge* current, traceid id, traceid parent_id) {
assert(current != NULL, "invariant");
if (ref_infos == NULL) {
ref_infos = new RefInfo();
@ -544,37 +544,43 @@ static void add_reference_info(const RoutableEdge* current, traceid id, traceid
ri->_id = id;
ri->_data._array_info_id = !current->is_skip_edge() ? get_array_info_id(*current, id) : 0;
ri->_data._field_info_id = ri->_data._array_info_id == 0 && !current->is_skip_edge() ?
get_field_info_id(*current) : (traceid)0;
ri->_data._field_info_id = ri->_data._array_info_id == 0 && !current->is_skip_edge() ? get_field_info_id(*current) : (traceid)0;
ri->_data._old_object_sample_id = parent_id;
ri->_data._skip = current->skip_length();
ref_infos->store(ri);
}
static traceid add_root_info(const Edge* root, traceid id) {
assert(root != NULL, "invariant");
assert(root->is_root(), "invariant");
return get_root_description_info_id(*root, id);
static bool is_gc_root(const StoredEdge* current) {
assert(current != NULL, "invariant");
return current->parent() == NULL && current->gc_root_id() != 0;
}
void ObjectSampleWriter::write(const RoutableEdge* edge) {
static traceid add_gc_root_info(const StoredEdge* root, traceid id) {
assert(root != NULL, "invariant");
assert(is_gc_root(root), "invariant");
return get_gc_root_description_info_id(*root, id);
}
void ObjectSampleWriter::write(const StoredEdge* edge) {
assert(edge != NULL, "invariant");
const traceid id = _store->get_id(edge);
add_old_object_sample_info(edge, id);
const RoutableEdge* parent = edge->logical_parent();
const StoredEdge* const parent = edge->parent();
if (parent != NULL) {
add_reference_info(edge, id, _store->get_id(parent));
} else {
assert(edge->is_root(), "invariant");
add_root_info(edge, id);
if (is_gc_root(edge)) {
assert(edge->gc_root_id() == id, "invariant");
add_gc_root_info(edge, id);
}
}
}
ObjectSampleWriter::ObjectSampleWriter(JfrCheckpointWriter& writer, const EdgeStore* store) :
ObjectSampleWriter::ObjectSampleWriter(JfrCheckpointWriter& writer, EdgeStore* store) :
_writer(writer),
_store(store) {
assert(store != NULL, "invariant");
assert(store->number_of_entries() > 0, "invariant");
assert(!store->is_empty(), "invariant");
sample_infos = NULL;
ref_infos = NULL;
array_infos = NULL;
@ -590,26 +596,7 @@ ObjectSampleWriter::~ObjectSampleWriter() {
write_root_descriptors(_writer);
}
void ObjectSampleWriter::write_chain(const RoutableEdge& edge) {
assert(EdgeUtils::is_leak_edge(edge), "invariant");
if (edge.processed()) {
return;
}
EdgeUtils::collapse_chain(edge);
const RoutableEdge* current = &edge;
while (current != NULL) {
if (current->processed()) {
return;
}
write(current);
current->set_processed();
current = current->logical_parent();
}
}
bool ObjectSampleWriter::operator()(const RoutableEdge& edge) {
if (EdgeUtils::is_leak_edge(edge)) {
write_chain(edge);
}
bool ObjectSampleWriter::operator()(StoredEdge& e) {
write(&e);
return true;
}

@ -30,21 +30,17 @@
class Edge;
class EdgeStore;
class JfrCheckpointWriter;
class RoutableEdge;
class StoredEdge;
class ObjectSampleWriter : public StackObj {
private:
JfrCheckpointWriter& _writer;
const EdgeStore* const _store;
void write(const RoutableEdge* edge);
void write_chain(const RoutableEdge& edge);
EdgeStore* const _store;
void write(const StoredEdge* edge);
public:
ObjectSampleWriter(JfrCheckpointWriter& writer, const EdgeStore* store);
ObjectSampleWriter(JfrCheckpointWriter& writer, EdgeStore* store);
~ObjectSampleWriter();
bool operator()(const RoutableEdge& edge);
bool operator()(StoredEdge& edge);
};
#endif // SHARE_JFR_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLEWRITER_HPP

@ -25,8 +25,8 @@
#ifndef SHARE_JFR_LEAKPROFILER_CHECKPOINT_ROOTRESOLVER_HPP
#define SHARE_JFR_LEAKPROFILER_CHECKPOINT_ROOTRESOLVER_HPP
#include "memory/allocation.hpp"
#include "jfr/leakprofiler/utilities/rootType.hpp"
#include "memory/allocation.hpp"
#include "oops/oopsHierarchy.hpp"
struct RootCallbackInfo {

@ -1,236 +0,0 @@
/*
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/leakprofiler/utilities/granularTimer.hpp"
#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
#include "jfr/leakprofiler/chains/edge.hpp"
#include "jfr/leakprofiler/chains/edgeQueue.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/chains/bitset.hpp"
#include "jfr/leakprofiler/sampling/objectSample.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/leakprofiler/emitEventOperation.hpp"
#include "jfr/leakprofiler/chains/bfsClosure.hpp"
#include "jfr/leakprofiler/chains/dfsClosure.hpp"
#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
#include "jfr/support/jfrThreadId.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/globalDefinitions.hpp"
/* The EdgeQueue is backed by directly managed virtual memory.
* We will attempt to dimension an initial reservation
* in proportion to the size of the heap (represented by heap_region).
 * Initial memory reservation: 5% of the heap OR at least 32 MB
 * Commit ratio: 1 : 10 (subject to allocation granularities)
*/
static size_t edge_queue_memory_reservation(const MemRegion& heap_region) {
const size_t memory_reservation_bytes = MAX2(heap_region.byte_size() / 20, 32*M);
assert(memory_reservation_bytes >= (size_t)32*M, "invariant");
return memory_reservation_bytes;
}
static size_t edge_queue_memory_commit_size(size_t memory_reservation_bytes) {
const size_t memory_commit_block_size_bytes = memory_reservation_bytes / 10;
assert(memory_commit_block_size_bytes >= (size_t)3*M, "invariant");
return memory_commit_block_size_bytes;
}
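// Worked example (illustrative, not part of the change): for a 4 GB heap the
// reservation is MAX2(4096 MB / 20, 32 MB) = ~205 MB, i.e. the 5% rule
// dominates the 32 MB floor, and the commit block size is one tenth of that,
// roughly 20 MB, preserving the 1:10 commit ratio described above.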
static void log_edge_queue_summary(const EdgeQueue& edge_queue) {
log_trace(jfr, system)("EdgeQueue reserved size total: " SIZE_FORMAT " [KB]", edge_queue.reserved_size() / K);
log_trace(jfr, system)("EdgeQueue edges total: " SIZE_FORMAT, edge_queue.top());
log_trace(jfr, system)("EdgeQueue liveset total: " SIZE_FORMAT " [KB]", edge_queue.live_set() / K);
if (edge_queue.reserved_size() > 0) {
log_trace(jfr, system)("EdgeQueue commit reserve ratio: %f\n",
((double)edge_queue.live_set() / (double)edge_queue.reserved_size()));
}
}
void EmitEventOperation::doit() {
assert(LeakProfiler::is_running(), "invariant");
_object_sampler = LeakProfiler::object_sampler();
assert(_object_sampler != NULL, "invariant");
_vm_thread = VMThread::vm_thread();
assert(_vm_thread == Thread::current(), "invariant");
_vm_thread_local = _vm_thread->jfr_thread_local();
assert(_vm_thread_local != NULL, "invariant");
assert(_vm_thread->jfr_thread_local()->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");
// The VM_Operation::evaluate() which invoked doit()
// contains a top level ResourceMark
// save the original markWord for the potential leak objects
// to be restored on function exit
ObjectSampleMarker marker;
if (ObjectSampleCheckpoint::mark(marker, _emit_all) == 0) {
return;
}
EdgeStore edge_store;
GranularTimer::start(_cutoff_ticks, 1000000);
if (_cutoff_ticks <= 0) {
// no chains
write_events(&edge_store);
return;
}
assert(_cutoff_ticks > 0, "invariant");
// The bitset used for marking is dimensioned as a function of the heap size
const MemRegion heap_region = Universe::heap()->reserved_region();
BitSet mark_bits(heap_region);
// The edge queue is dimensioned as a fraction of the heap size
const size_t edge_queue_reservation_size = edge_queue_memory_reservation(heap_region);
EdgeQueue edge_queue(edge_queue_reservation_size, edge_queue_memory_commit_size(edge_queue_reservation_size));
// The initialize() routines will attempt to reserve and allocate backing storage memory.
// Failure to accommodate will render root chain processing impossible.
// As a fallback on failure, just write out the existing samples, flat, without chains.
if (!(mark_bits.initialize() && edge_queue.initialize())) {
log_warning(jfr)("Unable to allocate memory for root chain processing");
write_events(&edge_store);
return;
}
// necessary condition for attempting a root set iteration
Universe::heap()->ensure_parsability(false);
RootSetClosure::add_to_queue(&edge_queue);
if (edge_queue.is_full()) {
// Pathological case where roots don't fit in queue
// Do a depth-first search, but mark roots first
// to avoid walking sideways over roots
DFSClosure::find_leaks_from_root_set(&edge_store, &mark_bits);
} else {
BFSClosure bfs(&edge_queue, &edge_store, &mark_bits);
bfs.process();
}
GranularTimer::stop();
write_events(&edge_store);
log_edge_queue_summary(edge_queue);
}
int EmitEventOperation::write_events(EdgeStore* edge_store) {
assert(_object_sampler != NULL, "invariant");
assert(edge_store != NULL, "invariant");
assert(_vm_thread != NULL, "invariant");
assert(_vm_thread_local != NULL, "invariant");
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
// save thread id in preparation for thread local trace data manipulations
const traceid vmthread_id = _vm_thread_local->thread_id();
assert(_vm_thread_local->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");
const jlong last_sweep = _emit_all ? max_jlong : _object_sampler->last_sweep().value();
int count = 0;
const ObjectSample* current = _object_sampler->first();
while (current != NULL) {
ObjectSample* prev = current->prev();
if (current->is_alive_and_older_than(last_sweep)) {
write_event(current, edge_store);
++count;
}
current = prev;
}
// restore thread local stack trace and thread id
_vm_thread_local->set_thread_id(vmthread_id);
_vm_thread_local->clear_cached_stack_trace();
assert(_vm_thread_local->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");
if (count > 0) {
// serialize associated checkpoints
ObjectSampleCheckpoint::write(edge_store, _emit_all, _vm_thread);
}
return count;
}
static int array_size(const oop object) {
assert(object != NULL, "invariant");
if (object->is_array()) {
return arrayOop(object)->length();
}
return min_jint;
}
void EmitEventOperation::write_event(const ObjectSample* sample, EdgeStore* edge_store) {
assert(sample != NULL, "invariant");
assert(!sample->is_dead(), "invariant");
assert(edge_store != NULL, "invariant");
assert(_vm_thread_local != NULL, "invariant");
const oop* object_addr = sample->object_addr();
assert(*object_addr != NULL, "invariant");
const Edge* edge = (const Edge*)(*object_addr)->mark();
traceid gc_root_id = 0;
if (edge == NULL) {
// In order to dump out a representation of the event,
// even though the object was not reachable or the search was cut off,
// we need to register a top level edge for this object
Edge e(NULL, object_addr);
edge_store->add_chain(&e, 1);
edge = (const Edge*)(*object_addr)->mark();
} else {
gc_root_id = edge_store->get_root_id(edge);
}
assert(edge != NULL, "invariant");
assert(edge->pointee() == *object_addr, "invariant");
const traceid object_id = edge_store->get_id(edge);
assert(object_id != 0, "invariant");
EventOldObjectSample e(UNTIMED);
e.set_starttime(GranularTimer::start_time());
e.set_endtime(GranularTimer::end_time());
e.set_allocationTime(sample->allocation_time());
e.set_lastKnownHeapUsage(sample->heap_used_at_last_gc());
e.set_object(object_id);
e.set_arrayElements(array_size(*object_addr));
e.set_root(gc_root_id);
// Temporarily assigning both the stack trace id and thread id
// onto the thread local data structure of the VMThread (for the duration
// of the commit() call). This trick provides a means to override
// the event generation mechanism by injecting externally provided ids.
// Here, in particular, this allows us to emit an old object event
// supplying information from where the actual sampling occurred.
_vm_thread_local->set_cached_stack_trace_id(sample->stack_trace_id());
assert(sample->has_thread(), "invariant");
_vm_thread_local->set_thread_id(sample->thread_id());
e.commit();
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,25 +23,31 @@
*/
#include "precompiled.hpp"
#include "jfr/leakprofiler/emitEventOperation.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/leakprofiler/startOperation.hpp"
#include "jfr/leakprofiler/stopOperation.hpp"
#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/ostream.hpp"
// Only to be updated during safepoint
ObjectSampler* LeakProfiler::_object_sampler = NULL;
bool LeakProfiler::is_running() {
return ObjectSampler::is_created();
}
bool LeakProfiler::start(int sample_count) {
if (is_running()) {
return true;
}
// Allows user to disable leak profiler on command line by setting queue size to zero.
if (sample_count == 0) {
return false;
}
static volatile jbyte suspended = 0;
bool LeakProfiler::start(jint sample_count) {
if (UseZGC) {
log_warning(jfr)("LeakProfiler is currently not supported in combination with ZGC");
return false;
@ -52,49 +58,56 @@ bool LeakProfiler::start(jint sample_count) {
return false;
}
if (_object_sampler != NULL) {
// already started
return true;
assert(!is_running(), "invariant");
assert(sample_count > 0, "invariant");
// schedule the safepoint operation for installing the object sampler
StartOperation op(sample_count);
VMThread::execute(&op);
if (!is_running()) {
log_trace(jfr, system)("Object sampling could not be started because the sampler could not be allocated");
return false;
}
// Allows user to disable leak profiler on command line by setting queue size to zero.
if (sample_count > 0) {
StartOperation op(sample_count);
VMThread::execute(&op);
return _object_sampler != NULL;
}
return false;
assert(is_running(), "invariant");
log_trace(jfr, system)("Object sampling started");
return true;
}
bool LeakProfiler::stop() {
if (_object_sampler == NULL) {
// already stopped/not started
return true;
if (!is_running()) {
return false;
}
// schedule the safepoint operation for uninstalling and destroying the object sampler
StopOperation op;
VMThread::execute(&op);
return _object_sampler == NULL;
assert(!is_running(), "invariant");
log_trace(jfr, system)("Object sampling stopped");
return true;
}
void LeakProfiler::emit_events(jlong cutoff_ticks, bool emit_all) {
void LeakProfiler::emit_events(int64_t cutoff_ticks, bool emit_all) {
if (!is_running()) {
return;
}
EmitEventOperation op(cutoff_ticks, emit_all);
VMThread::execute(&op);
// exclusive access to object sampler instance
ObjectSampler* const sampler = ObjectSampler::acquire();
assert(sampler != NULL, "invariant");
EventEmitter::emit(sampler, cutoff_ticks, emit_all);
ObjectSampler::release();
}
void LeakProfiler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
assert(SafepointSynchronize::is_at_safepoint(),
"Leak Profiler::oops_do(...) may only be called during safepoint");
if (_object_sampler != NULL) {
_object_sampler->oops_do(is_alive, f);
if (is_running()) {
ObjectSampler::oops_do(is_alive, f);
}
}
void LeakProfiler::sample(HeapWord* object,
size_t size,
JavaThread* thread) {
void LeakProfiler::sample(HeapWord* object, size_t size, JavaThread* thread) {
assert(is_running(), "invariant");
assert(thread != NULL, "invariant");
assert(thread->thread_state() == _thread_in_vm, "invariant");
@ -104,39 +117,5 @@ void LeakProfiler::sample(HeapWord* object,
return;
}
_object_sampler->add(object, size, thread);
}
ObjectSampler* LeakProfiler::object_sampler() {
assert(is_suspended() || SafepointSynchronize::is_at_safepoint(),
"Leak Profiler::object_sampler() may only be called during safepoint");
return _object_sampler;
}
void LeakProfiler::set_object_sampler(ObjectSampler* object_sampler) {
assert(SafepointSynchronize::is_at_safepoint(),
"Leak Profiler::set_object_sampler() may only be called during safepoint");
_object_sampler = object_sampler;
}
bool LeakProfiler::is_running() {
return _object_sampler != NULL && !suspended;
}
bool LeakProfiler::is_suspended() {
return _object_sampler != NULL && suspended;
}
void LeakProfiler::resume() {
assert(is_suspended(), "invariant");
OrderAccess::storestore();
Atomic::store((jbyte)0, &suspended);
assert(is_running(), "invariant");
}
void LeakProfiler::suspend() {
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
assert(_object_sampler != NULL, "invariant");
assert(!is_suspended(), "invariant");
suspended = (jbyte)1; // safepoint visible
ObjectSampler::sample(object, size, thread);
}

@ -28,36 +28,16 @@
#include "memory/allocation.hpp"
class BoolObjectClosure;
class ObjectSampler;
class OopClosure;
class JavaThread;
class Thread;
class LeakProfiler : public AllStatic {
friend class ClassUnloadTypeSet;
friend class EmitEventOperation;
friend class ObjectSampleCheckpoint;
friend class StartOperation;
friend class StopOperation;
friend class TypeSet;
friend class WriteObjectSampleStacktrace;
private:
static ObjectSampler* _object_sampler;
static void set_object_sampler(ObjectSampler* object_sampler);
static ObjectSampler* object_sampler();
static void suspend();
static void resume();
static bool is_suspended();
public:
static bool start(jint sample_count);
static bool start(int sample_count);
static bool stop();
static void emit_events(jlong cutoff_ticks, bool emit_all);
static bool is_running();
static void emit_events(int64_t cutoff_ticks, bool emit_all);
static void sample(HeapWord* object, size_t size, JavaThread* thread);
// Called by GC

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -21,6 +21,7 @@
* questions.
*
*/
#include "precompiled.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/leakprofiler/sampling/objectSample.hpp"
@ -35,8 +36,18 @@
#include "logging/log.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
static ObjectSampler* _instance = NULL;
static ObjectSampler& instance() {
assert(_instance != NULL, "invariant");
return *_instance;
}
ObjectSampler::ObjectSampler(size_t size) :
_priority_queue(new SamplePriorityQueue(size)),
_list(new SampleList(size)),
@ -44,7 +55,6 @@ ObjectSampler::ObjectSampler(size_t size) :
_total_allocated(0),
_threshold(0),
_size(size),
_tryLock(0),
_dead_samples(false) {}
ObjectSampler::~ObjectSampler() {
@ -54,32 +64,110 @@ ObjectSampler::~ObjectSampler() {
_list = NULL;
}
void ObjectSampler::add(HeapWord* obj, size_t allocated, JavaThread* thread) {
bool ObjectSampler::create(size_t size) {
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
assert(_instance == NULL, "invariant");
_instance = new ObjectSampler(size);
return _instance != NULL;
}
bool ObjectSampler::is_created() {
return _instance != NULL;
}
ObjectSampler* ObjectSampler::sampler() {
assert(is_created(), "invariant");
return _instance;
}
void ObjectSampler::destroy() {
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
if (_instance != NULL) {
ObjectSampler* const sampler = _instance;
_instance = NULL;
delete sampler;
}
}
static volatile int _lock = 0;
ObjectSampler* ObjectSampler::acquire() {
assert(is_created(), "invariant");
while (Atomic::cmpxchg(1, &_lock, 0) == 1) {}
return _instance;
}
void ObjectSampler::release() {
assert(is_created(), "invariant");
OrderAccess::fence();
_lock = 0;
}
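// Usage sketch (mirrors LeakProfiler::emit_events earlier in this change):
//
//   ObjectSampler* const sampler = ObjectSampler::acquire(); // spins on the CAS
//   // ... exclusive, non-safepoint access to the sample data ...
//   ObjectSampler::release(); // fence publishes writes, then drops the lock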
static traceid get_thread_id(JavaThread* thread) {
assert(thread != NULL, "invariant");
const traceid thread_id = thread->threadObj() != NULL ? thread->jfr_thread_local()->thread_id() : 0;
if (thread->threadObj() == NULL) {
return 0;
}
const JfrThreadLocal* const tl = thread->jfr_thread_local();
assert(tl != NULL, "invariant");
if (!tl->has_thread_checkpoint()) {
JfrCheckpointManager::create_thread_checkpoint(thread);
}
assert(tl->has_thread_checkpoint(), "invariant");
return tl->thread_id();
}
// Populates the thread local stack frames, but does not add them
// to the stacktrace repository (...yet, see stacktrace_id() below)
//
void ObjectSampler::fill_stacktrace(JfrStackTrace* stacktrace, JavaThread* thread) {
assert(stacktrace != NULL, "invariant");
assert(thread != NULL, "invariant");
if (JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) {
JfrStackTraceRepository::fill_stacktrace_for(thread, stacktrace, 0);
}
}
// We were successful in acquiring the try lock and have been selected for adding a sample.
// Go ahead with installing our previously taken stacktrace into the stacktrace repository.
//
traceid ObjectSampler::stacktrace_id(const JfrStackTrace* stacktrace, JavaThread* thread) {
assert(stacktrace != NULL, "invariant");
assert(stacktrace->hash() != 0, "invariant");
const traceid stacktrace_id = JfrStackTraceRepository::add(stacktrace, thread);
thread->jfr_thread_local()->set_cached_stack_trace_id(stacktrace_id, stacktrace->hash());
return stacktrace_id;
}
void ObjectSampler::sample(HeapWord* obj, size_t allocated, JavaThread* thread) {
assert(thread != NULL, "invariant");
assert(is_created(), "invariant");
const traceid thread_id = get_thread_id(thread);
if (thread_id == 0) {
return;
}
assert(thread_id != 0, "invariant");
if (!thread->jfr_thread_local()->has_thread_checkpoint()) {
JfrCheckpointManager::create_thread_checkpoint(thread);
assert(thread->jfr_thread_local()->has_thread_checkpoint(), "invariant");
}
const JfrThreadLocal* const tl = thread->jfr_thread_local();
JfrStackTrace stacktrace(tl->stackframes(), tl->stackdepth());
fill_stacktrace(&stacktrace, thread);
traceid stack_trace_id = 0;
unsigned int stack_trace_hash = 0;
if (JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) {
stack_trace_id = JfrStackTraceRepository::record(thread, 0, &stack_trace_hash);
thread->jfr_thread_local()->set_cached_stack_trace_id(stack_trace_id, stack_trace_hash);
}
JfrTryLock tryLock(&_tryLock);
// try enter critical section
JfrTryLock tryLock(&_lock);
if (!tryLock.has_lock()) {
log_trace(jfr, oldobject, sampling)("Skipping old object sample due to lock contention");
return;
}
instance().add(obj, allocated, thread_id, &stacktrace, thread);
}
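// Everything that can be prepared without synchronization (thread id, thread
// checkpoint, stack frames) happens above, outside the critical section;
// add() installs the stacktrace into the repository and links the sample
// while the try lock is held.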
void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, JfrStackTrace* stacktrace, JavaThread* thread) {
assert(stacktrace != NULL, "invariant");
assert(thread_id != 0, "invariant");
assert(thread != NULL, "invariant");
assert(thread->jfr_thread_local()->has_thread_checkpoint(), "invariant");
if (_dead_samples) {
scavenge();
assert(!_dead_samples, "invariant");
@ -101,13 +189,13 @@ void ObjectSampler::add(HeapWord* obj, size_t allocated, JavaThread* thread) {
}
assert(sample != NULL, "invariant");
assert(thread_id != 0, "invariant");
sample->set_thread_id(thread_id);
sample->set_thread_checkpoint(thread->jfr_thread_local()->thread_checkpoint());
if (stack_trace_id != 0) {
sample->set_stack_trace_id(stack_trace_id);
sample->set_stack_trace_hash(stack_trace_hash);
const unsigned int stacktrace_hash = stacktrace->hash();
if (stacktrace_hash != 0) {
sample->set_stack_trace_id(stacktrace_id(stacktrace, thread));
sample->set_stack_trace_hash(stacktrace_hash);
}
sample->set_span(allocated);
@ -118,6 +206,53 @@ void ObjectSampler::add(HeapWord* obj, size_t allocated, JavaThread* thread) {
_priority_queue->push(sample);
}
void ObjectSampler::scavenge() {
ObjectSample* current = _list->last();
while (current != NULL) {
ObjectSample* next = current->next();
if (current->is_dead()) {
remove_dead(current);
}
current = next;
}
_dead_samples = false;
}
void ObjectSampler::remove_dead(ObjectSample* sample) {
assert(sample != NULL, "invariant");
assert(sample->is_dead(), "invariant");
ObjectSample* const previous = sample->prev();
// push span on to previous
if (previous != NULL) {
_priority_queue->remove(previous);
previous->add_span(sample->span());
_priority_queue->push(previous);
}
_priority_queue->remove(sample);
_list->release(sample);
}
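// Folding the dead sample's span into its predecessor keeps the total
// allocation weight tracked by the priority queue constant across removals.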
void ObjectSampler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
assert(is_created(), "invariant");
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
ObjectSampler& sampler = instance();
ObjectSample* current = sampler._list->last();
while (current != NULL) {
ObjectSample* next = current->next();
if (!current->is_dead()) {
if (is_alive->do_object_b(current->object())) {
// The weakly referenced object is alive, update pointer
f->do_oop(const_cast<oop*>(current->object_addr()));
} else {
current->set_dead();
sampler._dead_samples = true;
}
}
current = next;
}
sampler._last_sweep = JfrTicks::now();
}
const ObjectSample* ObjectSampler::last() const {
return _list->last();
}
@ -134,50 +269,6 @@ void ObjectSampler::set_last_resolved(const ObjectSample* sample) {
_list->set_last_resolved(sample);
}
void ObjectSampler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
ObjectSample* current = _list->last();
while (current != NULL) {
ObjectSample* next = current->next();
if (!current->is_dead()) {
if (is_alive->do_object_b(current->object())) {
// The weakly referenced object is alive, update pointer
f->do_oop(const_cast<oop*>(current->object_addr()));
} else {
current->set_dead();
_dead_samples = true;
}
}
current = next;
}
_last_sweep = JfrTicks::now();
}
void ObjectSampler::remove_dead(ObjectSample* sample) {
assert(sample != NULL, "invariant");
assert(sample->is_dead(), "invariant");
ObjectSample* const previous = sample->prev();
// push span on to previous
if (previous != NULL) {
_priority_queue->remove(previous);
previous->add_span(sample->span());
_priority_queue->push(previous);
}
_priority_queue->remove(sample);
_list->release(sample);
}
void ObjectSampler::scavenge() {
ObjectSample* current = _list->last();
while (current != NULL) {
ObjectSample* next = current->next();
if (current->is_dead()) {
remove_dead(current);
}
current = next;
}
_dead_samples = false;
}
int ObjectSampler::item_count() const {
return _priority_queue->count();
}
@ -189,7 +280,7 @@ const ObjectSample* ObjectSampler::item_at(int index) const {
ObjectSample* ObjectSampler::item_at(int index) {
return const_cast<ObjectSample*>(
const_cast<const ObjectSampler*>(this)->item_at(index)
);
);
}
const JfrTicks& ObjectSampler::last_sweep() const {

@ -28,7 +28,10 @@
#include "memory/allocation.hpp"
#include "jfr/utilities/jfrTime.hpp"
typedef u8 traceid;
class BoolObjectClosure;
class JfrStackTrace;
class OopClosure;
class ObjectSample;
class ObjectSampler;
@ -40,11 +43,13 @@ class Thread;
// making sure the samples are evenly distributed as
// new entries are added and removed.
class ObjectSampler : public CHeapObj<mtTracing> {
friend class EventEmitter;
friend class JfrRecorderService;
friend class LeakProfiler;
friend class ObjectSampleCheckpoint;
friend class StartOperation;
friend class StopOperation;
friend class EmitEventOperation;
friend class ObjectSampleCheckpoint;
friend class WriteObjectSampleStacktrace;
private:
SamplePriorityQueue* _priority_queue;
SampleList* _list;
@ -52,20 +57,33 @@ class ObjectSampler : public CHeapObj<mtTracing> {
size_t _total_allocated;
size_t _threshold;
size_t _size;
volatile int _tryLock;
bool _dead_samples;
// Lifecycle
explicit ObjectSampler(size_t size);
~ObjectSampler();
static bool create(size_t size);
static bool is_created();
static ObjectSampler* sampler();
static void destroy();
void add(HeapWord* object, size_t size, JavaThread* thread);
void remove_dead(ObjectSample* sample);
// For operations that require exclusive access (non-safepoint)
static ObjectSampler* acquire();
static void release();
// Stacktrace
static void fill_stacktrace(JfrStackTrace* stacktrace, JavaThread* thread);
traceid stacktrace_id(const JfrStackTrace* stacktrace, JavaThread* thread);
// Sampling
static void sample(HeapWord* object, size_t size, JavaThread* thread);
void add(HeapWord* object, size_t size, traceid thread_id, JfrStackTrace* stacktrace, JavaThread* thread);
void scavenge();
void remove_dead(ObjectSample* sample);
// Called by GC
void oops_do(BoolObjectClosure* is_alive, OopClosure* f);
static void oops_do(BoolObjectClosure* is_alive, OopClosure* f);
public:
const ObjectSample* item_at(int index) const;
ObjectSample* item_at(int index);
int item_count() const;

@ -25,35 +25,18 @@
#ifndef SHARE_JFR_LEAKPROFILER_STARTOPERATION_HPP
#define SHARE_JFR_LEAKPROFILER_STARTOPERATION_HPP
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "logging/log.hpp"
#include "runtime/vmOperations.hpp"
#include "jfr/leakprofiler/utilities/vmOperation.hpp"
// Safepoint operation for starting leak profiler object sampler
class StartOperation : public VM_Operation {
// Safepoint operation for creating and starting the leak profiler object sampler
class StartOperation : public OldObjectVMOperation {
private:
jlong _sample_count;
int _sample_count;
public:
StartOperation(jlong sample_count) :
_sample_count(sample_count) {
}
Mode evaluation_mode() const {
return _safepoint;
}
VMOp_Type type() const {
return VMOp_GC_HeapInspection;
}
StartOperation(int sample_count) : _sample_count(sample_count) {}
virtual void doit() {
assert(!LeakProfiler::is_running(), "invariant");
jint queue_size = JfrOptionSet::old_object_queue_size();
LeakProfiler::set_object_sampler(new ObjectSampler(queue_size));
log_trace(jfr, system)( "Object sampling started");
ObjectSampler::create(_sample_count);
}
};

@ -25,31 +25,14 @@
#ifndef SHARE_JFR_LEAKPROFILER_STOPOPERATION_HPP
#define SHARE_JFR_LEAKPROFILER_STOPOPERATION_HPP
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "logging/log.hpp"
#include "runtime/vmOperations.hpp"
#include "jfr/leakprofiler/utilities/vmOperation.hpp"
// Safepoint operation for stopping leak profiler object sampler
class StopOperation : public VM_Operation {
// Safepoint operation for stopping and destroying the leak profiler object sampler
class StopOperation : public OldObjectVMOperation {
public:
StopOperation() {}
Mode evaluation_mode() const {
return _safepoint;
}
VMOp_Type type() const {
return VMOp_GC_HeapInspection;
}
virtual void doit() {
assert(LeakProfiler::is_running(), "invariant");
ObjectSampler* object_sampler = LeakProfiler::object_sampler();
delete object_sampler;
LeakProfiler::set_object_sampler(NULL);
log_trace(jfr, system)( "Object sampling stopped");
ObjectSampler::destroy();
}
};

@ -0,0 +1,41 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_JFR_LEAKPROFILER_UTILITIES_VMOPERATION_HPP
#define SHARE_JFR_LEAKPROFILER_UTILITIES_VMOPERATION_HPP
#include "runtime/vmOperations.hpp"
class OldObjectVMOperation : public VM_Operation {
public:
Mode evaluation_mode() const {
return _safepoint;
}
VMOp_Type type() const {
return VMOp_JFROldObject;
}
};
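// Concrete operations (StartOperation and StopOperation earlier in this
// change) only supply doit(); the evaluation mode and operation type are
// fixed by this shared base class.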
#endif // SHARE_JFR_LEAKPROFILER_UTILITIES_VMOPERATION_HPP

@ -311,7 +311,7 @@ void ClassUnloadTypeSet::serialize(JfrCheckpointWriter& writer) {
if (LeakProfiler::is_running()) {
JfrCheckpointWriter leakp_writer(false, true, Thread::current());
type_set.write(writer, &leakp_writer);
ObjectSampleCheckpoint::install(leakp_writer, true, true);
ObjectSampleCheckpoint::install(leakp_writer, true);
return;
}
type_set.write(writer, NULL);
@ -319,10 +319,10 @@ void ClassUnloadTypeSet::serialize(JfrCheckpointWriter& writer) {
void TypeSet::serialize(JfrCheckpointWriter& writer) {
TypeSetSerialization type_set(false);
if (LeakProfiler::is_suspended()) {
if (LeakProfiler::is_running()) {
JfrCheckpointWriter leakp_writer(false, true, Thread::current());
type_set.write(writer, &leakp_writer);
ObjectSampleCheckpoint::install(leakp_writer, false, true);
ObjectSampleCheckpoint::install(leakp_writer, false);
return;
}
type_set.write(writer, NULL);

@ -24,7 +24,9 @@
#include "precompiled.hpp"
#include "jfr/jni/jfrJavaSupport.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
#include "jfr/recorder/checkpoint/jfrMetadataEvent.hpp"
@ -335,6 +337,7 @@ void JfrRecorderService::prepare_for_vm_error_rotation() {
open_new_chunk(true);
}
_checkpoint_manager.register_service_thread(Thread::current());
JfrMetadataEvent::lock();
}
void JfrRecorderService::open_new_chunk(bool vm_error) {
@ -398,6 +401,11 @@ static void write_stacktrace_checkpoint(JfrStackTraceRepository& stack_trace_rep
write_stack_trace_checkpoint.process();
}
static void write_object_sample_stacktrace(ObjectSampler* sampler, JfrStackTraceRepository& stack_trace_repository) {
WriteObjectSampleStacktrace object_sample_stacktrace(sampler, stack_trace_repository);
object_sample_stacktrace.process();
}
static void write_stringpool_checkpoint(JfrStringPool& string_pool, JfrChunkWriter& chunkwriter) {
WriteStringPool write_string_pool(string_pool);
WriteStringPoolCheckpoint write_string_pool_checkpoint(chunkwriter, TYPE_STRING, write_string_pool);
@ -418,8 +426,9 @@ static void write_stringpool_checkpoint_safepoint(JfrStringPool& string_pool, Jf
// write checkpoint epoch transition list->
// write stack trace checkpoint ->
// write string pool checkpoint ->
// write storage ->
// release stream lock
// write object sample stacktraces ->
// write storage ->
// release stream lock
//
void JfrRecorderService::pre_safepoint_write() {
MutexLocker stream_lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
@ -428,6 +437,13 @@ void JfrRecorderService::pre_safepoint_write() {
_checkpoint_manager.write_epoch_transition_mspace();
write_stacktrace_checkpoint(_stack_trace_repository, _chunkwriter, false);
write_stringpool_checkpoint(_string_pool, _chunkwriter);
if (LeakProfiler::is_running()) {
// Exclusive access to the object sampler instance.
// The sampler is released (unlocked) later in post_safepoint_write.
ObjectSampler* const sampler = ObjectSampler::acquire();
assert(sampler != NULL, "invariant");
write_object_sample_stacktrace(sampler, _stack_trace_repository);
}
_storage.write();
}
@ -436,16 +452,10 @@ void JfrRecorderService::invoke_safepoint_write() {
VMThread::execute(&safepoint_task);
}
static void write_object_sample_stacktrace(JfrStackTraceRepository& stack_trace_repository) {
WriteObjectSampleStacktrace object_sample_stacktrace(stack_trace_repository);
object_sample_stacktrace.process();
}
//
// safepoint write sequence
//
// lock stream lock ->
// write object sample stacktraces ->
// write stacktrace repository ->
// write string pool ->
// write safepoint dependent types ->
@ -458,7 +468,6 @@ static void write_object_sample_stacktrace(JfrStackTraceRepository& stack_trace_
void JfrRecorderService::safepoint_write() {
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
MutexLocker stream_lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
write_object_sample_stacktrace(_stack_trace_repository);
write_stacktrace_checkpoint(_stack_trace_repository, _chunkwriter, true);
write_stringpool_checkpoint_safepoint(_string_pool, _chunkwriter);
_checkpoint_manager.write_safepoint_types();
@ -478,13 +487,14 @@ static int64_t write_metadata_event(JfrChunkWriter& chunkwriter) {
//
// post-safepoint write sequence
//
// lock stream lock ->
// write type set ->
// write checkpoints ->
// write metadata event ->
// write chunk header ->
// close chunk fd ->
// release stream lock
// write type set ->
// release object sampler ->
// lock stream lock ->
// write checkpoints ->
// write metadata event ->
// write chunk header ->
// close chunk fd ->
// release stream lock
//
void JfrRecorderService::post_safepoint_write() {
assert(_chunkwriter.is_valid(), "invariant");
@ -493,6 +503,11 @@ void JfrRecorderService::post_safepoint_write() {
// already tagged artifacts for the previous epoch. We can accomplish this concurrently
// with threads now tagging artifacts in relation to the new, now updated, epoch and remain outside of a safepoint.
_checkpoint_manager.write_type_set();
if (LeakProfiler::is_running()) {
// The object sampler instance was exclusively acquired and locked in pre_safepoint_write.
// Note: There is a dependency on write_type_set() above, ensure the release is subsequent.
ObjectSampler::release();
}
MutexLocker stream_lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
// serialize any outstanding checkpoint memory
_checkpoint_manager.write();
@ -512,11 +527,9 @@ void JfrRecorderService::vm_error_rotation() {
void JfrRecorderService::finalize_current_chunk_on_vm_error() {
assert(_chunkwriter.is_valid(), "invariant");
pre_safepoint_write();
JfrMetadataEvent::lock();
// Do not attempt safepoint dependent operations during emergency dump.
// Optimistically write tagged artifacts.
_checkpoint_manager.shift_epoch();
_checkpoint_manager.write_type_set();
// update time
_chunkwriter.time_stamp_chunk_now();
post_safepoint_write();

@ -164,7 +164,13 @@ traceid JfrStackTraceRepository::add_trace(const JfrStackTrace& stacktrace) {
}
traceid JfrStackTraceRepository::add(const JfrStackTrace& stacktrace) {
return instance().add_trace(stacktrace);
traceid tid = instance().add_trace(stacktrace);
if (tid == 0) {
stacktrace.resolve_linenos();
tid = instance().add_trace(stacktrace);
}
assert(tid != 0, "invariant");
return tid;
}
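// Note on the retry above: a zero id from add_trace() means the trace was not
// already present and its line numbers were unresolved; resolving them and
// retrying is therefore expected to succeed, hence the assert.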
traceid JfrStackTraceRepository::record(Thread* thread, int skip /* 0 */) {
@ -187,54 +193,29 @@ traceid JfrStackTraceRepository::record(Thread* thread, int skip /* 0 */) {
return instance().record_for((JavaThread*)thread, skip, frames, tl->stackdepth());
}
traceid JfrStackTraceRepository::record(Thread* thread, int skip, unsigned int* hash) {
assert(thread == Thread::current(), "invariant");
JfrThreadLocal* const tl = thread->jfr_thread_local();
assert(tl != NULL, "invariant");
if (tl->has_cached_stack_trace()) {
*hash = tl->cached_stack_trace_hash();
return tl->cached_stack_trace_id();
}
if (!thread->is_Java_thread() || thread->is_hidden_from_external_view()) {
return 0;
}
JfrStackFrame* frames = tl->stackframes();
if (frames == NULL) {
// pending oom
return 0;
}
assert(frames != NULL, "invariant");
assert(tl->stackframes() == frames, "invariant");
return instance().record_for((JavaThread*)thread, skip, frames, tl->stackdepth(), hash);
}
traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, JfrStackFrame *frames, u4 max_frames) {
JfrStackTrace stacktrace(frames, max_frames);
if (!stacktrace.record_safe(thread, skip)) {
return 0;
}
traceid tid = add(stacktrace);
if (tid == 0) {
stacktrace.resolve_linenos();
tid = add(stacktrace);
}
return tid;
return stacktrace.record_safe(thread, skip) ? add(stacktrace) : 0;
}
traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, JfrStackFrame *frames, u4 max_frames, unsigned int* hash) {
assert(hash != NULL && *hash == 0, "invariant");
JfrStackTrace stacktrace(frames, max_frames);
if (!stacktrace.record_safe(thread, skip, true)) {
return 0;
traceid JfrStackTraceRepository::add(const JfrStackTrace* stacktrace, JavaThread* thread) {
assert(stacktrace != NULL, "invariant");
assert(thread != NULL, "invariant");
assert(stacktrace->hash() != 0, "invariant");
return add(*stacktrace);
}
bool JfrStackTraceRepository::fill_stacktrace_for(JavaThread* thread, JfrStackTrace* stacktrace, int skip) {
assert(thread == Thread::current(), "invariant");
assert(stacktrace != NULL, "invariant");
JfrThreadLocal* const tl = thread->jfr_thread_local();
assert(tl != NULL, "invariant");
const unsigned int cached_stacktrace_hash = tl->cached_stack_trace_hash();
if (cached_stacktrace_hash != 0) {
stacktrace->set_hash(cached_stacktrace_hash);
return true;
}
traceid tid = add(stacktrace);
if (tid == 0) {
stacktrace.resolve_linenos();
tid = add(stacktrace);
}
*hash = stacktrace._hash;
return tid;
return stacktrace->record_safe(thread, skip, true);
}
size_t JfrStackTraceRepository::write_impl(JfrChunkWriter& sw, bool clear) {
@ -363,7 +344,7 @@ const JfrStackTraceRepository::StackTrace* JfrStackTraceRepository::resolve_entr
return trace;
}
void JfrStackFrame::resolve_lineno() {
void JfrStackFrame::resolve_lineno() const {
assert(_method, "no method pointer");
assert(_line == 0, "already have linenumber");
_line = _method->line_number_from_bci(_bci);
@ -375,7 +356,7 @@ void JfrStackTrace::set_frame(u4 frame_pos, JfrStackFrame& frame) {
_frames[frame_pos] = frame;
}
void JfrStackTrace::resolve_linenos() {
void JfrStackTrace::resolve_linenos() const {
for(unsigned int i = 0; i < _nr_of_frames; i++) {
_frames[i].resolve_lineno();
}

@ -36,9 +36,9 @@ class Method;
class JfrStackFrame {
private:
const Method* _method;
mutable const Method* _method;
traceid _methodid;
int _line;
mutable int _line;
int _bci;
u1 _type;
@ -58,7 +58,7 @@ class JfrStackFrame {
bool equals(const JfrStackFrame& rhs) const;
void write(JfrChunkWriter& cw) const;
void write(JfrCheckpointWriter& cpw) const;
void resolve_lineno();
void resolve_lineno() const;
};
class JfrStackTrace : public StackObj {
@ -70,7 +70,7 @@ class JfrStackTrace : public StackObj {
unsigned int _hash;
const u4 _max_frames;
bool _reached_root;
bool _lineno;
mutable bool _lineno;
public:
JfrStackTrace(JfrStackFrame* frames, u4 max_frames) : _frames(frames),
@ -82,9 +82,10 @@ class JfrStackTrace : public StackObj {
_lineno(false) {}
bool record_thread(JavaThread& thread, frame& frame);
bool record_safe(JavaThread* thread, int skip, bool leakp = false);
void resolve_linenos();
void resolve_linenos() const;
void set_nr_of_frames(u4 nr_of_frames) { _nr_of_frames = nr_of_frames; }
void set_hash(unsigned int hash) { _hash = hash; }
unsigned int hash() const { return _hash; }
void set_frame(u4 frame_pos, JfrStackFrame& frame);
void set_reached_root(bool reached_root) { _reached_root = reached_root; }
bool full_stacktrace() const { return _reached_root; }
@ -128,23 +129,26 @@ class JfrStackTraceRepository : public JfrCHeapObj {
traceid _next_id;
u4 _entries;
size_t write_impl(JfrChunkWriter& cw, bool clear);
traceid record_for(JavaThread* thread, int skip, JfrStackFrame* frames, u4 max_frames);
traceid record_for(JavaThread* thread, int skip, JfrStackFrame* frames, u4 max_frames, unsigned int* hash);
traceid add_trace(const JfrStackTrace& stacktrace);
const StackTrace* resolve_entry(unsigned int hash, traceid id) const;
static traceid add(const JfrStackTrace* stacktrace, JavaThread* thread);
traceid record_for(JavaThread* thread, int skip, JfrStackFrame* frames, u4 max_frames);
size_t write_impl(JfrChunkWriter& cw, bool clear);
const StackTrace* resolve_entry(unsigned int hash, traceid id) const;
static void write_metadata(JfrCheckpointWriter& cpw);
static bool fill_stacktrace_for(JavaThread* thread, JfrStackTrace* stacktrace, int skip);
JfrStackTraceRepository();
static JfrStackTraceRepository& instance();
public:
static JfrStackTraceRepository* create();
bool initialize();
static void destroy();
static JfrStackTraceRepository& instance();
public:
static traceid add(const JfrStackTrace& stacktrace);
static traceid record(Thread* thread, int skip = 0);
static traceid record(Thread* thread, int skip, unsigned int* hash);
traceid write(JfrCheckpointWriter& cpw, traceid id, unsigned int hash);
size_t write(JfrChunkWriter& cw, bool clear);
size_t clear();

@ -48,10 +48,12 @@ void jfr_clear_stacktrace(Thread* t);
template <typename Event>
class JfrConditionalFlush {
protected:
bool _enabled;
public:
typedef JfrBuffer Type;
JfrConditionalFlush(Thread* t) {
if (jfr_is_event_enabled(Event::eventId)) {
JfrConditionalFlush(Thread* t) : _enabled(jfr_is_event_enabled(Event::eventId)) {
if (_enabled) {
jfr_conditional_flush(Event::eventId, sizeof(Event), t);
}
}
@ -63,7 +65,7 @@ class JfrConditionalFlushWithStacktrace : public JfrConditionalFlush<Event> {
bool _owner;
public:
JfrConditionalFlushWithStacktrace(Thread* t) : JfrConditionalFlush<Event>(t), _t(t), _owner(false) {
if (Event::has_stacktrace() && jfr_has_stacktrace_enabled(Event::eventId)) {
if (this->_enabled && Event::has_stacktrace() && jfr_has_stacktrace_enabled(Event::eventId)) {
_owner = jfr_save_stacktrace(t);
}
}

@ -150,9 +150,7 @@ JfrBuffer* JfrThreadLocal::install_java_buffer() const {
JfrStackFrame* JfrThreadLocal::install_stackframes() const {
assert(_stackframes == NULL, "invariant");
_stackdepth = (u4)JfrOptionSet::stackdepth();
guarantee(_stackdepth > 0, "Stackdepth must be > 0");
_stackframes = NEW_C_HEAP_ARRAY(JfrStackFrame, _stackdepth, mtTracing);
_stackframes = NEW_C_HEAP_ARRAY(JfrStackFrame, stackdepth(), mtTracing);
return _stackframes;
}
@ -163,3 +161,7 @@ ByteSize JfrThreadLocal::trace_id_offset() {
ByteSize JfrThreadLocal::java_event_writer_offset() {
return in_ByteSize(offset_of(JfrThreadLocal, _java_event_writer));
}
u4 JfrThreadLocal::stackdepth() const {
return _stackdepth != 0 ? _stackdepth : (u4)JfrOptionSet::stackdepth();
}
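// A zero _stackdepth means install_stackframes() has not yet run for this
// thread, so the option-configured default is returned as a fallback.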

@ -113,9 +113,7 @@ class JfrThreadLocal {
_stackframes = frames;
}
u4 stackdepth() const {
return _stackdepth;
}
u4 stackdepth() const;
void set_stackdepth(u4 depth) {
_stackdepth = depth;

@ -123,7 +123,7 @@ void KlassInfoEntry::print_on(outputStream* st) const {
KlassInfoEntry* KlassInfoBucket::lookup(Klass* const k) {
// Can happen if k is an archived class that we haven't loaded yet.
if (k->java_mirror() == NULL) {
if (k->java_mirror_no_keepalive() == NULL) {
return NULL;
}
@ -719,7 +719,7 @@ size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *fi
ResourceMark rm;
RecordInstanceClosure ric(cit, filter);
Universe::heap()->object_iterate(&ric);
Universe::heap()->safe_object_iterate(&ric);
return ric.missed_count();
}
@ -792,8 +792,5 @@ void HeapInspection::find_instances_at_safepoint(Klass* k, GrowableArray<oop>* r
// Iterate over objects in the heap
FindInstanceClosure fic(k, result);
// If this operation encounters a bad object when using CMS,
// consider using safe_object_iterate() which avoids metadata
// objects that may contain bad references.
Universe::heap()->object_iterate(&fic);
Universe::heap()->safe_object_iterate(&fic);
}

@ -144,18 +144,28 @@ class CLDToOopClosure : public CLDClosure {
void do_cld(ClassLoaderData* cld);
};
// The base class for all concurrent marking closures,
// that participates in class unloading.
// It's used to proxy through the metadata to the oops defined in them.
class MetadataVisitingOopIterateClosure: public OopIterateClosure {
class ClaimMetadataVisitingOopIterateClosure : public OopIterateClosure {
protected:
const int _claim;
public:
MetadataVisitingOopIterateClosure(ReferenceDiscoverer* rd = NULL) : OopIterateClosure(rd) { }
ClaimMetadataVisitingOopIterateClosure(int claim, ReferenceDiscoverer* rd = NULL) :
OopIterateClosure(rd),
_claim(claim) { }
virtual bool do_metadata() { return true; }
virtual void do_klass(Klass* k);
virtual void do_cld(ClassLoaderData* cld);
};
// The base class for all concurrent marking closures,
// that participates in class unloading.
// It's used to proxy through the metadata to the oops defined in them.
class MetadataVisitingOopIterateClosure: public ClaimMetadataVisitingOopIterateClosure {
public:
MetadataVisitingOopIterateClosure(ReferenceDiscoverer* rd = NULL);
};
// ObjectClosure is used for iterating through an object space
class ObjectClosure : public Closure {

@ -39,13 +39,17 @@
#include "oops/typeArrayKlass.inline.hpp"
#include "utilities/debug.hpp"
inline void MetadataVisitingOopIterateClosure::do_cld(ClassLoaderData* cld) {
cld->oops_do(this, ClassLoaderData::_claim_strong);
// Defaults to strong claiming.
inline MetadataVisitingOopIterateClosure::MetadataVisitingOopIterateClosure(ReferenceDiscoverer* rd) :
ClaimMetadataVisitingOopIterateClosure(ClassLoaderData::_claim_strong, rd) {}
inline void ClaimMetadataVisitingOopIterateClosure::do_cld(ClassLoaderData* cld) {
cld->oops_do(this, _claim);
}
inline void MetadataVisitingOopIterateClosure::do_klass(Klass* k) {
inline void ClaimMetadataVisitingOopIterateClosure::do_klass(Klass* k) {
ClassLoaderData* cld = k->class_loader_data();
MetadataVisitingOopIterateClosure::do_cld(cld);
ClaimMetadataVisitingOopIterateClosure::do_cld(cld);
}
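// Note (exposition, not part of this change): a closure that needs a
// different claim strength derives just as MetadataVisitingOopIterateClosure
// does above, passing its ClassLoaderData claim constant to the base
// constructor; do_cld() then applies that claim uniformly.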
#ifdef ASSERT

@ -767,7 +767,7 @@ void Klass::oop_print_value_on(oop obj, outputStream* st) {
// Size Statistics
void Klass::collect_statistics(KlassSizeStats *sz) const {
sz->_klass_bytes = sz->count(this);
sz->_mirror_bytes = sz->count(java_mirror());
sz->_mirror_bytes = sz->count(java_mirror_no_keepalive());
sz->_secondary_supers_bytes = sz->count_array(secondary_supers());
sz->_ro_bytes += sz->_secondary_supers_bytes;

@ -929,3 +929,91 @@ const Type *MinINode::add_ring( const Type *t0, const Type *t1 ) const {
// Otherwise just MIN them bits.
return TypeInt::make( MIN2(r0->_lo,r1->_lo), MIN2(r0->_hi,r1->_hi), MAX2(r0->_widen,r1->_widen) );
}
//------------------------------add_ring---------------------------------------
const Type *MinFNode::add_ring( const Type *t0, const Type *t1 ) const {
const TypeF *r0 = t0->is_float_constant();
const TypeF *r1 = t1->is_float_constant();
if (r0->is_nan()) {
return r0;
}
if (r1->is_nan()) {
return r1;
}
float f0 = r0->getf();
float f1 = r1->getf();
if (f0 != 0.0f || f1 != 0.0f) {
return f0 < f1 ? r0 : r1;
}
// handle min of 0.0, -0.0 case.
return (jint_cast(f0) < jint_cast(f1)) ? r0 : r1;
}
//------------------------------add_ring---------------------------------------
const Type *MinDNode::add_ring( const Type *t0, const Type *t1 ) const {
const TypeD *r0 = t0->is_double_constant();
const TypeD *r1 = t1->is_double_constant();
if (r0->is_nan()) {
return r0;
}
if (r1->is_nan()) {
return r1;
}
double d0 = r0->getd();
double d1 = r1->getd();
if (d0 != 0.0 || d1 != 0.0) {
return d0 < d1 ? r0 : r1;
}
// handle min of 0.0, -0.0 case.
return (jlong_cast(d0) < jlong_cast(d1)) ? r0 : r1;
}
//------------------------------add_ring---------------------------------------
const Type *MaxFNode::add_ring( const Type *t0, const Type *t1 ) const {
const TypeF *r0 = t0->is_float_constant();
const TypeF *r1 = t1->is_float_constant();
if (r0->is_nan()) {
return r0;
}
if (r1->is_nan()) {
return r1;
}
float f0 = r0->getf();
float f1 = r1->getf();
if (f0 != 0.0f || f1 != 0.0f) {
return f0 > f1 ? r0 : r1;
}
// handle max of 0.0, -0.0 case.
return (jint_cast(f0) > jint_cast(f1)) ? r0 : r1;
}
//------------------------------add_ring---------------------------------------
const Type *MaxDNode::add_ring( const Type *t0, const Type *t1 ) const {
const TypeD *r0 = t0->is_double_constant();
const TypeD *r1 = t1->is_double_constant();
if (r0->is_nan()) {
return r0;
}
if (r1->is_nan()) {
return r1;
}
double d0 = r0->getd();
double d1 = r1->getd();
if (d0 != 0.0 || d1 != 0.0) {
return d0 > d1 ? r0 : r1;
}
// handle max of 0.0, -0.0 case.
return (jlong_cast(d0) > jlong_cast(d1)) ? r0 : r1;
}
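// Why the bit-pattern compares above resolve the signed-zero cases (IEEE 754
// facts, noted for clarity): jint_cast(-0.0f) == 0x80000000, negative as a
// signed int, while jint_cast(0.0f) == 0x00000000; the analogous jlong_cast
// relation holds for doubles. A signed integer compare therefore orders -0.0
// below +0.0, so min(0.0, -0.0) yields -0.0 and max(0.0, -0.0) yields +0.0.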

@ -255,7 +255,7 @@ class MaxFNode : public MaxNode {
public:
MaxFNode(Node *in1, Node *in2) : MaxNode(in1, in2) {}
virtual int Opcode() const;
virtual const Type *add_ring(const Type*, const Type*) const { return Type::FLOAT; }
virtual const Type *add_ring(const Type*, const Type*) const;
virtual const Type *add_id() const { return TypeF::NEG_INF; }
virtual const Type *bottom_type() const { return Type::FLOAT; }
virtual uint ideal_reg() const { return Op_RegF; }
@ -267,7 +267,7 @@ class MinFNode : public MaxNode {
public:
MinFNode(Node *in1, Node *in2) : MaxNode(in1, in2) {}
virtual int Opcode() const;
virtual const Type *add_ring(const Type*, const Type*) const { return Type::FLOAT; }
virtual const Type *add_ring(const Type*, const Type*) const;
virtual const Type *add_id() const { return TypeF::POS_INF; }
virtual const Type *bottom_type() const { return Type::FLOAT; }
virtual uint ideal_reg() const { return Op_RegF; }
@ -279,7 +279,7 @@ class MaxDNode : public MaxNode {
public:
MaxDNode(Node *in1, Node *in2) : MaxNode(in1, in2) {}
virtual int Opcode() const;
virtual const Type *add_ring(const Type*, const Type*) const { return Type::DOUBLE; }
virtual const Type *add_ring(const Type*, const Type*) const;
virtual const Type *add_id() const { return TypeD::NEG_INF; }
virtual const Type *bottom_type() const { return Type::DOUBLE; }
virtual uint ideal_reg() const { return Op_RegD; }
@ -291,7 +291,7 @@ class MinDNode : public MaxNode {
public:
MinDNode(Node *in1, Node *in2) : MaxNode(in1, in2) {}
virtual int Opcode() const;
virtual const Type *add_ring(const Type*, const Type*) const { return Type::DOUBLE; }
virtual const Type *add_ring(const Type*, const Type*) const;
virtual const Type *add_id() const { return TypeD::POS_INF; }
virtual const Type *bottom_type() const { return Type::DOUBLE; }
virtual uint ideal_reg() const { return Op_RegD; }

@ -6696,9 +6696,6 @@ bool LibraryCallKit::inline_fp_min_max(vmIntrinsics::ID id) {
fatal_unexpected_iid(id);
break;
}
if (a->is_Con() || b->is_Con()) {
return false;
}
switch (id) {
case vmIntrinsics::_maxF: n = new MaxFNode(a, b); break;
case vmIntrinsics::_minF: n = new MinFNode(a, b); break;

@ -411,18 +411,8 @@ int Type::uhash( const Type *const t ) {
}
#define SMALLINT ((juint)3) // a value too insignificant to consider widening
static double pos_dinf() {
union { int64_t i; double d; } v;
v.i = CONST64(0x7ff0000000000000);
return v.d;
}
static float pos_finf() {
union { int32_t i; float f; } v;
v.i = 0x7f800000;
return v.f;
}
#define POSITIVE_INFINITE_F 0x7f800000 // hex representation for IEEE 754 single precision positive infinite
#define POSITIVE_INFINITE_D 0x7ff0000000000000 // hex representation for IEEE 754 double precision positive infinite
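// jfloat_cast/jdouble_cast reinterpret these bit patterns as IEEE 754 values:
// sign 0, an all-ones exponent and a zero fraction encode +infinity in both
// widths, and negating the cast result gives the matching negative infinity.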
//--------------------------Initialize_shared----------------------------------
void Type::Initialize_shared(Compile* current) {
@ -453,13 +443,13 @@ void Type::Initialize_shared(Compile* current) {
TypeF::ZERO = TypeF::make(0.0); // Float 0 (positive zero)
TypeF::ONE = TypeF::make(1.0); // Float 1
TypeF::POS_INF = TypeF::make(pos_finf());
TypeF::NEG_INF = TypeF::make(-pos_finf());
TypeF::POS_INF = TypeF::make(jfloat_cast(POSITIVE_INFINITE_F));
TypeF::NEG_INF = TypeF::make(-jfloat_cast(POSITIVE_INFINITE_F));
TypeD::ZERO = TypeD::make(0.0); // Double 0 (positive zero)
TypeD::ONE = TypeD::make(1.0); // Double 1
TypeD::POS_INF = TypeD::make(pos_dinf());
TypeD::NEG_INF = TypeD::make(-pos_dinf());
TypeD::POS_INF = TypeD::make(jdouble_cast(POSITIVE_INFINITE_D));
TypeD::NEG_INF = TypeD::make(-jdouble_cast(POSITIVE_INFINITE_D));
TypeInt::MINUS_1 = TypeInt::make(-1); // -1
TypeInt::ZERO = TypeInt::make( 0); // 0

@ -264,7 +264,7 @@ public:
VM_ChangeSingleStep::VM_ChangeSingleStep(bool on)
: _on(on != 0)
: _on(on)
{
}
@ -331,18 +331,20 @@ void JvmtiEventControllerPrivate::set_should_post_single_step(bool on) {
}
// This change must always occur at a safepoint.
// Being at a safepoint causes the interpreter to use the
// safepoint dispatch table which we overload to find single
// step points. Just to be sure that it has been set, we
// call notice_safepoints when turning on single stepping.
// When we leave our current safepoint, should_post_single_step
// will be checked by the interpreter, and the table kept
// or changed accordingly.
// When _on == true, we use the safepoint interpreter dispatch table
// to allow us to find the single step points. Otherwise, we switch
// back to the regular interpreter dispatch table.
// Note: We call Interpreter::notice_safepoints() and ignore_safepoints()
// in a VM_Operation to safely make the dispatch table switch. We
// no longer rely on the safepoint mechanism to do any of this work
// for us.
void VM_ChangeSingleStep::doit() {
log_debug(interpreter, safepoint)("changing single step to '%s'", _on ? "on" : "off");
JvmtiEventControllerPrivate::set_should_post_single_step(_on);
if (_on) {
Interpreter::notice_safepoints();
} else {
Interpreter::ignore_safepoints();
}
}

@ -35,6 +35,7 @@
#include "code/scopeDesc.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/workgroup.hpp"
#include "interpreter/interpreter.hpp"
@ -643,6 +644,12 @@ public:
}
}
if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_REQUEST_OOPSTORAGE_CLEANUP)) {
// Don't bother reporting event or time for this very short operation.
// To have any utility we'd also want to report whether cleanup was needed.
OopStorage::trigger_cleanup_if_needed();
}
_subtasks.all_tasks_completed(_num_workers);
}
};

@ -77,6 +77,7 @@ class SafepointSynchronize : AllStatic {
SAFEPOINT_CLEANUP_STRING_TABLE_REHASH,
SAFEPOINT_CLEANUP_CLD_PURGE,
SAFEPOINT_CLEANUP_SYSTEM_DICTIONARY_RESIZE,
SAFEPOINT_CLEANUP_REQUEST_OOPSTORAGE_CLEANUP,
// Leave this one last.
SAFEPOINT_CLEANUP_NUM_TASKS
};

@ -83,27 +83,9 @@ void ServiceThread::initialize() {
}
}
static bool needs_oopstorage_cleanup(OopStorage* const* storages,
bool* needs_cleanup,
size_t size) {
bool any_needs_cleanup = false;
static void cleanup_oopstorages(OopStorage* const* storages, size_t size) {
for (size_t i = 0; i < size; ++i) {
assert(!needs_cleanup[i], "precondition");
if (storages[i]->needs_delete_empty_blocks()) {
needs_cleanup[i] = true;
any_needs_cleanup = true;
}
}
return any_needs_cleanup;
}
static void cleanup_oopstorages(OopStorage* const* storages,
const bool* needs_cleanup,
size_t size) {
for (size_t i = 0; i < size; ++i) {
if (needs_cleanup[i]) {
storages[i]->delete_empty_blocks();
}
storages[i]->delete_empty_blocks();
}
}
@ -126,7 +108,6 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
bool resolved_method_table_work = false;
bool protection_domain_table_work = false;
bool oopstorage_work = false;
bool oopstorages_cleanup[oopstorage_count] = {}; // Zero (false) initialize.
JvmtiDeferredEvent jvmti_event;
{
// Need state transition ThreadBlockInVM so that this thread
@ -152,10 +133,7 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
(symboltable_work = SymbolTable::has_work()) |
(resolved_method_table_work = ResolvedMethodTable::has_work()) |
(protection_domain_table_work = SystemDictionary::pd_cache_table()->has_work()) |
(oopstorage_work = needs_oopstorage_cleanup(oopstorages,
oopstorages_cleanup,
oopstorage_count)))
(oopstorage_work = OopStorage::has_cleanup_work_and_reset()))
== 0) {
// Wait until notified that there is some work to do.
ml.wait();
@ -199,7 +177,7 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
}
if (oopstorage_work) {
cleanup_oopstorages(oopstorages, oopstorages_cleanup, oopstorage_count);
cleanup_oopstorages(oopstorages, oopstorage_count);
}
}
}
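The net effect of this ServiceThread change: instead of pre-computing a per-storage needs_cleanup array under the lock and then cleaning only the flagged storages, the thread now tests one global request flag (OopStorage::has_cleanup_work_and_reset()) and, when it is set, sweeps every storage. A minimal Java sketch of that check-and-reset flag pattern (the names mirror the diff, but this class is hypothetical):

import java.util.concurrent.atomic.AtomicBoolean;

class CleanupRequest {
    private static final AtomicBoolean requested = new AtomicBoolean(false);

    // Called from safepoint cleanup (see trigger_cleanup_if_needed above)
    // whenever empty blocks may have appeared.
    static void trigger() { requested.set(true); }

    // Called by the service thread: observes and clears the request in one
    // atomic step, so a burst of triggers results in a single sweep.
    static boolean hasWorkAndReset() { return requested.getAndSet(false); }
}

Coalescing all requests into one flag trades precision (every storage gets swept) for simpler bookkeeping and no per-storage state on the service thread.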

@ -72,6 +72,7 @@
template(ZMarkStart) \
template(ZMarkEnd) \
template(ZRelocateStart) \
template(ZVerify) \
template(HandshakeOneThread) \
template(HandshakeAllThreads) \
template(HandshakeFallback) \
@ -128,6 +129,7 @@
template(ScavengeMonitors) \
template(PrintMetadata) \
template(GTestExecuteAtSafepoint) \
template(JFROldObject) \
class VM_Operation: public CHeapObj<mtInternal> {
public:

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -761,78 +761,87 @@ public abstract class CipherSpi {
+ " bytes of space in output buffer");
}
// Detecting input and output buffer overlap may be tricky.
// We can only write directly into the output buffer when we
// are 100% sure it's safe to do so.
boolean a1 = input.hasArray();
boolean a2 = output.hasArray();
int total = 0;
byte[] inArray, outArray;
if (a2) { // output has an accessible byte[]
outArray = output.array();
int outPos = output.position();
int outOfs = output.arrayOffset() + outPos;
if (a1) { // input also has an accessible byte[]
inArray = input.array();
int inOfs = input.arrayOffset() + inPos;
if (a1) { // input has an accessible byte[]
byte[] inArray = input.array();
int inOfs = input.arrayOffset() + inPos;
if (a2) { // output has an accessible byte[]
byte[] outArray = output.array();
int outPos = output.position();
int outOfs = output.arrayOffset() + outPos;
// check array address and offsets and use temp output buffer
// if output offset is larger than input offset and
// falls within the range of input data
boolean useTempOut = false;
if (inArray == outArray &&
((inOfs < outOfs) && (outOfs < inOfs + inLen))) {
useTempOut = true;
outArray = new byte[outLenNeeded];
outOfs = 0;
}
if (isUpdate) {
total = engineUpdate(inArray, inOfs, inLen, outArray, outOfs);
} else {
total = engineDoFinal(inArray, inOfs, inLen, outArray, outOfs);
}
if (useTempOut) {
output.put(outArray, outOfs, total);
} else {
// adjust output position manually
output.position(outPos + total);
}
// adjust input position manually
input.position(inLimit);
} else { // input does not have an accessible byte[]
inArray = new byte[getTempArraySize(inLen)];
do {
int chunk = Math.min(inLen, inArray.length);
if (chunk > 0) {
input.get(inArray, 0, chunk);
}
int n;
if (isUpdate || (inLen > chunk)) {
n = engineUpdate(inArray, 0, chunk, outArray, outOfs);
} else {
n = engineDoFinal(inArray, 0, chunk, outArray, outOfs);
}
total += n;
outOfs += n;
inLen -= chunk;
} while (inLen > 0);
}
output.position(outPos + total);
} else { // output does not have an accessible byte[]
if (a1) { // but input has an accessible byte[]
inArray = input.array();
int inOfs = input.arrayOffset() + inPos;
} else { // output does not have an accessible byte[]
byte[] outArray = null;
if (isUpdate) {
outArray = engineUpdate(inArray, inOfs, inLen);
} else {
outArray = engineDoFinal(inArray, inOfs, inLen);
}
input.position(inLimit);
if (outArray != null && outArray.length != 0) {
output.put(outArray);
total = outArray.length;
}
} else { // input also does not have an accessible byte[]
inArray = new byte[getTempArraySize(inLen)];
do {
int chunk = Math.min(inLen, inArray.length);
if (chunk > 0) {
input.get(inArray, 0, chunk);
}
int n;
if (isUpdate || (inLen > chunk)) {
outArray = engineUpdate(inArray, 0, chunk);
} else {
outArray = engineDoFinal(inArray, 0, chunk);
}
if (outArray != null && outArray.length != 0) {
output.put(outArray);
total += outArray.length;
}
inLen -= chunk;
} while (inLen > 0);
// adjust input position manually
input.position(inLimit);
}
} else { // input does not have an accessible byte[]
// have to assume the worst, since we have no way of determining
// whether input and output overlap or not
byte[] tempOut = new byte[outLenNeeded];
int outOfs = 0;
byte[] tempIn = new byte[getTempArraySize(inLen)];
do {
int chunk = Math.min(inLen, tempIn.length);
if (chunk > 0) {
input.get(tempIn, 0, chunk);
}
int n;
if (isUpdate || (inLen > chunk)) {
n = engineUpdate(tempIn, 0, chunk, tempOut, outOfs);
} else {
n = engineDoFinal(tempIn, 0, chunk, tempOut, outOfs);
}
outOfs += n;
total += n;
inLen -= chunk;
} while (inLen > 0);
if (total > 0) {
output.put(tempOut, 0, total);
}
}
return total;
}
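The in-place-versus-temp-buffer decision above hinges on one predicate: writing straight into the output is unsafe only when both ByteBuffers share a backing array and the output region begins strictly inside the not-yet-consumed input region. A standalone sketch of that test (the helper name is mine, not part of CipherSpi):

class OverlapSketch {
    // True when output would clobber unread input: same backing array and
    // inOfs < outOfs < inOfs + inLen, the condition used in the code above.
    static boolean needsTempOutput(byte[] in, int inOfs, int inLen,
                                   byte[] out, int outOfs) {
        return in == out && (inOfs < outOfs) && (outOfs < inOfs + inLen);
    }

    public static void main(String[] args) {
        byte[] shared = new byte[32];
        System.out.println(needsTempOutput(shared, 0, 16, shared, 8));       // true: out starts inside unread input
        System.out.println(needsTempOutput(shared, 8, 16, shared, 0));       // false: out trails the input reads
        System.out.println(needsTempOutput(shared, 0, 16, new byte[32], 8)); // false: distinct arrays
    }
}

When neither buffer exposes a backing array, this test cannot be run at all, which is why the final branch above unconditionally stages both input and output through temporary arrays.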

@ -75,7 +75,7 @@ java.launcher.opt.footer = \
\ -D<name>=<value>\n\
\ set a system property\n\
\ -verbose:[class|module|gc|jni]\n\
\ enable verbose output\n\
\ enable verbose output for the given subsystem\n\
\ -version print product version to the error stream and exit\n\
\ --version print product version to the output stream and exit\n\
\ -showversion print product version to the error stream and continue\n\
@ -129,9 +129,9 @@ java.launcher.X.usage=\n\
\ append to end of bootstrap class path\n\
\ -Xcheck:jni perform additional checks for JNI functions\n\
\ -Xcomp forces compilation of methods on first invocation\n\
\ -Xdebug provided for backward compatibility\n\
\ -Xdebug does nothing. Provided for backward compatibility.\n\
\ -Xdiag show additional diagnostic messages\n\
\ -Xfuture enable strictest checks, anticipating future default\n\
\ -Xfuture enable strictest checks, anticipating future default.\n\
\ This option is deprecated and may be removed in a\n\
\ future release.\n\
\ -Xint interpreted mode execution only\n\
@ -141,7 +141,9 @@ java.launcher.X.usage=\n\
\ -Xlog:<opts> Configure or enable logging with the Java Virtual\n\
\ Machine (JVM) unified logging framework. Use -Xlog:help\n\
\ for details.\n\
\ -Xloggc:<file> log GC status to a file with time stamps\n\
\ -Xloggc:<file> log GC status to a file with time stamps.\n\
\ This option is deprecated and may be removed in a\n\
\ future release. It is replaced by -Xlog:gc:<file>.\n\
\ -Xmixed mixed mode execution (default)\n\
\ -Xmn<size> sets the initial and maximum size (in bytes) of the heap\n\
\ for the young generation (nursery)\n\
@ -152,6 +154,8 @@ java.launcher.X.usage=\n\
\ -Xshare:auto use shared class data if possible (default)\n\
\ -Xshare:off do not attempt to use shared class data\n\
\ -Xshare:on require using shared class data, otherwise fail.\n\
\ This is a testing option and may lead to intermittent\n\
\ failures. It should not be used in production environments.\n\
\ -XshowSettings show all settings and continue\n\
\ -XshowSettings:all\n\
\ show all settings and continue\n\
@ -191,7 +195,6 @@ java.launcher.X.usage=\n\
\ --patch-module <module>=<file>({0}<file>)*\n\
\ override or augment a module with classes and resources\n\
\ in JAR files or directories.\n\
\ --disable-@files disable further argument file expansion\n\
\ --source <version>\n\
\ set the version of the source in source-file mode.\n\n\
These extra options are subject to change without notice.\n
@ -199,7 +202,7 @@ These extra options are subject to change without notice.\n
# Translators please note do not translate the options themselves
java.launcher.X.macosx.usage=\
\n\
The following options are Mac OS X specific:\n\
The following options are macOS specific:\n\
\ -XstartOnFirstThread\n\
\ run the main() method on the first (AppKit) thread\n\
\ -Xdock:name=<application name>\n\

@ -1,5 +1,5 @@
#
# Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2007, 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -35,8 +35,9 @@ java.launcher.opt.footer = \ -cp <\u30C7\u30A3\u30EC\u30AF\u30C8\u30EA\u304A\
\u6307\u5B9A\u3055\u308C\u305F\u6700\u3082\u9069\u5207\u306A\u30B9\u30B1\u30FC\u30EA\u30F3\u30B0\u6E08\u30A4\u30E1\u30FC\u30B8\u304C\u9078\u629E\u3055\u308C\u307E\u3059\n (\u81EA\u52D5\u7684)\u3002\n \u8A73\u7D30\u306F\u3001SplashScreen API\u306E\u30C9\u30AD\u30E5\u30E1\u30F3\u30C8\u3092\u53C2\u7167\u3057\u3066\u304F\u3060\u3055\u3044\n @argument\u30D5\u30A1\u30A4\u30EB\n \u30AA\u30D7\u30B7\u30E7\u30F3\u3092\u542B\u30801\u3064\u4EE5\u4E0A\u306E\u5F15\u6570\u30D5\u30A1\u30A4\u30EB\n -disable-@files\n \u3055\u3089\u306A\u308B\u5F15\u6570\u30D5\u30A1\u30A4\u30EB\u62E1\u5F35\u3092\u7121\u52B9\u306B\u3057\u307E\u3059\n --enable-preview\n \u30AF\u30E9\u30B9\u3092\u3053\u306E\u30EA\u30EA\u30FC\u30B9\u306E\u30D7\u30EC\u30D3\u30E5\u30FC\u6A5F\u80FD\u306B\u4F9D\u5B58\u3055\u305B\u308B\u3053\u3068\u304C\u3067\u304D\u307E\u3059\n\u9577\u3044\u30AA\u30D7\u30B7\u30E7\u30F3\u306E\u5F15\u6570\u3092\u6307\u5B9A\u3059\u308B\u5834\u5408\u3001--<name>=<value>\u307E\u305F\u306F\n--<name> <value>\u3092\u4F7F\u7528\u3067\u304D\u307E\u3059\u3002\n
# Translators please note do not translate the options themselves
java.launcher.X.usage=\n -Xbatch \u30D0\u30C3\u30AF\u30B0\u30E9\u30A6\u30F3\u30C9\u306E\u30B3\u30F3\u30D1\u30A4\u30EB\u3092\u7121\u52B9\u306B\u3059\u308B\n -Xbootclasspath/a:<{0}\u3067\u533A\u5207\u3089\u308C\u305F\u30C7\u30A3\u30EC\u30AF\u30C8\u30EA\u304A\u3088\u3073zip/jar\u30D5\u30A1\u30A4\u30EB>\n \u30D6\u30FC\u30C8\u30B9\u30C8\u30E9\u30C3\u30D7\u30FB\u30AF\u30E9\u30B9\u30FB\u30D1\u30B9\u306E\u6700\u5F8C\u306B\u8FFD\u52A0\u3059\u308B\n -Xcheck:jni JNI\u95A2\u6570\u306B\u5BFE\u3059\u308B\u8FFD\u52A0\u306E\u30C1\u30A7\u30C3\u30AF\u3092\u5B9F\u884C\u3059\u308B\n -Xcomp \u521D\u56DE\u547C\u51FA\u3057\u6642\u306B\u30E1\u30BD\u30C3\u30C9\u306E\u30B3\u30F3\u30D1\u30A4\u30EB\u3092\u5F37\u5236\u3059\u308B\n -Xdebug \u4E0B\u4F4D\u4E92\u63DB\u6027\u306E\u305F\u3081\u306B\u63D0\u4F9B\n -Xdiag \u8FFD\u52A0\u306E\u8A3A\u65AD\u30E1\u30C3\u30BB\u30FC\u30B8\u3092\u8868\u793A\u3059\u308B\n -Xfuture \u5C06\u6765\u306E\u30C7\u30D5\u30A9\u30EB\u30C8\u3092\u898B\u8D8A\u3057\u3066\u3001\u6700\u3082\u53B3\u5BC6\u306A\u30C1\u30A7\u30C3\u30AF\u3092\u6709\u52B9\u306B\u3059\u308B\n -Xint \u30A4\u30F3\u30BF\u30D7\u30EA\u30BF\u30FB\u30E2\u30FC\u30C9\u306E\u5B9F\u884C\u306E\u307F\n -Xinternalversion\n -version\u30AA\u30D7\u30B7\u30E7\u30F3\u3088\u308A\u8A73\u7D30\u306AJVM\u30D0\u30FC\u30B8\u30E7\u30F3\u60C5\u5831\u3092\n \u8868\u793A\u3059\u308B\n -Xloggc:<file> \u30BF\u30A4\u30E0\u30B9\u30BF\u30F3\u30D7\u304C\u4ED8\u3044\u305F\u30D5\u30A1\u30A4\u30EB\u306BGC\u30B9\u30C6\u30FC\u30BF\u30B9\u306E\u30ED\u30B0\u3092\u8A18\u9332\u3059\u308B\n -Xmixed \u6DF7\u5408\u30E2\u30FC\u30C9\u306E\u5B9F\u884C(\u30C7\u30D5\u30A9\u30EB\u30C8)\n -Xmn<size> \u82E5\u3044\u4E16\u4EE3(\u30CA\u30FC\u30B5\u30EA)\u306E\u30D2\u30FC\u30D7\u306E\u521D\u671F\u304A\u3088\u3073\u6700\u5927\u30B5\u30A4\u30BA(\u30D0\u30A4\u30C8\u5358\u4F4D)\n \u3092\u8A2D\u5B9A\u3059\u308B\n -Xms<size> Java\u306E\u521D\u671F\u30D2\u30FC\u30D7\u30FB\u30B5\u30A4\u30BA\u3092\u8A2D\u5B9A\u3059\u308B\n -Xmx<size> Java\u306E\u6700\u5927\u30D2\u30FC\u30D7\u30FB\u30B5\u30A4\u30BA\u3092\u8A2D\u5B9A\u3059\u308B\n -Xnoclassgc \u30AF\u30E9\u30B9\u306E\u30AC\u30D9\u30FC\u30B8\u30FB\u30B3\u30EC\u30AF\u30B7\u30E7\u30F3\u3092\u7121\u52B9\u306B\u3059\u308B\n -Xrs Java/VM\u306B\u3088\u308BOS\u30B7\u30B0\u30CA\u30EB\u306E\u4F7F\u7528\u3092\u524A\u6E1B\u3059\u308B(\u30C9\u30AD\u30E5\u30E1\u30F3\u30C8\u3092\u53C2\u7167)\n -Xshare:auto \u53EF\u80FD\u3067\u3042\u308C\u3070\u5171\u6709\u30AF\u30E9\u30B9\u306E\u30C7\u30FC\u30BF\u3092\u4F7F\u7528\u3059\u308B(\u30C7\u30D5\u30A9\u30EB\u30C8)\n -Xshare:off \u5171\u6709\u30AF\u30E9\u30B9\u306E\u30C7\u30FC\u30BF\u3092\u4F7F\u7528\u3057\u3088\u3046\u3068\u3057\u306A\u3044\n -Xshare:on \u5171\u6709\u30AF\u30E9\u30B9\u30FB\u30C7\u30FC\u30BF\u306E\u4F7F\u7528\u3092\u5FC5\u9808\u306B\u3057\u3001\u3067\u304D\u306A\u3051\u308C\u3070\u5931\u6557\u3059\u308B\u3002\n -XshowSettings \u3059\u3079\u3066\u306E\u8A2D\u5B9A\u3092\u8868\u793A\u3057\u3066\u7D9A\u884C\u3059\u308B\n -XshowSettings:all\n \u3059\u3079\u3066\u306E\u8A2D\u5B9A\u3092\u8868\u793A\u3057\u3066\u7D9A\u884C\u3059\u308B\n -XshowSettings:locale\n \u3059\u3079\u3066\u306E\u30ED\u30B1\u30FC\u30EB\u95A2\u9023\u306E\u8A2D\u5B9A\u3092\u8868\u793A\u3057\u3066\u7D9A\u884C\u3059\u308B\n -XshowSettings:properties\n \u3059\u3079\u3066\u306E\u30D7\u30ED\u30D1\u30C6\u30A3\u8A2D\u5B9A\u3092\u8868\u793A\u3057\u3066\u7D9A\u884C\u3059\u308B\n -XshowSettings:vm\n \u3059\u3079\u3066\u306EVM\u95A2\u9023\u306E\u8A2D\u5B9A\u3092\u8868\u793A\u3057\u3066\u7D9A\u884C\u3059\u308B\n -XshowSettings:system\n 
(Linux\u306E\u307F) \
\u30DB\u30B9\u30C8\u30FB\u30B7\u30B9\u30C6\u30E0\u307E\u305F\u306F\u30B3\u30F3\u30C6\u30CA\u306E\n \u69CB\u6210\u3092\u8868\u793A\u3057\u3066\u7D9A\u884C\u3059\u308B\n -Xss<size> Java\u306E\u30B9\u30EC\u30C3\u30C9\u30FB\u30B9\u30BF\u30C3\u30AF\u30FB\u30B5\u30A4\u30BA\u3092\u8A2D\u5B9A\u3059\u308B\n -Xverify \u30D0\u30A4\u30C8\u30B3\u30FC\u30C9\u691C\u8A3C\u6A5F\u80FD\u306E\u30E2\u30FC\u30C9\u3092\u8A2D\u5B9A\u3059\u308B\n --add-reads <module>=<target-module>(,<target-module>)*\n \u30E2\u30B8\u30E5\u30FC\u30EB\u5BA3\u8A00\u306B\u95A2\u4FC2\u306A\u304F\u3001<module>\u3092\u66F4\u65B0\u3057\u3066<target-module>\n \u3092\u8AAD\u307F\u53D6\u308B\u3002 \n <target-module>\u3092ALL-UNNAMED\u306B\u8A2D\u5B9A\u3059\u308B\u3068\u3001\u3059\u3079\u3066\u306E\u540D\u524D\u306E\u306A\u3044\u30E2\u30B8\u30E5\u30FC\u30EB\u3092\n \u8AAD\u307F\u53D6\u308C\u307E\u3059\u3002\n --add-exports <module>/<package>=<target-module>(,<target-module>)*\n \u30E2\u30B8\u30E5\u30FC\u30EB\u5BA3\u8A00\u306B\u95A2\u4FC2\u306A\u304F\u3001<module>\u3092\u66F4\u65B0\u3057\u3066<package>\u3092<target-module>\u306B\n \u30A8\u30AF\u30B9\u30DD\u30FC\u30C8\u3059\u308B\u3002\n <target-module>\u3092ALL-UNNAMED\u306B\u8A2D\u5B9A\u3059\u308B\u3068\u3001\u3059\u3079\u3066\u306E\u540D\u524D\u306E\u306A\u3044\u30E2\u30B8\u30E5\u30FC\u30EB\u306B\n \u30A8\u30AF\u30B9\u30DD\u30FC\u30C8\u3067\u304D\u307E\u3059\u3002\n --add-opens <module>/<package>=<target-module>(,<target-module>)*\n \u30E2\u30B8\u30E5\u30FC\u30EB\u5BA3\u8A00\u306B\u95A2\u4FC2\u306A\u304F\u3001<module>\u3092\u66F4\u65B0\u3057\u3066\n <package>\u3092<target-module>\u306B\u958B\u304F\u3002\n --illegal-access=<value>\n \u540D\u524D\u306E\u306A\u3044\u30E2\u30B8\u30E5\u30FC\u30EB\u5185\u306E\u30B3\u30FC\u30C9\u306B\u3088\u308B\u3001\u540D\u524D\u306E\u3042\u308B\u30E2\u30B8\u30E5\u30FC\u30EB\u5185\u306E\n \u30BF\u30A4\u30D7\u306E\u30E1\u30F3\u30D0\u30FC\u3078\u306E\u30A2\u30AF\u30BB\u30B9\u3092\u8A31\u53EF\u307E\u305F\u306F\u62D2\u5426\u3059\u308B\u3002\n <value>\u306F"deny"\u3001"permit"\u3001"warn"\u3001"debug"\u306E\u3044\u305A\u308C\u304B\u3067\u3059\n \u3053\u306E\u30AA\u30D7\u30B7\u30E7\u30F3\u306F\u5C06\u6765\u306E\u30EA\u30EA\u30FC\u30B9\u3067\u524A\u9664\u3055\u308C\u307E\u3059\u3002\n --limit-modules <module name>[,<module name>...]\n \u53C2\u7167\u53EF\u80FD\u306A\u30E2\u30B8\u30E5\u30FC\u30EB\u306E\u9818\u57DF\u3092\u5236\u9650\u3059\u308B\n --patch-module <module>=<file>({0}<file>)*\n JAR\u30D5\u30A1\u30A4\u30EB\u307E\u305F\u306F\u30C7\u30A3\u30EC\u30AF\u30C8\u30EA\u306E\u30AF\u30E9\u30B9\u304A\u3088\u3073\u30EA\u30BD\u30FC\u30B9\u3067\n \u30E2\u30B8\u30E5\u30FC\u30EB\u3092\u30AA\u30FC\u30D0\u30FC\u30E9\u30A4\u30C9\u307E\u305F\u306F\u62E1\u5F35\u3059\u308B\u3002\n --disable-@files \u3055\u3089\u306A\u308B\u30D5\u30A1\u30A4\u30EB\u62E1\u5F35\u3092\u7121\u52B9\u306B\u3059\u308B\n --source <version>\n \u30BD\u30FC\u30B9\u30D5\u30A1\u30A4\u30EB\u30FB\u30E2\u30FC\u30C9\u3067\u30BD\u30FC\u30B9\u306E\u30D0\u30FC\u30B8\u30E7\u30F3\u3092\u8A2D\u5B9A\u3059\u308B\u3002\n\n\u3053\u308C\u3089\u306F\u8FFD\u52A0\u30AA\u30D7\u30B7\u30E7\u30F3\u3067\u3042\u308A\u4E88\u544A\u306A\u3057\u306B\u5909\u66F4\u3055\u308C\u308B\u3053\u3068\u304C\u3042\u308A\u307E\u3059\u3002\n
java.launcher.X.usage=\n -Xbatch \u30D0\u30C3\u30AF\u30B0\u30E9\u30A6\u30F3\u30C9\u30FB\u30B3\u30F3\u30D1\u30A4\u30EB\u3092\u7121\u52B9\u306B\u3057\u307E\u3059\n -Xbootclasspath/a:<directories and zip/jar files separated by {0}>\n \u30D6\u30FC\u30C8\u30B9\u30C8\u30E9\u30C3\u30D7\u30FB\u30AF\u30E9\u30B9\u30FB\u30D1\u30B9\u306E\u6700\u5F8C\u306B\u8FFD\u52A0\u3057\u307E\u3059\n -Xcheck:jni JNI\u95A2\u6570\u306B\u5BFE\u3059\u308B\u8FFD\u52A0\u306E\u30C1\u30A7\u30C3\u30AF\u3092\u5B9F\u884C\u3057\u307E\u3059\n -Xcomp \u521D\u56DE\u547C\u51FA\u3057\u6642\u306B\u30E1\u30BD\u30C3\u30C9\u306E\u30B3\u30F3\u30D1\u30A4\u30EB\u3092\u5F37\u5236\u3057\u307E\u3059\n -Xdebug \u4E0B\u4F4D\u4E92\u63DB\u6027\u306E\u305F\u3081\u306B\u7528\u610F\u3055\u308C\u3066\u3044\u307E\u3059\n -Xdiag \u8FFD\u52A0\u306E\u8A3A\u65AD\u30E1\u30C3\u30BB\u30FC\u30B8\u3092\u8868\u793A\u3057\u307E\u3059\n -Xfuture \u5C06\u6765\u306E\u30C7\u30D5\u30A9\u30EB\u30C8\u3092\u898B\u8D8A\u3057\u3066\u3001\u6700\u3082\u53B3\u5BC6\u306A\u30C1\u30A7\u30C3\u30AF\u3092\u6709\u52B9\u306B\u3057\u307E\u3059\n \u3053\u306E\u30AA\u30D7\u30B7\u30E7\u30F3\u306F\u975E\u63A8\u5968\u3067\u3042\u308A\u3001\u5C06\u6765\u306E\u30EA\u30EA\u30FC\u30B9\u3067\u524A\u9664\u3055\u308C\u308B\n \u53EF\u80FD\u6027\u304C\u3042\u308A\u307E\u3059\u3002\n -Xint \u30A4\u30F3\u30BF\u30D7\u30EA\u30BF\u30FB\u30E2\u30FC\u30C9\u306E\u5B9F\u884C\u306E\u307F\n -Xinternalversion\n -version\u30AA\u30D7\u30B7\u30E7\u30F3\u3088\u308A\u8A73\u7D30\u306AJVM\u30D0\u30FC\u30B8\u30E7\u30F3\u60C5\u5831\u3092\n \u8868\u793A\u3057\u307E\u3059\n -Xlog:<opts> Java Virtual Machine (JVM)\u7D71\u5408\u30ED\u30AE\u30F3\u30B0\u30FB\u30D5\u30EC\u30FC\u30E0\u30EF\u30FC\u30AF\u3067\u306E\n \u30ED\u30AE\u30F3\u30B0\u3092\u69CB\u6210\u307E\u305F\u306F\u6709\u52B9\u5316\u3057\u307E\u3059\u3002\u8A73\u7D30\u306F\u3001-Xlog:help\u3092\n \u4F7F\u7528\u3057\u3066\u304F\u3060\u3055\u3044\u3002\n -Xloggc:<file> \u30BF\u30A4\u30E0\u30B9\u30BF\u30F3\u30D7\u304C\u4ED8\u3044\u305F\u30D5\u30A1\u30A4\u30EB\u306BGC\u30B9\u30C6\u30FC\u30BF\u30B9\u306E\u30ED\u30B0\u3092\u8A18\u9332\u3057\u307E\u3059\n -Xmixed \u6DF7\u5408\u30E2\u30FC\u30C9\u306E\u5B9F\u884C(\u30C7\u30D5\u30A9\u30EB\u30C8)\n -Xmn<size> \u82E5\u3044\u4E16\u4EE3(\u30CA\u30FC\u30B5\u30EA)\u306E\u30D2\u30FC\u30D7\u306E\u521D\u671F\u30B5\u30A4\u30BA\u304A\u3088\u3073\u6700\u5927\u30B5\u30A4\u30BA\n (\u30D0\u30A4\u30C8\u5358\u4F4D)\u3092\u8A2D\u5B9A\u3057\u307E\u3059\n -Xms<size> Java\u306E\u521D\u671F\u30D2\u30FC\u30D7\u30FB\u30B5\u30A4\u30BA\u3092\u8A2D\u5B9A\u3057\u307E\u3059\n -Xmx<size> Java\u306E\u6700\u5927\u30D2\u30FC\u30D7\u30FB\u30B5\u30A4\u30BA\u3092\u8A2D\u5B9A\u3057\u307E\u3059\n -Xnoclassgc \u30AF\u30E9\u30B9\u306E\u30AC\u30D9\u30FC\u30B8\u30FB\u30B3\u30EC\u30AF\u30B7\u30E7\u30F3\u3092\u7121\u52B9\u306B\u3057\u307E\u3059\n -Xrs Java/VM\u306B\u3088\u308BOS\u30B7\u30B0\u30CA\u30EB\u306E\u4F7F\u7528\u3092\u524A\u6E1B\u3057\u307E\u3059(\u30C9\u30AD\u30E5\u30E1\u30F3\u30C8\u3092\u53C2\u7167)\n -Xshare:auto \u53EF\u80FD\u3067\u3042\u308C\u3070\u5171\u6709\u30AF\u30E9\u30B9\u30FB\u30C7\u30FC\u30BF\u3092\u4F7F\u7528\u3057\u307E\u3059(\u30C7\u30D5\u30A9\u30EB\u30C8)\n -Xshare:off \u5171\u6709\u30AF\u30E9\u30B9\u30FB\u30C7\u30FC\u30BF\u306E\u4F7F\u7528\u3092\u8A66\u307F\u307E\u305B\u3093\n -Xshare:on \u5171\u6709\u30AF\u30E9\u30B9\u30FB\u30C7\u30FC\u30BF\u306E\u4F7F\u7528\u3092\u5FC5\u9808\u306B\u3057\u3001\u3067\u304D\u306A\u3051\u308C\u3070\u5931\u6557\u3057\u307E\u3059\u3002\n -XshowSettings 
\u3059\u3079\u3066\u306E\u8A2D\u5B9A\u3092\u8868\u793A\u3057\u3066\u7D9A\u884C\u3057\u307E\u3059\n -XshowSettings:all\n \
\u3059\u3079\u3066\u306E\u8A2D\u5B9A\u3092\u8868\u793A\u3057\u3066\u7D9A\u884C\u3057\u307E\u3059\n -XshowSettings:locale\n \u3059\u3079\u3066\u306E\u30ED\u30B1\u30FC\u30EB\u95A2\u9023\u306E\u8A2D\u5B9A\u3092\u8868\u793A\u3057\u3066\u7D9A\u884C\u3057\u307E\u3059\n -XshowSettings:properties\n \u3059\u3079\u3066\u306E\u30D7\u30ED\u30D1\u30C6\u30A3\u8A2D\u5B9A\u3092\u8868\u793A\u3057\u3066\u7D9A\u884C\u3057\u307E\u3059\n -XshowSettings:vm\n \u3059\u3079\u3066\u306EVM\u95A2\u9023\u306E\u8A2D\u5B9A\u3092\u8868\u793A\u3057\u3066\u7D9A\u884C\u3057\u307E\u3059\n -XshowSettings:system\n (Linux\u306E\u307F)\u30DB\u30B9\u30C8\u30FB\u30B7\u30B9\u30C6\u30E0\u307E\u305F\u306F\u30B3\u30F3\u30C6\u30CA\u3092\u8868\u793A\u3057\u307E\u3059\n \u69CB\u6210\u3057\u3066\u7D9A\u884C\u3057\u307E\u3059\n -Xss<size> java\u30B9\u30EC\u30C3\u30C9\u306E\u30B9\u30BF\u30C3\u30AF\u30FB\u30B5\u30A4\u30BA\u3092\u8A2D\u5B9A\u3057\u307E\u3059\n -Xverify \u30D0\u30A4\u30C8\u30B3\u30FC\u30C9\u30FB\u30D9\u30EA\u30D5\u30A1\u30A4\u30A2\u306E\u30E2\u30FC\u30C9\u3092\u8A2D\u5B9A\u3057\u307E\u3059\n \u30AA\u30D7\u30B7\u30E7\u30F3-Xverify:none\u306F\u975E\u63A8\u5968\u306B\u306A\u308A\u3001\n \u5C06\u6765\u306E\u30EA\u30EA\u30FC\u30B9\u3067\u524A\u9664\u3055\u308C\u308B\u53EF\u80FD\u6027\u304C\u3042\u308A\u307E\u3059\u3002\n --add-reads <module>=<target-module>(,<target-module>)*\n \u30E2\u30B8\u30E5\u30FC\u30EB\u5BA3\u8A00\u306B\u95A2\u4FC2\u306A\u304F\u3001<module>\u3092\u66F4\u65B0\u3057\u3066<target-module>\u3092\n \u8AAD\u307F\u53D6\u308A\u307E\u3059\u3002 \n <target-module>\u3092ALL-UNNAMED\u306B\u8A2D\u5B9A\u3059\u308B\u3068\u3001\u3059\u3079\u3066\u306E\u540D\u524D\u306E\u306A\u3044\u30E2\u30B8\u30E5\u30FC\u30EB\u3092\n \u8AAD\u307F\u53D6\u308B\u3053\u3068\u304C\u3067\u304D\u307E\u3059\u3002\n --add-exports <module>/<package>=<target-module>(,<target-module>)*\n \u30E2\u30B8\u30E5\u30FC\u30EB\u5BA3\u8A00\u306B\u95A2\u4FC2\u306A\u304F\u3001<module>\u3092\u66F4\u65B0\u3057\u3066<package>\u3092<target-module>\u306B\n \u30A8\u30AF\u30B9\u30DD\u30FC\u30C8\u3057\u307E\u3059\u3002\n <target-module>\u3092ALL-UNNAMED\u306B\u8A2D\u5B9A\u3059\u308B\u3068\u3001\u3059\u3079\u3066\u306E\u540D\u524D\u306E\u306A\u3044\u30E2\u30B8\u30E5\u30FC\u30EB\u306B\n \u30A8\u30AF\u30B9\u30DD\u30FC\u30C8\u3067\u304D\u307E\u3059\u3002\n --add-opens <module>/<package>=<target-module>(,<target-module>)*\n \u30E2\u30B8\u30E5\u30FC\u30EB\u5BA3\u8A00\u306B\u95A2\u4FC2\u306A\u304F\u3001<module>\u3092\u66F4\u65B0\u3057\u3066<package>\u3092\n <target-module>\u306B\u958B\u304D\u307E\u3059\u3002\n --illegal-access=<value>\n \u540D\u524D\u306E\u306A\u3044\u30E2\u30B8\u30E5\u30FC\u30EB\u5185\u306E\u30B3\u30FC\u30C9\u306B\u3088\u308B\u3001\u540D\u524D\u306E\u3042\u308B\u30E2\u30B8\u30E5\u30FC\u30EB\u5185\u306E\u30BF\u30A4\u30D7\u306E\u30E1\u30F3\u30D0\u30FC\u3078\u306E\u4E0D\u6B63\u30A2\u30AF\u30BB\u30B9\u3092\n \u8A31\u53EF\u307E\u305F\u306F\u62D2\u5426\u3057\u307E\u3059\u3002\n <value>\u306F"deny"\u3001"permit"\u3001"warn"\u307E\u305F\u306F"debug"\u306E\u3044\u305A\u308C\u304B\u3067\u3059\n \u3053\u306E\u30AA\u30D7\u30B7\u30E7\u30F3\u306F\u3001\u5C06\u6765\u306E\u30EA\u30EA\u30FC\u30B9\u3067\u524A\u9664\u3055\u308C\u308B\u4E88\u5B9A\u3067\u3059\u3002\n --limit-modules <module name>[,<module name>...]\n \u53C2\u7167\u53EF\u80FD\u306A\u30E2\u30B8\u30E5\u30FC\u30EB\u306E\u9818\u57DF\u3092\u5236\u9650\u3057\u307E\u3059\n --patch-module <module>=<file>({0}<file>)*\n \
JAR\u30D5\u30A1\u30A4\u30EB\u307E\u305F\u306F\u30C7\u30A3\u30EC\u30AF\u30C8\u30EA\u306E\u30AF\u30E9\u30B9\u304A\u3088\u3073\u30EA\u30BD\u30FC\u30B9\u3067\n \u30E2\u30B8\u30E5\u30FC\u30EB\u3092\u30AA\u30FC\u30D0\u30FC\u30E9\u30A4\u30C9\u307E\u305F\u306F\u62E1\u5F35\u3057\u307E\u3059\u3002\n --disable-@files\u306F\u3001\u3055\u3089\u306A\u308B\u5F15\u6570\u30D5\u30A1\u30A4\u30EB\u62E1\u5F35\u3092\u7121\u52B9\u306B\u3057\u307E\u3059\n --source <version>\n \u30BD\u30FC\u30B9\u30D5\u30A1\u30A4\u30EB\u30FB\u30E2\u30FC\u30C9\u3067\u30BD\u30FC\u30B9\u306E\u30D0\u30FC\u30B8\u30E7\u30F3\u3092\u8A2D\u5B9A\u3057\u307E\u3059\u3002\n\n\u3053\u306E\u8FFD\u52A0\u30AA\u30D7\u30B7\u30E7\u30F3\u306F\u4E88\u544A\u306A\u3057\u306B\u5909\u66F4\u3055\u308C\u308B\u3053\u3068\u304C\u3042\u308A\u307E\u3059\u3002\n
# Translators please note do not translate the options themselves
java.launcher.X.macosx.usage=\n\u6B21\u306E\u30AA\u30D7\u30B7\u30E7\u30F3\u306FMac OS X\u56FA\u6709\u3067\u3059:\n -XstartOnFirstThread\n main()\u30E1\u30BD\u30C3\u30C9\u3092\u6700\u521D(AppKit)\u306E\u30B9\u30EC\u30C3\u30C9\u3067\u5B9F\u884C\u3059\u308B\n -Xdock:name=<application name>\n Dock\u306B\u8868\u793A\u3055\u308C\u308B\u30C7\u30D5\u30A9\u30EB\u30C8\u30FB\u30A2\u30D7\u30EA\u30B1\u30FC\u30B7\u30E7\u30F3\u540D\u3092\u30AA\u30FC\u30D0\u30FC\u30E9\u30A4\u30C9\u3059\u308B\n -Xdock:icon=<path to icon file>\n Dock\u306B\u8868\u793A\u3055\u308C\u308B\u30C7\u30D5\u30A9\u30EB\u30C8\u30FB\u30A2\u30A4\u30B3\u30F3\u3092\u30AA\u30FC\u30D0\u30FC\u30E9\u30A4\u30C9\u3059\u308B\n\n
@ -58,4 +59,4 @@ java.launcher.module.error1=\u30E2\u30B8\u30E5\u30FC\u30EB{0}\u306BModuleMainCla
java.launcher.module.error2=\u30A8\u30E9\u30FC: \u30E2\u30B8\u30E5\u30FC\u30EB{1}\u306B\u30E1\u30A4\u30F3\u30FB\u30AF\u30E9\u30B9{0}\u304C\u898B\u3064\u304B\u3089\u306A\u304B\u3063\u305F\u304B\u30ED\u30FC\u30C9\u3067\u304D\u307E\u305B\u3093\u3067\u3057\u305F
java.launcher.module.error3=\u30A8\u30E9\u30FC: \u30E2\u30B8\u30E5\u30FC\u30EB{1}\u306E\u30E1\u30A4\u30F3\u30FB\u30AF\u30E9\u30B9{0}\u3092\u30ED\u30FC\u30C9\u3067\u304D\u307E\u305B\u3093\n\t{2}
java.launcher.module.error4={0}\u304C\u898B\u3064\u304B\u308A\u307E\u305B\u3093
java.launcher.module.error5=\u30A8\u30E9\u30FC: \u30E2\u30B8\u30E5\u30FC\u30EB{1}\u306E\u30E1\u30A4\u30F3\u30FB\u30AF\u30E9\u30B9{0}\u3092\u521D\u671F\u5316\u3067\u304D\u307E\u305B\u3093\n\u539F\u56E0: {1}: {2}
java.launcher.module.error5=\u30A8\u30E9\u30FC: \u30E2\u30B8\u30E5\u30FC\u30EB{1}\u306E\u30E1\u30A4\u30F3\u30FB\u30AF\u30E9\u30B9{0}\u3092\u521D\u671F\u5316\u3067\u304D\u307E\u305B\u3093\n\u539F\u56E0: {2}: {3}

@ -1,5 +1,5 @@
#
# Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2007, 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -34,8 +34,8 @@ java.launcher.opt.footer = \ -cp <\u76EE\u5F55\u548C zip/jar \u6587\u4EF6\u76
| -enablesystemassertions\n \u542F\u7528\u7CFB\u7EDF\u65AD\u8A00\n -dsa | -disablesystemassertions\n \u7981\u7528\u7CFB\u7EDF\u65AD\u8A00\n -agentlib:<\u5E93\u540D>[=<\u9009\u9879>]\n \u52A0\u8F7D\u672C\u673A\u4EE3\u7406\u5E93 <\u5E93\u540D>, \u4F8B\u5982 -agentlib:jdwp\n \u53E6\u8BF7\u53C2\u9605 -agentlib:jdwp=help\n -agentpath:<\u8DEF\u5F84\u540D>[=<\u9009\u9879>]\n \u6309\u5B8C\u6574\u8DEF\u5F84\u540D\u52A0\u8F7D\u672C\u673A\u4EE3\u7406\u5E93\n -javaagent:<jar \u8DEF\u5F84>[=<\u9009\u9879>]\n \u52A0\u8F7D Java \u7F16\u7A0B\u8BED\u8A00\u4EE3\u7406, \u8BF7\u53C2\u9605 java.lang.instrument\n -splash:<\u56FE\u50CF\u8DEF\u5F84>\n \u4F7F\u7528\u6307\u5B9A\u7684\u56FE\u50CF\u663E\u793A\u542F\u52A8\u5C4F\u5E55\n \u81EA\u52A8\u652F\u6301\u548C\u4F7F\u7528 HiDPI \u7F29\u653E\u56FE\u50CF\n (\u5982\u679C\u53EF\u7528)\u3002\u5E94\u59CB\u7EC8\u5C06\u672A\u7F29\u653E\u7684\u56FE\u50CF\u6587\u4EF6\u540D (\u4F8B\u5982, image.ext)\n \u4F5C\u4E3A\u53C2\u6570\u4F20\u9012\u7ED9 -splash \u9009\u9879\u3002\n \u5C06\u81EA\u52A8\u9009\u53D6\u63D0\u4F9B\u7684\u6700\u5408\u9002\u7684\u7F29\u653E\n \u56FE\u50CF\u3002\n \u6709\u5173\u8BE6\u7EC6\u4FE1\u606F, \u8BF7\u53C2\u9605 SplashScreen API \u6587\u6863\n @argument \u6587\u4EF6\n \u4E00\u4E2A\u6216\u591A\u4E2A\u5305\u542B\u9009\u9879\u7684\u53C2\u6570\u6587\u4EF6\n -disable-@files\n \u963B\u6B62\u8FDB\u4E00\u6B65\u6269\u5C55\u53C2\u6570\u6587\u4EF6\n --enable-preview\n \u5141\u8BB8\u7C7B\u4F9D\u8D56\u4E8E\u6B64\u53D1\u884C\u7248\u7684\u9884\u89C8\u529F\u80FD\n\u8981\u4E3A\u957F\u9009\u9879\u6307\u5B9A\u53C2\u6570, \u53EF\u4EE5\u4F7F\u7528 --<\u540D\u79F0>=<\u503C> \u6216\n--<\u540D\u79F0> <\u503C>\u3002\n
# Translators please note do not translate the options themselves
java.launcher.X.usage=\n -Xbatch \u7981\u7528\u540E\u53F0\u7F16\u8BD1\n -Xbootclasspath/a:<\u4EE5 {0} \u5206\u9694\u7684\u76EE\u5F55\u548C zip/jar \u6587\u4EF6>\n \u9644\u52A0\u5728\u5F15\u5BFC\u7C7B\u8DEF\u5F84\u672B\u5C3E\n -Xcheck:jni \u5BF9 JNI \u51FD\u6570\u6267\u884C\u5176\u4ED6\u68C0\u67E5\n -Xcomp \u5728\u9996\u6B21\u8C03\u7528\u65F6\u5F3A\u5236\u7F16\u8BD1\u65B9\u6CD5\n -Xdebug \u4E3A\u5B9E\u73B0\u5411\u540E\u517C\u5BB9\u800C\u63D0\u4F9B\n -Xdiag \u663E\u793A\u9644\u52A0\u8BCA\u65AD\u6D88\u606F\n -Xfuture \u542F\u7528\u6700\u4E25\u683C\u7684\u68C0\u67E5\uFF0C\u9884\u671F\u5C06\u6765\u7684\u9ED8\u8BA4\u503C\n -Xint \u4EC5\u89E3\u91CA\u6A21\u5F0F\u6267\u884C\n -Xinternalversion\n \u663E\u793A\u6BD4 -version \u9009\u9879\u66F4\u8BE6\u7EC6\u7684 JVM\n \u7248\u672C\u4FE1\u606F\n -Xloggc:<\u6587\u4EF6> \u5C06 GC \u72B6\u6001\u8BB0\u5F55\u5728\u6587\u4EF6\u4E2D\uFF08\u5E26\u65F6\u95F4\u6233\uFF09\n -Xmixed \u6DF7\u5408\u6A21\u5F0F\u6267\u884C\uFF08\u9ED8\u8BA4\u503C\uFF09\n -Xmn<\u5927\u5C0F> \u4E3A\u5E74\u8F7B\u4EE3\uFF08\u65B0\u751F\u4EE3\uFF09\u8BBE\u7F6E\u521D\u59CB\u548C\u6700\u5927\u5806\u5927\u5C0F\n \uFF08\u4EE5\u5B57\u8282\u4E3A\u5355\u4F4D\uFF09\n -Xms<\u5927\u5C0F> \u8BBE\u7F6E\u521D\u59CB Java \u5806\u5927\u5C0F\n -Xmx<\u5927\u5C0F> \u8BBE\u7F6E\u6700\u5927 Java \u5806\u5927\u5C0F\n -Xnoclassgc \u7981\u7528\u7C7B\u5783\u573E\u6536\u96C6\n -Xrs \u51CF\u5C11 Java/VM \u5BF9\u64CD\u4F5C\u7CFB\u7EDF\u4FE1\u53F7\u7684\u4F7F\u7528\uFF08\u8BF7\u53C2\u89C1\u6587\u6863\uFF09\n -Xshare:auto \u5728\u53EF\u80FD\u7684\u60C5\u51B5\u4E0B\u4F7F\u7528\u5171\u4EAB\u7C7B\u6570\u636E\uFF08\u9ED8\u8BA4\u503C\uFF09\n -Xshare:off \u4E0D\u5C1D\u8BD5\u4F7F\u7528\u5171\u4EAB\u7C7B\u6570\u636E\n -Xshare:on \u8981\u6C42\u4F7F\u7528\u5171\u4EAB\u7C7B\u6570\u636E\uFF0C\u5426\u5219\u5C06\u5931\u8D25\u3002\n -XshowSettings \u663E\u793A\u6240\u6709\u8BBE\u7F6E\u5E76\u7EE7\u7EED\n -XshowSettings:all\n \u663E\u793A\u6240\u6709\u8BBE\u7F6E\u5E76\u7EE7\u7EED\n -XshowSettings:locale\n \u663E\u793A\u6240\u6709\u4E0E\u533A\u57DF\u8BBE\u7F6E\u76F8\u5173\u7684\u8BBE\u7F6E\u5E76\u7EE7\u7EED\n -XshowSettings:properties\n \u663E\u793A\u6240\u6709\u5C5E\u6027\u8BBE\u7F6E\u5E76\u7EE7\u7EED\n -XshowSettings:vm\n \u663E\u793A\u6240\u6709\u4E0E vm \u76F8\u5173\u7684\u8BBE\u7F6E\u5E76\u7EE7\u7EED\n -XshowSettings:system\n \uFF08\u4EC5 Linux\uFF09\u663E\u793A\u4E3B\u673A\u7CFB\u7EDF\u6216\u5BB9\u5668\n \u914D\u7F6E\u5E76\u7EE7\u7EED\n -Xss<\u5927\u5C0F> \u8BBE\u7F6E Java \u7EBF\u7A0B\u5806\u6808\u5927\u5C0F\n -Xverify \u8BBE\u7F6E\u5B57\u8282\u7801\u9A8C\u8BC1\u5668\u7684\u6A21\u5F0F\n --add-reads <\u6A21\u5757>=<\u76EE\u6807\u6A21\u5757>(,<\u76EE\u6807\u6A21\u5757>)*\n \u66F4\u65B0 <\u6A21\u5757> \u4EE5\u8BFB\u53D6 <\u76EE\u6807\u6A21\u5757>\uFF0C\u800C\u65E0\u8BBA\n \u6A21\u5757\u58F0\u660E\u5982\u4F55\u3002\n <\u76EE\u6807\u6A21\u5757> \u53EF\u4EE5\u662F ALL-UNNAMED \u4EE5\u8BFB\u53D6\u6240\u6709\u672A\u547D\u540D\n \u6A21\u5757\u3002\n --add-exports <\u6A21\u5757>/<\u7A0B\u5E8F\u5305>=<\u76EE\u6807\u6A21\u5757>(,<\u76EE\u6807\u6A21\u5757>)*\n \u66F4\u65B0 <\u6A21\u5757> \u4EE5\u5C06 <\u7A0B\u5E8F\u5305> \u5BFC\u51FA\u5230 <\u76EE\u6807\u6A21\u5757>\uFF0C\n \u800C\u65E0\u8BBA\u6A21\u5757\u58F0\u660E\u5982\u4F55\u3002\n <\u76EE\u6807\u6A21\u5757> \u53EF\u4EE5\u662F ALL-UNNAMED \u4EE5\u5BFC\u51FA\u5230\u6240\u6709\n \
\u672A\u547D\u540D\u6A21\u5757\u3002\n --add-opens <\u6A21\u5757>/<\u7A0B\u5E8F\u5305>=<\u76EE\u6807\u6A21\u5757>(,<\u76EE\u6807\u6A21\u5757>)*\n \u66F4\u65B0 <\u6A21\u5757> \u4EE5\u5728 <\u76EE\u6807\u6A21\u5757> \u4E2D\u6253\u5F00\n <\u7A0B\u5E8F\u5305>\uFF0C\u800C\u65E0\u8BBA\u6A21\u5757\u58F0\u660E\u5982\u4F55\u3002\n --illegal-access=<\u503C>\n \u5141\u8BB8\u6216\u62D2\u7EDD\u901A\u8FC7\u672A\u547D\u540D\u6A21\u5757\u4E2D\u7684\u4EE3\u7801\u5BF9\u547D\u540D\u6A21\u5757\u4E2D\u7684\n \u7C7B\u578B\u6210\u5458\u8FDB\u884C\u8BBF\u95EE\u3002\n <\u503C> \u4E3A "deny"\u3001"permit"\u3001"warn" \u6216 "debug" \u4E4B\u4E00\n \u6B64\u9009\u9879\u5C06\u5728\u672A\u6765\u53D1\u884C\u7248\u4E2D\u5220\u9664\u3002\n --limit-modules <\u6A21\u5757\u540D>[,<\u6A21\u5757\u540D>...]\n \u9650\u5236\u53EF\u89C2\u5BDF\u6A21\u5757\u7684\u9886\u57DF\n --patch-module <\u6A21\u5757>=<\u6587\u4EF6>({0}<\u6587\u4EF6>)*\n \u4F7F\u7528 JAR \u6587\u4EF6\u6216\u76EE\u5F55\u4E2D\u7684\u7C7B\u548C\u8D44\u6E90\n \u8986\u76D6\u6216\u589E\u5F3A\u6A21\u5757\u3002\n --disable-@files \u7981\u6B62\u8FDB\u4E00\u6B65\u6269\u5C55\u53C2\u6570\u6587\u4EF6\n --source <\u7248\u672C>\n \u8BBE\u7F6E\u6E90\u6587\u4EF6\u6A21\u5F0F\u4E2D\u6E90\u7684\u7248\u672C\u3002\n\n\u8FD9\u4E9B\u989D\u5916\u9009\u9879\u5982\u6709\u66F4\u6539\uFF0C\u6055\u4E0D\u53E6\u884C\u901A\u77E5\u3002\n
java.launcher.X.usage=\n -Xbatch \u7981\u7528\u540E\u53F0\u7F16\u8BD1\n -Xbootclasspath/a:<\u4EE5 {0} \u5206\u9694\u7684\u76EE\u5F55\u548C zip/jar \u6587\u4EF6>\n \u9644\u52A0\u5728\u5F15\u5BFC\u7C7B\u8DEF\u5F84\u672B\u5C3E\n -Xcheck:jni \u5BF9 JNI \u51FD\u6570\u6267\u884C\u5176\u4ED6\u68C0\u67E5\n -Xcomp \u5F3A\u5236\u5728\u9996\u6B21\u8C03\u7528\u65F6\u7F16\u8BD1\u65B9\u6CD5\n -Xdebug \u4E3A\u5B9E\u73B0\u5411\u540E\u517C\u5BB9\u800C\u63D0\u4F9B\n -Xdiag \u663E\u793A\u9644\u52A0\u8BCA\u65AD\u6D88\u606F\n -Xfuture \u542F\u7528\u6700\u4E25\u683C\u7684\u68C0\u67E5\uFF0C\u9884\u671F\u5C06\u6765\u7684\u9ED8\u8BA4\u503C\n \u6B64\u9009\u9879\u5DF2\u8FC7\u65F6\uFF0C\u53EF\u80FD\u4F1A\u5728\n \u672A\u6765\u53D1\u884C\u7248\u4E2D\u5220\u9664\u3002\n -Xint \u4EC5\u89E3\u91CA\u6A21\u5F0F\u6267\u884C\n -Xinternalversion\n \u663E\u793A\u6BD4 -version \u9009\u9879\u66F4\u8BE6\u7EC6\u7684\n JVM \u7248\u672C\u4FE1\u606F\n -Xlog:<opts> \u914D\u7F6E\u6216\u542F\u7528\u91C7\u7528 Java \u865A\u62DF\n \u673A (Java Virtual Machine, JVM) \u7EDF\u4E00\u8BB0\u5F55\u6846\u67B6\u8FDB\u884C\u4E8B\u4EF6\u8BB0\u5F55\u3002\u4F7F\u7528 -Xlog:help\n \u53EF\u4E86\u89E3\u8BE6\u7EC6\u4FE1\u606F\u3002\n -Xloggc:<file> \u5C06 GC \u72B6\u6001\u8BB0\u5F55\u5728\u6587\u4EF6\u4E2D\uFF08\u5E26\u65F6\u95F4\u6233\uFF09\n -Xmixed \u6DF7\u5408\u6A21\u5F0F\u6267\u884C\uFF08\u9ED8\u8BA4\u503C\uFF09\n -Xmn<size> \u4E3A\u5E74\u8F7B\u4EE3\uFF08\u65B0\u751F\u4EE3\uFF09\u8BBE\u7F6E\u521D\u59CB\u548C\u6700\u5927\u5806\u5927\u5C0F\n \uFF08\u4EE5\u5B57\u8282\u4E3A\u5355\u4F4D\uFF09\n -Xms<size> \u8BBE\u7F6E\u521D\u59CB Java \u5806\u5927\u5C0F\n -Xmx<size> \u8BBE\u7F6E\u6700\u5927 Java \u5806\u5927\u5C0F\n -Xnoclassgc \u7981\u7528\u7C7B\u5783\u573E\u6536\u96C6\n -Xrs \u51CF\u5C11 Java/VM \u5BF9\u64CD\u4F5C\u7CFB\u7EDF\u4FE1\u53F7\u7684\u4F7F\u7528\uFF08\u8BF7\u53C2\u89C1\u6587\u6863\uFF09\n -Xshare:auto \u5728\u53EF\u80FD\u7684\u60C5\u51B5\u4E0B\u4F7F\u7528\u5171\u4EAB\u7C7B\u6570\u636E\uFF08\u9ED8\u8BA4\u503C\uFF09\n -Xshare:off \u4E0D\u5C1D\u8BD5\u4F7F\u7528\u5171\u4EAB\u7C7B\u6570\u636E\n -Xshare:on \u8981\u6C42\u4F7F\u7528\u5171\u4EAB\u7C7B\u6570\u636E\uFF0C\u5426\u5219\u5C06\u5931\u8D25\u3002\n -XshowSettings \u663E\u793A\u6240\u6709\u8BBE\u7F6E\u5E76\u7EE7\u7EED\n -XshowSettings:all\n \u663E\u793A\u6240\u6709\u8BBE\u7F6E\u5E76\u7EE7\u7EED\n -XshowSettings:locale\n \u663E\u793A\u6240\u6709\u4E0E\u533A\u57DF\u8BBE\u7F6E\u76F8\u5173\u7684\u8BBE\u7F6E\u5E76\u7EE7\u7EED\n -XshowSettings:properties\n \u663E\u793A\u6240\u6709\u5C5E\u6027\u8BBE\u7F6E\u5E76\u7EE7\u7EED\n -XshowSettings:vm\n \u663E\u793A\u6240\u6709\u4E0E vm \u76F8\u5173\u7684\u8BBE\u7F6E\u5E76\u7EE7\u7EED\n -XshowSettings:system\n \uFF08\u4EC5 Linux\uFF09\u663E\u793A\u4E3B\u673A\u7CFB\u7EDF\u6216\u5BB9\u5668\n \u914D\u7F6E\u5E76\u7EE7\u7EED\n -Xss<size> \u8BBE\u7F6E Java \u7EBF\u7A0B\u5806\u6808\u5927\u5C0F\n -Xverify \u8BBE\u7F6E\u5B57\u8282\u7801\u9A8C\u8BC1\u5668\u7684\u6A21\u5F0F\n \u8BF7\u6CE8\u610F\uFF0C\u9009\u9879 -Xverify:none \u5DF2\u8FC7\u65F6\uFF0C\n \u53EF\u80FD\u4F1A\u5728\u672A\u6765\u53D1\u884C\u7248\u4E2D\u5220\u9664\u3002\n --add-reads <module>=<target-module>(,<target-module>)*\n \u66F4\u65B0 <module> \u4EE5\u8BFB\u53D6 <target-module>\uFF0C\u800C\u65E0\u8BBA\n \u6A21\u5757\u5982\u4F55\u58F0\u660E\u3002 \n <target-module> \u53EF\u4EE5\u662F \
ALL-UNNAMED\uFF0C\u5C06\u8BFB\u53D6\u6240\u6709\u672A\u547D\u540D\n \u6A21\u5757\u3002\n --add-exports <module>/<package>=<target-module>(,<target-module>)*\n \u66F4\u65B0 <module> \u4EE5\u5C06 <package> \u5BFC\u51FA\u5230 <target-module>\uFF0C\n \u800C\u65E0\u8BBA\u6A21\u5757\u5982\u4F55\u58F0\u660E\u3002\n <target-module> \u53EF\u4EE5\u662F ALL-UNNAMED\uFF0C\u5C06\u5BFC\u51FA\u5230\u6240\u6709\n \u672A\u547D\u540D\u6A21\u5757\u3002\n --add-opens <module>/<package>=<target-module>(,<target-module>)*\n \u66F4\u65B0 <module> \u4EE5\u5728 <target-module> \u4E2D\u6253\u5F00\n <package>\uFF0C\u800C\u65E0\u8BBA\u6A21\u5757\u5982\u4F55\u58F0\u660E\u3002\n --illegal-access=<value>\n \u5141\u8BB8\u6216\u62D2\u7EDD\u901A\u8FC7\u672A\u547D\u540D\u6A21\u5757\u4E2D\u7684\u4EE3\u7801\u5BF9\u547D\u540D\u6A21\u5757\u4E2D\u7684\n \u7C7B\u578B\u6210\u5458\u8FDB\u884C\u8BBF\u95EE\u3002\n <value> \u4E3A "deny"\u3001"permit"\u3001"warn" \u6216 "debug" \u4E4B\u4E00\n \u6B64\u9009\u9879\u5C06\u5728\u672A\u6765\u53D1\u884C\u7248\u4E2D\u5220\u9664\u3002\n --limit-modules <module name>[,<module name>...]\n \u9650\u5236\u53EF\u89C2\u5BDF\u6A21\u5757\u7684\u9886\u57DF\n --patch-module <module>=<file>({0}<file>)*\n \u4F7F\u7528 JAR \u6587\u4EF6\u6216\u76EE\u5F55\u4E2D\u7684\u7C7B\u548C\u8D44\u6E90\n \u8986\u76D6\u6216\u589E\u5F3A\u6A21\u5757\u3002\n --disable-@files \u7981\u6B62\u8FDB\u4E00\u6B65\u6269\u5C55\u53C2\u6570\u6587\u4EF6\n --source <version>\n \u8BBE\u7F6E\u6E90\u6587\u4EF6\u6A21\u5F0F\u4E2D\u6E90\u7684\u7248\u672C\u3002\n\n\u8FD9\u4E9B\u989D\u5916\u9009\u9879\u5982\u6709\u66F4\u6539, \u6055\u4E0D\u53E6\u884C\u901A\u77E5\u3002\n
# Translators please note do not translate the options themselves
java.launcher.X.macosx.usage=\n\u4EE5\u4E0B\u9009\u9879\u4E3A Mac OS X \u7279\u5B9A\u7684\u9009\u9879:\n -XstartOnFirstThread\n \u5728\u7B2C\u4E00\u4E2A (AppKit) \u7EBF\u7A0B\u4E0A\u8FD0\u884C main() \u65B9\u6CD5\n -Xdock:name=<\u5E94\u7528\u7A0B\u5E8F\u540D\u79F0>\n \u8986\u76D6\u505C\u9760\u680F\u4E2D\u663E\u793A\u7684\u9ED8\u8BA4\u5E94\u7528\u7A0B\u5E8F\u540D\u79F0\n -Xdock:icon=<\u56FE\u6807\u6587\u4EF6\u7684\u8DEF\u5F84>\n \u8986\u76D6\u505C\u9760\u680F\u4E2D\u663E\u793A\u7684\u9ED8\u8BA4\u56FE\u6807\n\n
@ -57,4 +57,4 @@ java.launcher.module.error1=\u6A21\u5757 {0} \u4E0D\u5177\u6709 ModuleMainClass
java.launcher.module.error2=\u9519\u8BEF: \u5728\u6A21\u5757 {1} \u4E2D\u627E\u4E0D\u5230\u6216\u65E0\u6CD5\u52A0\u8F7D\u4E3B\u7C7B {0}
java.launcher.module.error3=\u9519\u8BEF: \u65E0\u6CD5\u5C06\u4E3B\u7C7B {0} \u52A0\u8F7D\u5230\u6A21\u5757 {1} \u4E2D\n\t{2}
java.launcher.module.error4=\u627E\u4E0D\u5230{0}
java.launcher.module.error5=\u9519\u8BEF: \u65E0\u6CD5\u521D\u59CB\u5316\u6A21\u5757 {1} \u4E2D\u7684\u4E3B\u7C7B {0}\n\u539F\u56E0: {1}: {2}
java.launcher.module.error5=\u9519\u8BEF: \u65E0\u6CD5\u521D\u59CB\u5316\u6A21\u5757 {1} \u4E2D\u7684\u4E3B\u7C7B {0}\n\u539F\u56E0: {2}: {3}

@ -95,6 +95,8 @@ public class Resources_ja extends java.util.ListResourceBundle {
"\u81EA\u5DF1\u7F72\u540D\u578B\u8A3C\u660E\u66F8\u3092\u751F\u6210\u3057\u307E\u3059"}, //-selfcert
{"Changes.the.store.password.of.a.keystore",
"\u30AD\u30FC\u30B9\u30C8\u30A2\u306E\u30B9\u30C8\u30A2\u30FB\u30D1\u30B9\u30EF\u30FC\u30C9\u3092\u5909\u66F4\u3057\u307E\u3059"}, //-storepasswd
{"showinfo.command.help", "\u30BB\u30AD\u30E5\u30EA\u30C6\u30A3\u95A2\u9023\u60C5\u5831\u3092\u8868\u793A\u3057\u307E\u3059"},
// keytool: help: options
{"alias.name.of.the.entry.to.process",
"\u51E6\u7406\u3059\u308B\u30A8\u30F3\u30C8\u30EA\u306E\u5225\u540D"}, //-alias
@ -140,6 +142,7 @@ public class Resources_ja extends java.util.ListResourceBundle {
"\u30D7\u30ED\u30F3\u30D7\u30C8\u3092\u8868\u793A\u3057\u306A\u3044"}, //-noprompt
{"password.through.protected.mechanism",
"\u4FDD\u8B77\u30E1\u30AB\u30CB\u30BA\u30E0\u306B\u3088\u308B\u30D1\u30B9\u30EF\u30FC\u30C9"}, //-protected
{"tls.option.help", "TLS\u69CB\u6210\u60C5\u5831\u3092\u8868\u793A\u3057\u307E\u3059"},
// The following 2 values should span 2 lines, the first for the
// option itself, the second for its -providerArg value.
@ -250,7 +253,6 @@ public class Resources_ja extends java.util.ListResourceBundle {
{"Keystore.password.is.too.short.must.be.at.least.6.characters",
"\u30AD\u30FC\u30B9\u30C8\u30A2\u306E\u30D1\u30B9\u30EF\u30FC\u30C9\u304C\u77ED\u3059\u304E\u307E\u3059 - 6\u6587\u5B57\u4EE5\u4E0A\u306B\u3057\u3066\u304F\u3060\u3055\u3044"},
{"Unknown.Entry.Type", "\u4E0D\u660E\u306A\u30A8\u30F3\u30C8\u30EA\u30FB\u30BF\u30A4\u30D7"},
{"Too.many.failures.Alias.not.changed", "\u969C\u5BB3\u304C\u591A\u3059\u304E\u307E\u3059\u3002\u5225\u540D\u306F\u5909\u66F4\u3055\u308C\u307E\u305B\u3093"},
{"Entry.for.alias.alias.successfully.imported.",
"\u5225\u540D{0}\u306E\u30A8\u30F3\u30C8\u30EA\u306E\u30A4\u30F3\u30DD\u30FC\u30C8\u306B\u6210\u529F\u3057\u307E\u3057\u305F\u3002"},
{"Entry.for.alias.alias.not.imported.", "\u5225\u540D{0}\u306E\u30A8\u30F3\u30C8\u30EA\u306F\u30A4\u30F3\u30DD\u30FC\u30C8\u3055\u308C\u307E\u305B\u3093\u3067\u3057\u305F\u3002"},
@ -311,10 +313,6 @@ public class Resources_ja extends java.util.ListResourceBundle {
{"Too.many.failures.Key.entry.not.cloned",
"\u969C\u5BB3\u304C\u591A\u3059\u304E\u307E\u3059\u3002\u30AD\u30FC\u30FB\u30A8\u30F3\u30C8\u30EA\u306E\u30AF\u30ED\u30FC\u30F3\u306F\u4F5C\u6210\u3055\u308C\u307E\u305B\u3093\u3067\u3057\u305F"},
{"key.password.for.alias.", "<{0}>\u306E\u30AD\u30FC\u306E\u30D1\u30B9\u30EF\u30FC\u30C9"},
{"Keystore.entry.for.id.getName.already.exists",
"<{0}>\u306E\u30AD\u30FC\u30B9\u30C8\u30A2\u30FB\u30A8\u30F3\u30C8\u30EA\u306F\u3059\u3067\u306B\u5B58\u5728\u3057\u307E\u3059"},
{"Creating.keystore.entry.for.id.getName.",
"<{0}>\u306E\u30AD\u30FC\u30B9\u30C8\u30A2\u30FB\u30A8\u30F3\u30C8\u30EA\u3092\u4F5C\u6210\u4E2D..."},
{"No.entries.from.identity.database.added",
"\u30A2\u30A4\u30C7\u30F3\u30C6\u30A3\u30C6\u30A3\u30FB\u30C7\u30FC\u30BF\u30D9\u30FC\u30B9\u304B\u3089\u8FFD\u52A0\u3055\u308C\u305F\u30A8\u30F3\u30C8\u30EA\u306F\u3042\u308A\u307E\u305B\u3093"},
{"Alias.name.alias", "\u5225\u540D: {0}"},
@ -352,7 +350,6 @@ public class Resources_ja extends java.util.ListResourceBundle {
{"Do.you.still.want.to.add.it.to.your.own.keystore.no.",
"\u30AD\u30FC\u30B9\u30C8\u30A2\u306B\u8FFD\u52A0\u3057\u307E\u3059\u304B\u3002 [\u3044\u3044\u3048]: "},
{"Trust.this.certificate.no.", "\u3053\u306E\u8A3C\u660E\u66F8\u3092\u4FE1\u983C\u3057\u307E\u3059\u304B\u3002 [\u3044\u3044\u3048]: "},
{"YES", "\u306F\u3044"},
{"New.prompt.", "\u65B0\u898F{0}: "},
{"Passwords.must.differ", "\u30D1\u30B9\u30EF\u30FC\u30C9\u306F\u7570\u306A\u3063\u3066\u3044\u308B\u5FC5\u8981\u304C\u3042\u308A\u307E\u3059"},
{"Re.enter.new.prompt.", "\u65B0\u898F{0}\u3092\u518D\u5165\u529B\u3057\u3066\u304F\u3060\u3055\u3044: "},
@ -392,7 +389,6 @@ public class Resources_ja extends java.util.ListResourceBundle {
{"Signer.d.", "\u7F72\u540D\u8005\u756A\u53F7%d:"},
{"Timestamp.", "\u30BF\u30A4\u30E0\u30B9\u30BF\u30F3\u30D7:"},
{"Signature.", "\u7F72\u540D:"},
{"CRLs.", "CRL:"},
{"Certificate.owner.", "\u8A3C\u660E\u66F8\u306E\u6240\u6709\u8005: "},
{"Not.a.signed.jar.file", "\u7F72\u540D\u4ED8\u304DJAR\u30D5\u30A1\u30A4\u30EB\u3067\u306F\u3042\u308A\u307E\u305B\u3093"},
{"No.certificate.from.the.SSL.server",
@ -407,13 +403,10 @@ public class Resources_ja extends java.util.ListResourceBundle {
"\u8A3C\u660E\u66F8\u5FDC\u7B54\u306B\u306F\u3001<{0}>\u306E\u516C\u958B\u30AD\u30FC\u306F\u542B\u307E\u308C\u307E\u305B\u3093"},
{"Incomplete.certificate.chain.in.reply",
"\u5FDC\u7B54\u3057\u305F\u8A3C\u660E\u66F8\u30C1\u30A7\u30FC\u30F3\u306F\u4E0D\u5B8C\u5168\u3067\u3059"},
{"Certificate.chain.in.reply.does.not.verify.",
"\u5FDC\u7B54\u3057\u305F\u8A3C\u660E\u66F8\u30C1\u30A7\u30FC\u30F3\u306F\u691C\u8A3C\u3055\u308C\u3066\u3044\u307E\u305B\u3093: "},
{"Top.level.certificate.in.reply.",
"\u5FDC\u7B54\u3057\u305F\u30C8\u30C3\u30D7\u30EC\u30D9\u30EB\u306E\u8A3C\u660E\u66F8:\n"},
{".is.not.trusted.", "... \u306F\u4FE1\u983C\u3055\u308C\u3066\u3044\u307E\u305B\u3093\u3002 "},
{"Install.reply.anyway.no.", "\u5FDC\u7B54\u3092\u30A4\u30F3\u30B9\u30C8\u30FC\u30EB\u3057\u307E\u3059\u304B\u3002[\u3044\u3044\u3048]: "},
{"NO", "\u3044\u3044\u3048"},
{"Public.keys.in.reply.and.keystore.don.t.match",
"\u5FDC\u7B54\u3057\u305F\u516C\u958B\u30AD\u30FC\u3068\u30AD\u30FC\u30B9\u30C8\u30A2\u304C\u4E00\u81F4\u3057\u307E\u305B\u3093"},
{"Certificate.reply.and.certificate.in.keystore.are.identical",
@ -474,6 +467,8 @@ public class Resources_ja extends java.util.ListResourceBundle {
{"backup.keystore.warning", "\u5143\u306E\u30AD\u30FC\u30B9\u30C8\u30A2\"%1$s\"\u306F\"%3$s\"\u3068\u3057\u3066\u30D0\u30C3\u30AF\u30A2\u30C3\u30D7\u3055\u308C\u307E\u3059..."},
{"importing.keystore.status", "\u30AD\u30FC\u30B9\u30C8\u30A2%1$s\u3092%2$s\u306B\u30A4\u30F3\u30DD\u30FC\u30C8\u3057\u3066\u3044\u307E\u3059..."},
{"keyalg.option.1.missing.warning", "-keyalg\u30AA\u30D7\u30B7\u30E7\u30F3\u304C\u3042\u308A\u307E\u305B\u3093\u3002\u30C7\u30D5\u30A9\u30EB\u30C8\u306E\u30AD\u30FC\u30FB\u30A2\u30EB\u30B4\u30EA\u30BA\u30E0(%s)\u306F\u3001\u65E7\u5F0F\u306E\u30A2\u30EB\u30B4\u30EA\u30BA\u30E0\u3067\u3001\u73FE\u5728\u306F\u63A8\u5968\u3055\u308C\u307E\u305B\u3093\u3002JDK\u306E\u5F8C\u7D9A\u306E\u30EA\u30EA\u30FC\u30B9\u3067\u306F\u3001\u30C7\u30D5\u30A9\u30EB\u30C8\u306F\u524A\u9664\u3055\u308C\u308B\u4E88\u5B9A\u3067\u3001-keyalg\u30AA\u30D7\u30B7\u30E7\u30F3\u3092\u6307\u5B9A\u3059\u308B\u5FC5\u8981\u304C\u3042\u308A\u307E\u3059\u3002"},
{"showinfo.no.option", "-showinfo\u306E\u30AA\u30D7\u30B7\u30E7\u30F3\u304C\u3042\u308A\u307E\u305B\u3093\u3002\"keytool -showinfo -tls\"\u3092\u8A66\u3057\u3066\u304F\u3060\u3055\u3044\u3002"},
};
