8234574: Rename prediction methods in G1Analytics
Reviewed-by: sjohanss, sangheki
commit eaca9f8846
parent c0d5a70efb
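This change renames the prediction helpers consistently across the G1 policy code: get_new_prediction becomes predict, get_new_unit_prediction becomes predict_in_unit_interval, get_new_lower_zero_bound_prediction becomes predict_zero_bounded, and G1Analytics::get_new_size_prediction becomes predict_size. Callers in G1Analytics, G1ConcurrentMark, the adaptive IHOP control, the survivor rate groups, and the G1Predictions gtests are updated to match.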
@@ -125,16 +125,16 @@ bool G1Analytics::enough_samples_available(TruncatedSeq const* seq) const {
   return seq->num() >= 3;
 }

-double G1Analytics::get_new_unit_prediction(TruncatedSeq const* seq) const {
-  return _predictor->get_new_unit_prediction(seq);
+double G1Analytics::predict_in_unit_interval(TruncatedSeq const* seq) const {
+  return _predictor->predict_in_unit_interval(seq);
 }

-size_t G1Analytics::get_new_size_prediction(TruncatedSeq const* seq) const {
-  return (size_t)get_new_lower_zero_bound_prediction(seq);
+size_t G1Analytics::predict_size(TruncatedSeq const* seq) const {
+  return (size_t)predict_zero_bounded(seq);
 }

-double G1Analytics::get_new_lower_zero_bound_prediction(TruncatedSeq const* seq) const {
-  return _predictor->get_new_lower_zero_bound_prediction(seq);
+double G1Analytics::predict_zero_bounded(TruncatedSeq const* seq) const {
+  return _predictor->predict_zero_bounded(seq);
 }

 int G1Analytics::num_alloc_rate_ms() const {
@@ -229,50 +229,50 @@ void G1Analytics::report_rs_length(double rs_length) {
 }

 double G1Analytics::predict_alloc_rate_ms() const {
-  return get_new_lower_zero_bound_prediction(_alloc_rate_ms_seq);
+  return predict_zero_bounded(_alloc_rate_ms_seq);
 }

 double G1Analytics::predict_concurrent_refine_rate_ms() const {
-  return get_new_lower_zero_bound_prediction(_concurrent_refine_rate_ms_seq);
+  return predict_zero_bounded(_concurrent_refine_rate_ms_seq);
 }

 double G1Analytics::predict_logged_cards_rate_ms() const {
-  return get_new_lower_zero_bound_prediction(_logged_cards_rate_ms_seq);
+  return predict_zero_bounded(_logged_cards_rate_ms_seq);
 }

 double G1Analytics::predict_young_card_merge_to_scan_ratio() const {
-  return get_new_unit_prediction(_young_card_merge_to_scan_ratio_seq);
+  return predict_in_unit_interval(_young_card_merge_to_scan_ratio_seq);
 }

 size_t G1Analytics::predict_scan_card_num(size_t rs_length, bool for_young_gc) const {
   if (for_young_gc || !enough_samples_available(_mixed_card_merge_to_scan_ratio_seq)) {
     return (size_t)(rs_length * predict_young_card_merge_to_scan_ratio());
   } else {
-    return (size_t)(rs_length * get_new_unit_prediction(_mixed_card_merge_to_scan_ratio_seq));
+    return (size_t)(rs_length * predict_in_unit_interval(_mixed_card_merge_to_scan_ratio_seq));
   }
 }

 double G1Analytics::predict_card_merge_time_ms(size_t card_num, bool for_young_gc) const {
   if (for_young_gc || !enough_samples_available(_mixed_cost_per_card_merge_ms_seq)) {
-    return card_num * get_new_lower_zero_bound_prediction(_young_cost_per_card_merge_ms_seq);
+    return card_num * predict_zero_bounded(_young_cost_per_card_merge_ms_seq);
   } else {
-    return card_num * get_new_lower_zero_bound_prediction(_mixed_cost_per_card_merge_ms_seq);
+    return card_num * predict_zero_bounded(_mixed_cost_per_card_merge_ms_seq);
   }
 }

 double G1Analytics::predict_card_scan_time_ms(size_t card_num, bool for_young_gc) const {
   if (for_young_gc || !enough_samples_available(_mixed_cost_per_card_scan_ms_seq)) {
-    return card_num * get_new_lower_zero_bound_prediction(_young_cost_per_card_scan_ms_seq);
+    return card_num * predict_zero_bounded(_young_cost_per_card_scan_ms_seq);
   } else {
-    return card_num * get_new_lower_zero_bound_prediction(_mixed_cost_per_card_scan_ms_seq);
+    return card_num * predict_zero_bounded(_mixed_cost_per_card_scan_ms_seq);
   }
 }

 double G1Analytics::predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const {
   if (!enough_samples_available(_cost_per_byte_ms_during_cm_seq)) {
-    return (1.1 * bytes_to_copy) * get_new_lower_zero_bound_prediction(_copy_cost_per_byte_ms_seq);
+    return (1.1 * bytes_to_copy) * predict_zero_bounded(_copy_cost_per_byte_ms_seq);
   } else {
-    return bytes_to_copy * get_new_lower_zero_bound_prediction(_cost_per_byte_ms_during_cm_seq);
+    return bytes_to_copy * predict_zero_bounded(_cost_per_byte_ms_during_cm_seq);
   }
 }

@@ -280,36 +280,36 @@ double G1Analytics::predict_object_copy_time_ms(size_t bytes_to_copy, bool durin
   if (during_concurrent_mark) {
     return predict_object_copy_time_ms_during_cm(bytes_to_copy);
   } else {
-    return bytes_to_copy * get_new_lower_zero_bound_prediction(_copy_cost_per_byte_ms_seq);
+    return bytes_to_copy * predict_zero_bounded(_copy_cost_per_byte_ms_seq);
   }
 }

 double G1Analytics::predict_constant_other_time_ms() const {
-  return get_new_lower_zero_bound_prediction(_constant_other_time_ms_seq);
+  return predict_zero_bounded(_constant_other_time_ms_seq);
 }

 double G1Analytics::predict_young_other_time_ms(size_t young_num) const {
-  return young_num * get_new_lower_zero_bound_prediction(_young_other_cost_per_region_ms_seq);
+  return young_num * predict_zero_bounded(_young_other_cost_per_region_ms_seq);
 }

 double G1Analytics::predict_non_young_other_time_ms(size_t non_young_num) const {
-  return non_young_num * get_new_lower_zero_bound_prediction(_non_young_other_cost_per_region_ms_seq);
+  return non_young_num * predict_zero_bounded(_non_young_other_cost_per_region_ms_seq);
 }

 double G1Analytics::predict_remark_time_ms() const {
-  return get_new_lower_zero_bound_prediction(_concurrent_mark_remark_times_ms);
+  return predict_zero_bounded(_concurrent_mark_remark_times_ms);
 }

 double G1Analytics::predict_cleanup_time_ms() const {
-  return get_new_lower_zero_bound_prediction(_concurrent_mark_cleanup_times_ms);
+  return predict_zero_bounded(_concurrent_mark_cleanup_times_ms);
 }

 size_t G1Analytics::predict_rs_length() const {
-  return get_new_size_prediction(_rs_length_seq) + get_new_size_prediction(_rs_length_diff_seq);
+  return predict_size(_rs_length_seq) + predict_size(_rs_length_diff_seq);
 }

 size_t G1Analytics::predict_pending_cards() const {
-  return get_new_size_prediction(_pending_cards_seq);
+  return predict_size(_pending_cards_seq);
 }

 double G1Analytics::last_known_gc_end_time_sec() const {
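Several of the predictors above share one pattern: prefer the mixed-gc sample sequence once it has enough data (at least three samples, per enough_samples_available), and fall back to the young-gc sequence otherwise. Below is a minimal standalone sketch of that selection logic; the Seq type and the example numbers are hypothetical stand-ins for TruncatedSeq and real measurements.

#include <cstddef>
#include <cstdio>

// Hypothetical stand-in for TruncatedSeq; only a sample count and a
// precomputed zero-bounded prediction are modeled.
struct Seq {
  size_t samples;
  double zero_bounded_prediction;
  size_t num() const { return samples; }
};

// Mirrors G1Analytics::enough_samples_available(): at least three samples.
static bool enough_samples_available(const Seq* seq) {
  return seq->num() >= 3;
}

// Same shape as predict_card_scan_time_ms(): use the mixed-gc sequence only
// when it has enough samples, otherwise fall back to the young-gc sequence.
static double predict_card_scan_time_ms(size_t card_num, bool for_young_gc,
                                        const Seq* young_seq, const Seq* mixed_seq) {
  const Seq* chosen =
      (for_young_gc || !enough_samples_available(mixed_seq)) ? young_seq : mixed_seq;
  return card_num * chosen->zero_bounded_prediction;
}

int main() {
  Seq young{10, 0.02};  // e.g. 0.02 ms per card, 10 samples
  Seq mixed{2, 0.05};   // too few samples yet, so it is ignored
  std::printf("%.2f ms\n",
              predict_card_scan_time_ms(1000, /*for_young_gc=*/false, &young, &mixed));
  return 0;
}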
@@ -84,9 +84,9 @@ class G1Analytics: public CHeapObj<mtGC> {
   // The constant used is random but "small".
   bool enough_samples_available(TruncatedSeq const* seq) const;

-  double get_new_unit_prediction(TruncatedSeq const* seq) const;
-  size_t get_new_size_prediction(TruncatedSeq const* seq) const;
-  double get_new_lower_zero_bound_prediction(TruncatedSeq const* seq) const;
+  double predict_in_unit_interval(TruncatedSeq const* seq) const;
+  size_t predict_size(TruncatedSeq const* seq) const;
+  double predict_zero_bounded(TruncatedSeq const* seq) const;

 public:
   G1Analytics(const G1Predictions* predictor);
@@ -2589,7 +2589,7 @@ void G1CMTask::do_marking_step(double time_target_ms,
   bool do_stealing = do_termination && !is_serial;

   G1Predictions const& predictor = _g1h->policy()->predictor();
-  double diff_prediction_ms = predictor.get_new_lower_zero_bound_prediction(&_marking_step_diff_ms);
+  double diff_prediction_ms = predictor.predict_zero_bounded(&_marking_step_diff_ms);
   _time_target_ms = time_target_ms - diff_prediction_ms;

   // set up the variables that are used in the work-based scheme to
@@ -113,8 +113,8 @@ size_t G1AdaptiveIHOPControl::actual_target_threshold() const {
   );
 }

-double G1AdaptiveIHOPControl::get_new_prediction(TruncatedSeq const* seq) const {
-  return _predictor->get_new_lower_zero_bound_prediction(seq);
+double G1AdaptiveIHOPControl::predict(TruncatedSeq const* seq) const {
+  return _predictor->predict_zero_bounded(seq);
 }

 bool G1AdaptiveIHOPControl::have_enough_data_for_prediction() const {
@@ -124,8 +124,8 @@ bool G1AdaptiveIHOPControl::have_enough_data_for_prediction() const {

 size_t G1AdaptiveIHOPControl::get_conc_mark_start_threshold() {
   if (have_enough_data_for_prediction()) {
-    double pred_marking_time = get_new_prediction(&_marking_times_s);
-    double pred_promotion_rate = get_new_prediction(&_allocation_rate_s);
+    double pred_marking_time = predict(&_marking_times_s);
+    double pred_promotion_rate = predict(&_allocation_rate_s);
     size_t pred_promotion_size = (size_t)(pred_marking_time * pred_promotion_rate);

     size_t predicted_needed_bytes_during_marking =
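These renamed predict() calls feed the adaptive IHOP computation: the bytes expected to be promoted while marking runs are the product of the predicted marking time and the predicted promotion (allocation) rate, and that pred_promotion_size then enters predicted_needed_bytes_during_marking. As a purely illustrative example, a predicted marking time of 0.5 s combined with a promotion rate of 200 MB/s would yield a pred_promotion_size of about 100 MB.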
@@ -172,8 +172,8 @@ void G1AdaptiveIHOPControl::print() {
       actual_target,
       G1CollectedHeap::heap()->used(),
       _last_unrestrained_young_size,
-      get_new_prediction(&_allocation_rate_s),
-      get_new_prediction(&_marking_times_s) * 1000.0,
+      predict(&_allocation_rate_s),
+      predict(&_marking_times_s) * 1000.0,
       have_enough_data_for_prediction() ? "true" : "false");
 }

@@ -183,7 +183,7 @@ void G1AdaptiveIHOPControl::send_trace_event(G1NewTracer* tracer) {
       actual_target_threshold(),
       G1CollectedHeap::heap()->used(),
       _last_unrestrained_young_size,
-      get_new_prediction(&_allocation_rate_s),
-      get_new_prediction(&_marking_times_s),
+      predict(&_allocation_rate_s),
+      predict(&_marking_times_s),
       have_enough_data_for_prediction());
 }
@@ -124,7 +124,7 @@ class G1AdaptiveIHOPControl : public G1IHOPControl {
   size_t _last_unrestrained_young_size;

   // Get a new prediction bounded below by zero from the given sequence.
-  double get_new_prediction(TruncatedSeq const* seq) const;
+  double predict(TruncatedSeq const* seq) const;

   bool have_enough_data_for_prediction() const;

@@ -54,16 +54,16 @@ class G1Predictions {
   // Confidence factor.
   double sigma() const { return _sigma; }

-  double get_new_prediction(TruncatedSeq const* seq) const {
+  double predict(TruncatedSeq const* seq) const {
     return seq->davg() + _sigma * stddev_estimate(seq);
   }

-  double get_new_unit_prediction(TruncatedSeq const* seq) const {
-    return clamp(get_new_prediction(seq), 0.0, 1.0);
+  double predict_in_unit_interval(TruncatedSeq const* seq) const {
+    return clamp(predict(seq), 0.0, 1.0);
   }

-  double get_new_lower_zero_bound_prediction(TruncatedSeq const* seq) const {
-    return MAX2(get_new_prediction(seq), 0.0);
+  double predict_zero_bounded(TruncatedSeq const* seq) const {
+    return MAX2(predict(seq), 0.0);
   }
 };

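The G1Predictions hunk above also documents what the renamed helpers compute: the raw prediction is the decaying average plus sigma standard-deviation estimates, predict_in_unit_interval clamps that value to [0, 1] (used for ratios such as survival rates), and predict_zero_bounded cuts it off at zero (used for times, rates and sizes). A minimal standalone sketch of those semantics follows; the Samples type, its plain average and standard deviation, and the numbers in main are simplified, hypothetical stand-ins for TruncatedSeq and its decaying estimates.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Simplified stand-in for TruncatedSeq: plain average and standard deviation
// instead of HotSpot's decaying estimates.
struct Samples {
  std::vector<double> values;
  void add(double v) { values.push_back(v); }
  double avg() const {
    if (values.empty()) return 0.0;
    double sum = 0.0;
    for (double v : values) sum += v;
    return sum / values.size();
  }
  double stddev() const {
    if (values.size() < 2) return 0.0;
    double a = avg();
    double sq = 0.0;
    for (double v : values) sq += (v - a) * (v - a);
    return std::sqrt(sq / values.size());
  }
};

struct Predictor {
  double sigma;  // confidence factor, as in G1Predictions

  // Raw prediction: average plus sigma standard deviations.
  double predict(const Samples& s) const {
    return s.avg() + sigma * s.stddev();
  }
  // Clamped to [0, 1], for ratios.
  double predict_in_unit_interval(const Samples& s) const {
    return std::clamp(predict(s), 0.0, 1.0);
  }
  // Bounded below by zero, for times, rates and sizes.
  double predict_zero_bounded(const Samples& s) const {
    return std::max(predict(s), 0.0);
  }
};

int main() {
  Samples s;
  s.add(0.8); s.add(1.1); s.add(0.9); s.add(1.3);
  Predictor p{0.5};  // illustrative sigma
  std::printf("raw=%.3f unit=%.3f zero=%.3f\n",
              p.predict(s), p.predict_in_unit_interval(s), p.predict_zero_bounded(s));
  return 0;
}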
@@ -110,7 +110,7 @@ void SurvRateGroup::finalize_predictions(const G1Predictions& predictor) {
   double accum = 0.0;
   double pred = 0.0;
   for (size_t i = 0; i < _stats_arrays_length; ++i) {
-    pred = predictor.get_new_unit_prediction(_surv_rate_predictors[i]);
+    pred = predictor.predict_in_unit_interval(_surv_rate_predictors[i]);
     accum += pred;
     _accum_surv_rate_pred[i] = accum;
   }
@@ -66,7 +66,7 @@ public:

     age = MIN2(age, (int)_stats_arrays_length - 1);

-    return predictor.get_new_unit_prediction(_surv_rate_predictors[age]);
+    return predictor.predict_in_unit_interval(_surv_rate_predictors[age]);
   }

   int next_age_index() {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,17 +35,17 @@ TEST_VM(G1Predictions, basic_predictions) {
   G1Predictions predictor(0.0);
   TruncatedSeq s;

-  double p0 = predictor.get_new_prediction(&s);
+  double p0 = predictor.predict(&s);
   ASSERT_LT(p0, epsilon) << "Initial prediction of empty sequence must be 0.0";

   s.add(5.0);
-  double p1 = predictor.get_new_prediction(&s);
+  double p1 = predictor.predict(&s);
   ASSERT_NEAR(p1, 5.0, epsilon);

   for (int i = 0; i < 40; i++) {
     s.add(5.0);
   }
-  double p2 = predictor.get_new_prediction(&s);
+  double p2 = predictor.predict(&s);
   ASSERT_NEAR(p2, 5.0, epsilon);
 }

@@ -56,20 +56,20 @@ TEST_VM(G1Predictions, average_not_stdev_predictions) {
   TruncatedSeq s;

   s.add(1.0);
-  double p1 = predictor.get_new_prediction(&s);
+  double p1 = predictor.predict(&s);
   ASSERT_GT(p1, s.davg()) << "First prediction must be greater than average";

   s.add(1.0);
-  double p2 = predictor.get_new_prediction(&s);
+  double p2 = predictor.predict(&s);
   ASSERT_GT(p1, p2) << "First prediction must be greater than second";

   s.add(1.0);
-  double p3 = predictor.get_new_prediction(&s);
+  double p3 = predictor.predict(&s);
   ASSERT_GT(p2, p3) << "Second prediction must be greater than third";

   s.add(1.0);
   s.add(1.0); // Five elements are now in the sequence.
-  double p4 = predictor.get_new_prediction(&s);
+  double p4 = predictor.predict(&s);
   ASSERT_LT(p4, p3) << "Fourth prediction must be smaller than third";
   ASSERT_NEAR(p4, 1.0, epsilon);
 }
@@ -82,20 +82,20 @@ TEST_VM(G1Predictions, average_stdev_predictions) {
   TruncatedSeq s;

   s.add(0.5);
-  double p1 = predictor.get_new_prediction(&s);
+  double p1 = predictor.predict(&s);
   ASSERT_GT(p1, s.davg()) << "First prediction must be greater than average";

   s.add(0.2);
-  double p2 = predictor.get_new_prediction(&s);
+  double p2 = predictor.predict(&s);
   ASSERT_GT(p1, p2) << "First prediction must be greater than second";

   s.add(0.5);
-  double p3 = predictor.get_new_prediction(&s);
+  double p3 = predictor.predict(&s);
   ASSERT_GT(p2, p3) << "Second prediction must be greater than third";

   s.add(0.2);
   s.add(2.0);
-  double p4 = predictor.get_new_prediction(&s);
+  double p4 = predictor.predict(&s);
   ASSERT_GT(p4, p3) << "Fourth prediction must be greater than third";
 }

@@ -104,24 +104,24 @@ TEST_VM(G1Predictions, unit_predictions) {
   G1Predictions predictor(0.5);
   TruncatedSeq s;

-  double p0 = predictor.get_new_unit_prediction(&s);
+  double p0 = predictor.predict_in_unit_interval(&s);
   ASSERT_LT(p0, epsilon) << "Initial prediction of empty sequence must be 0.0";

   s.add(100.0);
-  double p1 = predictor.get_new_unit_prediction(&s);
+  double p1 = predictor.predict_in_unit_interval(&s);
   ASSERT_NEAR(p1, 1.0, epsilon);

   // Feed the sequence additional positive values to test the high bound.
   for (int i = 0; i < 3; i++) {
     s.add(2.0);
   }
-  ASSERT_NEAR(predictor.get_new_unit_prediction(&s), 1.0, epsilon);
+  ASSERT_NEAR(predictor.predict_in_unit_interval(&s), 1.0, epsilon);

   // Feed the sequence additional large negative value to test the low bound.
   for (int i = 0; i < 4; i++) {
     s.add(-200.0);
   }
-  ASSERT_NEAR(predictor.get_new_unit_prediction(&s), 0.0, epsilon);
+  ASSERT_NEAR(predictor.predict_in_unit_interval(&s), 0.0, epsilon);
 }

 // Some tests to verify bounding between [0 .. +inf]
@@ -129,7 +129,7 @@ TEST_VM(G1Predictions, lower_bound_zero_predictions) {
   G1Predictions predictor(0.5);
   TruncatedSeq s;

-  double p0 = predictor.get_new_lower_zero_bound_prediction(&s);
+  double p0 = predictor.predict_zero_bounded(&s);
   ASSERT_LT(p0, epsilon) << "Initial prediction of empty sequence must be 0.0";

   s.add(100.0);
@@ -138,11 +138,11 @@ TEST_VM(G1Predictions, lower_bound_zero_predictions) {
   for (int i = 0; i < 3; i++) {
     s.add(2.0);
   }
-  ASSERT_GT(predictor.get_new_lower_zero_bound_prediction(&s), 1.0);
+  ASSERT_GT(predictor.predict_zero_bounded(&s), 1.0);

   // Feed the sequence additional large negative value to test the low bound.
   for (int i = 0; i < 4; i++) {
     s.add(-200.0);
   }
-  ASSERT_NEAR(predictor.get_new_lower_zero_bound_prediction(&s), 0.0, epsilon);
+  ASSERT_NEAR(predictor.predict_zero_bounded(&s), 0.0, epsilon);
 }