8213224: Move code related to GC threads calculation out of AdaptiveSizePolicy

Consolidate code related to GC threads calculation into a single class

Reviewed-by: tschatzl, pliden
Man Cao 2018-12-07 12:46:31 +08:00
parent 714b05023e
commit df4b7015bf
18 changed files with 387 additions and 306 deletions

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -521,15 +521,3 @@ void VM_Version::allow_all() {
void VM_Version::revert() {
_features = saved_features;
}
/* Determine a suitable number of threads on this particular machine.
*
* FIXME: Simply checking the processor family is insufficient.
*/
unsigned int VM_Version::calc_parallel_worker_threads() {
const int num = 5;
const int den = is_post_niagara() ? 16 : 8;
const int threshold = 8;
return nof_parallel_worker_threads(num, den, threshold);
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -365,8 +365,12 @@ public:
// this properly in order to enable complete page size support.
static uint page_size_count() { return 2; }
// Calculates the number of parallel threads
static unsigned int calc_parallel_worker_threads();
// Override default denominator for ParallelGCThreads.
//
// FIXME: Simply checking the processor family is insufficient.
static uint parallel_worker_threads_denominator() {
return is_post_niagara() ? 16 : 8;
}
};
#endif // CPU_SPARC_VM_VM_VERSION_SPARC_HPP

View File

@@ -30,10 +30,10 @@
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/shared/gcArguments.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/defaultStream.hpp"
size_t CMSArguments::conservative_max_heap_alignment() {
@@ -46,7 +46,7 @@ void CMSArguments::set_parnew_gc_flags() {
assert(UseConcMarkSweepGC, "CMS is expected to be on here");
if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
FLAG_SET_DEFAULT(ParallelGCThreads, VM_Version::parallel_worker_threads());
FLAG_SET_DEFAULT(ParallelGCThreads, WorkerPolicy::parallel_worker_threads());
assert(ParallelGCThreads > 0, "We should always have at least one thread by default");
} else if (ParallelGCThreads == 0) {
jio_fprintf(defaultStream::error_stream(),

View File

@@ -61,6 +61,7 @@
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.hpp"
@@ -3488,7 +3489,7 @@ void CMSConcMarkingTask::coordinator_yield() {
bool CMSCollector::do_marking_mt() {
assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
uint num_workers = AdaptiveSizePolicy::calc_active_conc_workers(conc_workers()->total_workers(),
uint num_workers = WorkerPolicy::calc_active_conc_workers(conc_workers()->total_workers(),
conc_workers()->active_workers(),
Threads::number_of_non_daemon_threads());
num_workers = conc_workers()->update_active_workers(num_workers);

View File

@@ -49,6 +49,7 @@
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/workgroup.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/iterator.inline.hpp"
@@ -866,7 +867,7 @@ void ParNewGeneration::collect(bool full,
WorkGang* workers = gch->workers();
assert(workers != NULL, "Need workgang for parallel work");
uint active_workers =
AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
WorkerPolicy::calc_active_workers(workers->total_workers(),
workers->active_workers(),
Threads::number_of_non_daemon_threads());
active_workers = workers->update_active_workers(active_workers);

View File

@@ -30,9 +30,9 @@
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/gcArguments.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/vm_version.hpp"
size_t G1Arguments::conservative_max_heap_alignment() {
return HeapRegion::max_region_size();
@@ -77,7 +77,7 @@ void G1Arguments::parse_verification_type(const char* type) {
void G1Arguments::initialize() {
GCArguments::initialize();
assert(UseG1GC, "Error");
FLAG_SET_DEFAULT(ParallelGCThreads, VM_Version::parallel_worker_threads());
FLAG_SET_DEFAULT(ParallelGCThreads, WorkerPolicy::parallel_worker_threads());
if (ParallelGCThreads == 0) {
assert(!FLAG_IS_DEFAULT(ParallelGCThreads), "The default value for ParallelGCThreads should not be 0.");
vm_exit_during_initialization("The flag -XX:+UseG1GC can not be combined with -XX:ParallelGCThreads=0", NULL);

View File

@@ -61,7 +61,6 @@
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
@@ -78,6 +77,7 @@
#include "gc/shared/referenceProcessor.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
@@ -2912,7 +2912,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
}
GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
uint active_workers = WorkerPolicy::calc_active_workers(workers()->total_workers(),
workers()->active_workers(),
Threads::number_of_non_daemon_threads());
active_workers = workers()->update_active_workers(active_workers);

View File

@@ -39,7 +39,6 @@
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
@@ -51,6 +50,7 @@
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "include/jvm.h"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
@@ -858,7 +858,7 @@ uint G1ConcurrentMark::calc_active_marking_workers() {
result = _max_concurrent_workers;
} else {
result =
AdaptiveSizePolicy::calc_default_active_workers(_max_concurrent_workers,
WorkerPolicy::calc_default_active_workers(_max_concurrent_workers,
1, /* Minimum workers */
_num_concurrent_workers,
Threads::number_of_non_daemon_threads());

View File

@@ -37,11 +37,11 @@
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
@@ -88,15 +88,15 @@ uint G1FullCollector::calc_active_workers() {
uint waste_worker_count = MAX2((max_wasted_regions_allowed * 2) , 1u);
uint heap_waste_worker_limit = MIN2(waste_worker_count, max_worker_count);
// Also consider HeapSizePerGCThread by calling AdaptiveSizePolicy to calculate
// Also consider HeapSizePerGCThread by calling WorkerPolicy to calculate
// the number of workers.
uint current_active_workers = heap->workers()->active_workers();
uint adaptive_worker_limit = AdaptiveSizePolicy::calc_active_workers(max_worker_count, current_active_workers, 0);
uint active_worker_limit = WorkerPolicy::calc_active_workers(max_worker_count, current_active_workers, 0);
// Update active workers to the lower of the limits.
uint worker_count = MIN2(heap_waste_worker_limit, adaptive_worker_limit);
uint worker_count = MIN2(heap_waste_worker_limit, active_worker_limit);
log_debug(gc, task)("Requesting %u active workers for full compaction (waste limited workers: %u, adaptive workers: %u)",
worker_count, heap_waste_worker_limit, adaptive_worker_limit);
worker_count, heap_waste_worker_limit, active_worker_limit);
worker_count = heap->workers()->update_active_workers(worker_count);
log_info(gc, task)("Using %u workers of %u for full compaction", worker_count, max_worker_count);

View File

@@ -27,6 +27,7 @@
#include "gc/parallel/gcTaskThread.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/workerManager.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.hpp"
@@ -494,7 +495,7 @@ GCTaskManager::~GCTaskManager() {
void GCTaskManager::set_active_gang() {
_active_workers =
AdaptiveSizePolicy::calc_active_workers(workers(),
WorkerPolicy::calc_active_workers(workers(),
active_workers(),
Threads::number_of_non_daemon_threads());

View File

@@ -29,10 +29,10 @@
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcArguments.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/defaultStream.hpp"
size_t ParallelArguments::conservative_max_heap_alignment() {
@@ -51,7 +51,7 @@ void ParallelArguments::initialize() {
// If no heap maximum was requested explicitly, use some reasonable fraction
// of the physical memory, up to a maximum of 1GB.
FLAG_SET_DEFAULT(ParallelGCThreads,
VM_Version::parallel_worker_threads());
WorkerPolicy::parallel_worker_threads());
if (ParallelGCThreads == 0) {
jio_fprintf(defaultStream::error_stream(),
"The Parallel GC can not be combined with -XX:ParallelGCThreads=0\n");

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2004, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2004, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,18 +24,14 @@
#include "precompiled.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcUtil.inline.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "gc/shared/workgroup.hpp"
#include "logging/log.hpp"
#include "runtime/timer.hpp"
#include "utilities/ostream.hpp"
elapsedTimer AdaptiveSizePolicy::_minor_timer;
elapsedTimer AdaptiveSizePolicy::_major_timer;
bool AdaptiveSizePolicy::_debug_perturbation = false;
// The throughput goal is implemented as
// _throughput_goal = 1 - ( 1 / (1 + gc_cost_ratio))
@@ -94,129 +90,6 @@ AdaptiveSizePolicy::AdaptiveSizePolicy(size_t init_eden_size,
_young_gen_policy_is_ready = false;
}
// If the number of GC threads was set on the command line,
// use it.
// Else
// Calculate the number of GC threads based on the number of Java threads.
// Calculate the number of GC threads based on the size of the heap.
// Use the larger.
uint AdaptiveSizePolicy::calc_default_active_workers(uintx total_workers,
const uintx min_workers,
uintx active_workers,
uintx application_workers) {
// If the user has specifically set the number of
// GC threads, use them.
// If the user has turned off using a dynamic number of GC threads
// or the users has requested a specific number, set the active
// number of workers to all the workers.
uintx new_active_workers = total_workers;
uintx prev_active_workers = active_workers;
uintx active_workers_by_JT = 0;
uintx active_workers_by_heap_size = 0;
// Always use at least min_workers but use up to
// GCThreadsPerJavaThreads * application threads.
active_workers_by_JT =
MAX2((uintx) GCWorkersPerJavaThread * application_workers,
min_workers);
// Choose a number of GC threads based on the current size
// of the heap. This may be complicated because the size of
// the heap depends on factors such as the throughput goal.
// Still a large heap should be collected by more GC threads.
active_workers_by_heap_size =
MAX2((size_t) 2U, Universe::heap()->capacity() / HeapSizePerGCThread);
uintx max_active_workers =
MAX2(active_workers_by_JT, active_workers_by_heap_size);
new_active_workers = MIN2(max_active_workers, (uintx) total_workers);
// Increase GC workers instantly but decrease them more
// slowly.
if (new_active_workers < prev_active_workers) {
new_active_workers =
MAX2(min_workers, (prev_active_workers + new_active_workers) / 2);
}
// Check once more that the number of workers is within the limits.
assert(min_workers <= total_workers, "Minimum workers not consistent with total workers");
assert(new_active_workers >= min_workers, "Minimum workers not observed");
assert(new_active_workers <= total_workers, "Total workers not observed");
if (ForceDynamicNumberOfGCThreads) {
// Assume this is debugging and jiggle the number of GC threads.
if (new_active_workers == prev_active_workers) {
if (new_active_workers < total_workers) {
new_active_workers++;
} else if (new_active_workers > min_workers) {
new_active_workers--;
}
}
if (new_active_workers == total_workers) {
if (_debug_perturbation) {
new_active_workers = min_workers;
}
_debug_perturbation = !_debug_perturbation;
}
assert((new_active_workers <= ParallelGCThreads) &&
(new_active_workers >= min_workers),
"Jiggled active workers too much");
}
log_trace(gc, task)("GCTaskManager::calc_default_active_workers() : "
"active_workers(): " UINTX_FORMAT " new_active_workers: " UINTX_FORMAT " "
"prev_active_workers: " UINTX_FORMAT "\n"
" active_workers_by_JT: " UINTX_FORMAT " active_workers_by_heap_size: " UINTX_FORMAT,
active_workers, new_active_workers, prev_active_workers,
active_workers_by_JT, active_workers_by_heap_size);
assert(new_active_workers > 0, "Always need at least 1");
return new_active_workers;
}
uint AdaptiveSizePolicy::calc_active_workers(uintx total_workers,
uintx active_workers,
uintx application_workers) {
// If the user has specifically set the number of
// GC threads, use them.
// If the user has turned off using a dynamic number of GC threads
// or the users has requested a specific number, set the active
// number of workers to all the workers.
uint new_active_workers;
if (!UseDynamicNumberOfGCThreads ||
(!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) {
new_active_workers = total_workers;
} else {
uintx min_workers = (total_workers == 1) ? 1 : 2;
new_active_workers = calc_default_active_workers(total_workers,
min_workers,
active_workers,
application_workers);
}
assert(new_active_workers > 0, "Always need at least 1");
return new_active_workers;
}
uint AdaptiveSizePolicy::calc_active_conc_workers(uintx total_workers,
uintx active_workers,
uintx application_workers) {
if (!UseDynamicNumberOfGCThreads ||
(!FLAG_IS_DEFAULT(ConcGCThreads) && !ForceDynamicNumberOfGCThreads)) {
return ConcGCThreads;
} else {
uint no_of_gc_threads = calc_default_active_workers(total_workers,
1, /* Minimum number of workers */
active_workers,
application_workers);
return no_of_gc_threads;
}
}
bool AdaptiveSizePolicy::tenuring_threshold_change() const {
return decrement_tenuring_threshold_for_gc_cost() ||
increment_tenuring_threshold_for_gc_cost() ||
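As a worked example of the throughput-goal formula quoted earlier in this file's context: assuming a gc_cost_ratio of 99 (the usual GCTimeRatio default), _throughput_goal = 1 - 1 / (1 + 99) = 0.99, i.e. the policy aims to spend no more than about 1% of total time in GC.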

View File

@@ -25,12 +25,9 @@
#ifndef SHARE_VM_GC_SHARED_ADAPTIVESIZEPOLICY_HPP
#define SHARE_VM_GC_SHARED_ADAPTIVESIZEPOLICY_HPP
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcUtil.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/universe.hpp"
// This class keeps statistical information and computes the
// size of the heap.
@@ -188,8 +185,6 @@ class AdaptiveSizePolicy : public CHeapObj<mtGC> {
julong _young_gen_change_for_minor_throughput;
julong _old_gen_change_for_major_throughput;
static const uint GCWorkersPerJavaThread = 2;
// Accessors
double gc_pause_goal_sec() const { return _gc_pause_goal_sec; }
@@ -334,8 +329,6 @@ class AdaptiveSizePolicy : public CHeapObj<mtGC> {
// Return true if the policy suggested a change.
bool tenuring_threshold_change() const;
static bool _debug_perturbation;
public:
AdaptiveSizePolicy(size_t init_eden_size,
size_t init_promo_size,
@@ -343,32 +336,6 @@ class AdaptiveSizePolicy : public CHeapObj<mtGC> {
double gc_pause_goal_sec,
uint gc_cost_ratio);
// Return number default GC threads to use in the next GC.
static uint calc_default_active_workers(uintx total_workers,
const uintx min_workers,
uintx active_workers,
uintx application_workers);
// Return number of GC threads to use in the next GC.
// This is called sparingly so as not to change the
// number of GC workers gratuitously.
// For ParNew collections
// For PS scavenge and ParOld collections
// For G1 evacuation pauses (subject to update)
// For G1 Full GCs (subject to update)
// Other collection phases inherit the number of
// GC workers from the calls above. For example,
// a CMS parallel remark uses the same number of GC
// workers as the most recent ParNew collection.
static uint calc_active_workers(uintx total_workers,
uintx active_workers,
uintx application_workers);
// Return number of GC threads to use in the next concurrent GC phase.
static uint calc_active_conc_workers(uintx total_workers,
uintx active_workers,
uintx application_workers);
bool is_gc_cms_adaptive_size_policy() {
return kind() == _gc_cms_adaptive_size_policy;
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,11 @@
#ifndef SHARE_VM_GC_SHARED_WORKERMANAGER_HPP
#define SHARE_VM_GC_SHARED_WORKERMANAGER_HPP
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/globalDefinitions.hpp"
class WorkerManager : public AllStatic {
public:
@@ -47,6 +51,23 @@ class WorkerManager : public AllStatic {
// threads and a failure would not be optimal but should not be fatal.
template <class WorkerType>
static uint add_workers (WorkerType* holder,
uint active_workers,
uint total_workers,
uint created_workers,
os::ThreadType worker_type,
bool initializing);
// Log (at trace level) a change in the number of created workers.
template <class WorkerType>
static void log_worker_creation(WorkerType* holder,
uint previous_created_workers,
uint active_workers,
uint created_workers,
bool initializing);
};
template <class WorkerType>
uint WorkerManager::add_workers(WorkerType* holder,
uint active_workers,
uint total_workers,
uint created_workers,
@@ -79,11 +100,10 @@ class WorkerManager : public AllStatic {
"created_workers: %u", created_workers);
return created_workers;
}
}
// Log (at trace level) a change in the number of created workers.
template <class WorkerType>
static void log_worker_creation(WorkerType* holder,
template <class WorkerType>
void WorkerManager::log_worker_creation(WorkerType* holder,
uint previous_created_workers,
uint active_workers,
uint created_workers,
@@ -93,6 +113,6 @@ class WorkerManager : public AllStatic {
log_trace(gc, task)("%s %s(s) previously created workers %u active workers %u total created workers %u",
initializing_msg, holder->group_name(), previous_created_workers, active_workers, created_workers);
}
}
};
}
#endif // SHARE_VM_GC_SHARED_WORKERMANAGER_HPP

View File

@@ -0,0 +1,203 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "memory/universe.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/vm_version.hpp"
bool WorkerPolicy::_debug_perturbation = false;
uint WorkerPolicy::_parallel_worker_threads = 0;
bool WorkerPolicy::_parallel_worker_threads_initialized = false;
uint WorkerPolicy::nof_parallel_worker_threads(uint num,
uint den,
uint switch_pt) {
if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
assert(ParallelGCThreads == 0, "Default ParallelGCThreads is not 0");
uint threads;
// For very large machines, there are diminishing returns
// for large numbers of worker threads. Instead of
// hogging the whole system, use a fraction of the workers for every
// processor after the first 8. For example, on a 72 cpu machine
// and a chosen fraction of 5/8
// use 8 + (72 - 8) * (5/8) == 48 worker threads.
uint ncpus = (uint) os::initial_active_processor_count();
threads = (ncpus <= switch_pt) ?
ncpus :
(switch_pt + ((ncpus - switch_pt) * num) / den);
#ifndef _LP64
// On 32-bit binaries the virtual address space available to the JVM
// is usually limited to 2-3 GB (depends on the platform).
// Do not use up address space with too many threads (stacks and per-thread
// data). Note that x86 apps running on Win64 have 2 stacks per thread.
// GC may more generally scale down threads by max heap size (etc), but the
// consequences of over-provisioning threads are higher on 32-bit JVMS,
// so add hard limit here:
threads = MIN2(threads, (2 * switch_pt));
#endif
return threads;
} else {
return ParallelGCThreads;
}
}
uint WorkerPolicy::calc_parallel_worker_threads() {
uint den = VM_Version::parallel_worker_threads_denominator();
return nof_parallel_worker_threads(5, den, 8);
}
uint WorkerPolicy::parallel_worker_threads() {
if (!_parallel_worker_threads_initialized) {
if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
_parallel_worker_threads = WorkerPolicy::calc_parallel_worker_threads();
} else {
_parallel_worker_threads = ParallelGCThreads;
}
_parallel_worker_threads_initialized = true;
}
return _parallel_worker_threads;
}
// If the number of GC threads was set on the command line, use it.
// Else
// Calculate the number of GC threads based on the number of Java threads.
// Calculate the number of GC threads based on the size of the heap.
// Use the larger.
uint WorkerPolicy::calc_default_active_workers(uintx total_workers,
const uintx min_workers,
uintx active_workers,
uintx application_workers) {
// If the user has specifically set the number of GC threads, use them.
// If the user has turned off using a dynamic number of GC threads
// or the users has requested a specific number, set the active
// number of workers to all the workers.
uintx new_active_workers = total_workers;
uintx prev_active_workers = active_workers;
uintx active_workers_by_JT = 0;
uintx active_workers_by_heap_size = 0;
// Always use at least min_workers but use up to
// GCThreadsPerJavaThreads * application threads.
active_workers_by_JT =
MAX2((uintx) GCWorkersPerJavaThread * application_workers,
min_workers);
// Choose a number of GC threads based on the current size
// of the heap. This may be complicated because the size of
// the heap depends on factors such as the throughput goal.
// Still a large heap should be collected by more GC threads.
active_workers_by_heap_size =
MAX2((size_t) 2U, Universe::heap()->capacity() / HeapSizePerGCThread);
uintx max_active_workers =
MAX2(active_workers_by_JT, active_workers_by_heap_size);
new_active_workers = MIN2(max_active_workers, (uintx) total_workers);
// Increase GC workers instantly but decrease them more
// slowly.
if (new_active_workers < prev_active_workers) {
new_active_workers =
MAX2(min_workers, (prev_active_workers + new_active_workers) / 2);
}
// Check once more that the number of workers is within the limits.
assert(min_workers <= total_workers, "Minimum workers not consistent with total workers");
assert(new_active_workers >= min_workers, "Minimum workers not observed");
assert(new_active_workers <= total_workers, "Total workers not observed");
if (ForceDynamicNumberOfGCThreads) {
// Assume this is debugging and jiggle the number of GC threads.
if (new_active_workers == prev_active_workers) {
if (new_active_workers < total_workers) {
new_active_workers++;
} else if (new_active_workers > min_workers) {
new_active_workers--;
}
}
if (new_active_workers == total_workers) {
if (_debug_perturbation) {
new_active_workers = min_workers;
}
_debug_perturbation = !_debug_perturbation;
}
assert((new_active_workers <= ParallelGCThreads) &&
(new_active_workers >= min_workers),
"Jiggled active workers too much");
}
log_trace(gc, task)("WorkerPolicy::calc_default_active_workers() : "
"active_workers(): " UINTX_FORMAT " new_active_workers: " UINTX_FORMAT " "
"prev_active_workers: " UINTX_FORMAT "\n"
" active_workers_by_JT: " UINTX_FORMAT " active_workers_by_heap_size: " UINTX_FORMAT,
active_workers, new_active_workers, prev_active_workers,
active_workers_by_JT, active_workers_by_heap_size);
assert(new_active_workers > 0, "Always need at least 1");
return new_active_workers;
}
uint WorkerPolicy::calc_active_workers(uintx total_workers,
uintx active_workers,
uintx application_workers) {
// If the user has specifically set the number of GC threads, use them.
// If the user has turned off using a dynamic number of GC threads
// or the users has requested a specific number, set the active
// number of workers to all the workers.
uint new_active_workers;
if (!UseDynamicNumberOfGCThreads ||
(!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) {
new_active_workers = total_workers;
} else {
uintx min_workers = (total_workers == 1) ? 1 : 2;
new_active_workers = calc_default_active_workers(total_workers,
min_workers,
active_workers,
application_workers);
}
assert(new_active_workers > 0, "Always need at least 1");
return new_active_workers;
}
uint WorkerPolicy::calc_active_conc_workers(uintx total_workers,
uintx active_workers,
uintx application_workers) {
if (!UseDynamicNumberOfGCThreads ||
(!FLAG_IS_DEFAULT(ConcGCThreads) && !ForceDynamicNumberOfGCThreads)) {
return ConcGCThreads;
} else {
uint no_of_gc_threads = calc_default_active_workers(total_workers,
1, /* Minimum number of workers */
active_workers,
application_workers);
return no_of_gc_threads;
}
}
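The default sizing formula in this new file can be checked in isolation. Below is a minimal standalone sketch (not HotSpot code) of the fraction-based scaling performed by nof_parallel_worker_threads when ParallelGCThreads is left at its default; the helper name default_parallel_workers and the printed output are illustrative only, num = 5 and switch_pt = 8 come from calc_parallel_worker_threads above, and the 32-bit address-space clamp is omitted.

```cpp
// Standalone illustration of WorkerPolicy::nof_parallel_worker_threads scaling.
#include <cstdio>

static unsigned default_parallel_workers(unsigned ncpus, unsigned den) {
  const unsigned num = 5;        // fixed numerator used by calc_parallel_worker_threads
  const unsigned switch_pt = 8;  // the first 8 CPUs map 1:1 to worker threads
  if (ncpus <= switch_pt) {
    return ncpus;
  }
  // Past switch_pt, only num/den of the remaining CPUs become worker threads.
  return switch_pt + ((ncpus - switch_pt) * num) / den;
}

int main() {
  // 72-CPU example from the source comment: 8 + (72 - 8) * 5 / 8 == 48.
  std::printf("den 8  -> %u\n", default_parallel_workers(72, 8));
  // Post-Niagara SPARC denominator of 16: 8 + (72 - 8) * 5 / 16 == 28.
  std::printf("den 16 -> %u\n", default_parallel_workers(72, 16));
  return 0;
}
```

For the 72-CPU machine used in the comment this yields 48 workers with the default denominator of 8, and 28 with the post-Niagara SPARC override of 16 supplied by parallel_worker_threads_denominator().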

View File

@@ -0,0 +1,81 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_SHARED_WORKERPOLICY_HPP
#define SHARE_VM_GC_SHARED_WORKERPOLICY_HPP
#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
class WorkerPolicy : public AllStatic {
static const uint GCWorkersPerJavaThread = 2;
static bool _debug_perturbation;
static uint _parallel_worker_threads;
static bool _parallel_worker_threads_initialized;
static uint nof_parallel_worker_threads(uint num,
uint den,
uint switch_pt);
// Calculates and returns the number of parallel GC threads. May
// be CPU-architecture-specific.
static uint calc_parallel_worker_threads();
public:
// Returns the number of parallel threads to be used as default value of
// ParallelGCThreads. If that number has not been calculated, do so and
// save it. Returns ParallelGCThreads if it is set on the
// command line.
static uint parallel_worker_threads();
// Return number default GC threads to use in the next GC.
static uint calc_default_active_workers(uintx total_workers,
const uintx min_workers,
uintx active_workers,
uintx application_workers);
// Return number of GC threads to use in the next GC.
// This is called sparingly so as not to change the
// number of GC workers gratuitously.
// For ParNew collections
// For PS scavenge and ParOld collections
// For G1 evacuation pauses (subject to update)
// For G1 Full GCs (subject to update)
// Other collection phases inherit the number of
// GC workers from the calls above. For example,
// a CMS parallel remark uses the same number of GC
// workers as the most recent ParNew collection.
static uint calc_active_workers(uintx total_workers,
uintx active_workers,
uintx application_workers);
// Return number of GC threads to use in the next concurrent GC phase.
static uint calc_active_conc_workers(uintx total_workers,
uintx active_workers,
uintx application_workers);
};
#endif // SHARE_VM_GC_SHARED_WORKERPOLICY_HPP
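calc_default_active_workers, declared above, combines two signals: a count scaled from the number of Java threads (GCWorkersPerJavaThread, which is 2, per application thread) and a count scaled from heap capacity (one worker per HeapSizePerGCThread of heap, at least 2), takes the larger, caps it at the gang's total workers, and shrinks only gradually. A rough standalone sketch follows; the function name and the concrete scenario numbers (heap size, HeapSizePerGCThread value, thread counts) are assumptions for illustration, and the ForceDynamicNumberOfGCThreads "jiggle" path is omitted.

```cpp
// Standalone illustration of the WorkerPolicy::calc_default_active_workers heuristic.
#include <algorithm>
#include <cstdint>
#include <cstdio>

static uint64_t default_active_workers(uint64_t total_workers,
                                       uint64_t min_workers,
                                       uint64_t prev_active_workers,
                                       uint64_t application_workers,
                                       uint64_t heap_capacity,
                                       uint64_t heap_size_per_gc_thread) {
  const uint64_t gc_workers_per_java_thread = 2;  // GCWorkersPerJavaThread
  // Signal 1: scale with the number of Java (application) threads.
  const uint64_t by_java_threads =
      std::max(gc_workers_per_java_thread * application_workers, min_workers);
  // Signal 2: scale with heap capacity, using at least 2 workers.
  const uint64_t by_heap_size =
      std::max<uint64_t>(2, heap_capacity / heap_size_per_gc_thread);
  // Take the larger signal, but never more than the gang provides.
  uint64_t active =
      std::min(std::max(by_java_threads, by_heap_size), total_workers);
  // Grow instantly, shrink slowly: average with the previous value on the way down.
  if (active < prev_active_workers) {
    active = std::max(min_workers, (prev_active_workers + active) / 2);
  }
  return active;
}

int main() {
  // Assumed scenario: gang of 48 workers, 4 currently active, 10 Java threads,
  // 8 GB heap, 64 MB per GC thread (illustrative values, not HotSpot defaults).
  const uint64_t workers = default_active_workers(
      48, 2, 4, 10, 8ULL * 1024 * 1024 * 1024, 64ULL * 1024 * 1024);
  // by_java_threads = 20, by_heap_size = 128, capped at 48 -> 48 active workers.
  std::printf("active workers: %llu\n", (unsigned long long)workers);
  return 0;
}
```

calc_active_workers then simply short-circuits to total_workers when UseDynamicNumberOfGCThreads is off or ParallelGCThreads was set explicitly, and calc_active_conc_workers does the same against ConcGCThreads for concurrent phases.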

View File

@@ -81,8 +81,6 @@ int Abstract_VM_Version::_vm_minor_version = VERSION_INTERIM;
int Abstract_VM_Version::_vm_security_version = VERSION_UPDATE;
int Abstract_VM_Version::_vm_patch_version = VERSION_PATCH;
int Abstract_VM_Version::_vm_build_number = VERSION_BUILD;
unsigned int Abstract_VM_Version::_parallel_worker_threads = 0;
bool Abstract_VM_Version::_parallel_worker_threads_initialized = false;
#if defined(_LP64)
#define VMLP "64-Bit "
@@ -312,55 +310,3 @@ void VM_Version_init() {
os::print_cpu_info(&ls, buf, sizeof(buf));
}
}
unsigned int Abstract_VM_Version::nof_parallel_worker_threads(
unsigned int num,
unsigned int den,
unsigned int switch_pt) {
if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
assert(ParallelGCThreads == 0, "Default ParallelGCThreads is not 0");
unsigned int threads;
// For very large machines, there are diminishing returns
// for large numbers of worker threads. Instead of
// hogging the whole system, use a fraction of the workers for every
// processor after the first 8. For example, on a 72 cpu machine
// and a chosen fraction of 5/8
// use 8 + (72 - 8) * (5/8) == 48 worker threads.
unsigned int ncpus = (unsigned int) os::initial_active_processor_count();
threads = (ncpus <= switch_pt) ?
ncpus :
(switch_pt + ((ncpus - switch_pt) * num) / den);
#ifndef _LP64
// On 32-bit binaries the virtual address space available to the JVM
// is usually limited to 2-3 GB (depends on the platform).
// Do not use up address space with too many threads (stacks and per-thread
// data). Note that x86 apps running on Win64 have 2 stacks per thread.
// GC may more generally scale down threads by max heap size (etc), but the
// consequences of over-provisioning threads are higher on 32-bit JVMS,
// so add hard limit here:
threads = MIN2(threads, (2*switch_pt));
#endif
return threads;
} else {
return ParallelGCThreads;
}
}
unsigned int Abstract_VM_Version::calc_parallel_worker_threads() {
return nof_parallel_worker_threads(5, 8, 8);
}
// Does not set the _initialized flag since it is
// a global flag.
unsigned int Abstract_VM_Version::parallel_worker_threads() {
if (!_parallel_worker_threads_initialized) {
if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
_parallel_worker_threads = VM_Version::calc_parallel_worker_threads();
} else {
_parallel_worker_threads = ParallelGCThreads;
}
_parallel_worker_threads_initialized = true;
}
return _parallel_worker_threads;
}

View File

@@ -56,12 +56,7 @@ class Abstract_VM_Version: AllStatic {
static int _vm_security_version;
static int _vm_patch_version;
static int _vm_build_number;
static unsigned int _parallel_worker_threads;
static bool _parallel_worker_threads_initialized;
static unsigned int nof_parallel_worker_threads(unsigned int num,
unsigned int dem,
unsigned int switch_pt);
public:
// Called as part of the runtime services initialization which is
// called from the management module initialization (via init_globals())
@@ -153,9 +148,10 @@ class Abstract_VM_Version: AllStatic {
// save it. Returns ParallelGCThreads if it is set on the
// command line.
static unsigned int parallel_worker_threads();
// Calculates and returns the number of parallel threads. May
// be VM version specific.
static unsigned int calc_parallel_worker_threads();
// Denominator for computing default ParallelGCThreads for machines with
// a large number of cores.
static uint parallel_worker_threads_denominator() { return 8; }
// Does this CPU support spin wait instruction?
static bool supports_on_spin_wait() { return false; }