8308097: Generational ZGC: Update constructor syntax

Reviewed-by: eosterlund, aboldtch
Author: Stefan Karlsson
Date:   2023-05-16 16:13:58 +00:00
Commit: 60ab1358da (parent 599fa774b8)
95 changed files with 605 additions and 614 deletions
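
The patch itself is a mechanical style cleanup: constructor initializer lists now begin their first line with the colon instead of ending the declaration line with it, and empty constructor bodies that previously spanned two lines are collapsed onto one. A minimal before/after sketch of the pattern, shown on a simplified stand-in for the ZErrno class that appears later in this diff (the real class has more members):

// Simplified illustration of the style change only.
class ZErrno {
private:
  int _error;

public:
  // Old style: the colon trails the constructor declaration line.
  //
  //   ZErrno(int error) :
  //       _error(error) {}
  //
  // New style: the colon starts the initializer list on its own line.
  ZErrno(int error)
    : _error(error) {}
};

The hunks below repeat these two transformations throughout; the change is formatting only.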


@ -601,8 +601,8 @@ private:
}
public:
ZAdjustAddress(MacroAssembler* masm, Address addr) :
_masm(masm),
ZAdjustAddress(MacroAssembler* masm, Address addr)
: _masm(masm),
_addr(addr),
_pre_adjustment(addr.getMode() == Address::pre ? addr.offset() : 0),
_post_adjustment(addr.getMode() == Address::post ? addr.offset() : 0) {
@ -1132,8 +1132,8 @@ public:
}
}
ZSaveLiveRegisters(MacroAssembler* masm, ZBarrierStubC2* stub) :
_masm(masm),
ZSaveLiveRegisters(MacroAssembler* masm, ZBarrierStubC2* stub)
: _masm(masm),
_gp_regs(),
_fp_regs(),
_p_regs() {
@ -1169,8 +1169,8 @@ private:
const Address _ref_addr;
public:
ZSetupArguments(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
_masm(masm),
ZSetupArguments(MacroAssembler* masm, ZLoadBarrierStubC2* stub)
: _masm(masm),
_ref(stub->ref()),
_ref_addr(stub->ref_addr()) {


@ -860,7 +860,7 @@ class ZSaveLiveRegisters {
public:
ZSaveLiveRegisters(MacroAssembler *masm, ZBarrierStubC2 *stub)
: _masm(masm), _reg_mask(stub->live()), _result_reg(stub->result()) {
: _masm(masm), _reg_mask(stub->live()), _result_reg(stub->result()) {
const int register_save_size = iterate_over_register_mask(ACTION_COUNT_ONLY) * BytesPerWord;
_frame_size = align_up(register_save_size, frame::alignment_in_bytes)
@ -972,8 +972,8 @@ class ZSetupArguments {
const Address _ref_addr;
public:
ZSetupArguments(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
_masm(masm),
ZSetupArguments(MacroAssembler* masm, ZLoadBarrierStubC2* stub)
: _masm(masm),
_ref(stub->ref()),
_ref_addr(stub->ref_addr()) {


@ -420,8 +420,7 @@ void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm,
Register src,
Register dst,
Register count,
RegSet saved_regs) {
}
RegSet saved_regs) {}
static void copy_load_barrier(MacroAssembler* masm,
Register ref,
@ -695,8 +694,8 @@ public:
}
}
ZSaveLiveRegisters(MacroAssembler* masm, ZBarrierStubC2* stub) :
_masm(masm),
ZSaveLiveRegisters(MacroAssembler* masm, ZBarrierStubC2* stub)
: _masm(masm),
_gp_regs(),
_fp_regs(),
_vp_regs() {
@ -724,8 +723,8 @@ private:
const Address _ref_addr;
public:
ZSetupArguments(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
_masm(masm),
ZSetupArguments(MacroAssembler* masm, ZLoadBarrierStubC2* stub)
: _masm(masm),
_ref(stub->ref()),
_ref_addr(stub->ref_addr()) {


@ -59,8 +59,7 @@
ZBarrierSetAssembler::ZBarrierSetAssembler()
: _load_bad_relocations(),
_store_bad_relocations(),
_store_good_relocations() {
}
_store_good_relocations() {}
enum class ZXMMSpillMode {
none,
@ -1354,8 +1353,8 @@ private:
}
public:
ZSaveLiveRegisters(MacroAssembler* masm, ZBarrierStubC2* stub) :
_masm(masm),
ZSaveLiveRegisters(MacroAssembler* masm, ZBarrierStubC2* stub)
: _masm(masm),
_gp_registers(),
_opmask_registers(),
_xmm_registers(),
@ -1446,8 +1445,8 @@ private:
const Address _ref_addr;
public:
ZSetupArguments(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
_masm(masm),
ZSetupArguments(MacroAssembler* masm, ZLoadBarrierStubC2* stub)
: _masm(masm),
_ref(stub->ref()),
_ref_addr(stub->ref_addr()) {


@ -74,8 +74,8 @@ static ZErrno mremap(uintptr_t from_addr, uintptr_t to_addr, size_t size) {
return (res == KERN_SUCCESS) ? ZErrno(0) : ZErrno(EINVAL);
}
ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity) :
_base(0),
ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity)
: _base(0),
_initialized(false) {
// Reserve address space for backing memory


@ -118,8 +118,8 @@ static const char* z_preferred_hugetlbfs_mountpoints[] = {
static int z_fallocate_hugetlbfs_attempts = 3;
static bool z_fallocate_supported = true;
ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity) :
_fd(-1),
ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity)
: _fd(-1),
_filesystem(0),
_block_size(0),
_available(0),


@ -69,8 +69,8 @@ private:
}
public:
ZPhysicalMemoryBackingSmallPages(size_t max_capacity) :
ZPhysicalMemoryBackingImpl(),
ZPhysicalMemoryBackingSmallPages(size_t max_capacity)
: ZPhysicalMemoryBackingImpl(),
_handles(max_capacity) {}
size_t commit(zoffset offset, size_t size) {
@ -146,8 +146,8 @@ private:
}
public:
ZPhysicalMemoryBackingLargePages(size_t max_capacity) :
ZPhysicalMemoryBackingImpl(),
ZPhysicalMemoryBackingLargePages(size_t max_capacity)
: ZPhysicalMemoryBackingImpl(),
_page_array(alloc_page_array(max_capacity)) {}
size_t commit(zoffset offset, size_t size) {
@ -212,8 +212,8 @@ static ZPhysicalMemoryBackingImpl* select_impl(size_t max_capacity) {
return new ZPhysicalMemoryBackingSmallPages(max_capacity);
}
ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity) :
_impl(select_impl(max_capacity)) {}
ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity)
: _impl(select_impl(max_capacity)) {}
bool ZPhysicalMemoryBacking::is_initialized() const {
return true;


@ -34,8 +34,8 @@
#include "gc/z/zThreadLocalData.hpp"
#include "utilities/macros.hpp"
ZLoadBarrierStubC1::ZLoadBarrierStubC1(LIRAccess& access, LIR_Opr ref, address runtime_stub) :
_decorators(access.decorators()),
ZLoadBarrierStubC1::ZLoadBarrierStubC1(LIRAccess& access, LIR_Opr ref, address runtime_stub)
: _decorators(access.decorators()),
_ref_addr(access.resolved_addr()),
_ref(ref),
_tmp(LIR_OprFact::illegalOpr),
@ -100,8 +100,8 @@ ZStoreBarrierStubC1::ZStoreBarrierStubC1(LIRAccess& access,
LIR_Opr new_zpointer,
LIR_Opr tmp,
bool is_atomic,
address runtime_stub) :
_ref_addr(access.resolved_addr()),
address runtime_stub)
: _ref_addr(access.resolved_addr()),
_new_zaddress(new_zaddress),
_new_zpointer(new_zpointer),
_tmp(tmp),
@ -156,8 +156,8 @@ private:
LIR_Opr _opr;
public:
LIR_OpZUncolor(LIR_Opr opr) :
LIR_Op(),
LIR_OpZUncolor(LIR_Opr opr)
: LIR_Op(),
_opr(opr) {}
virtual void visit(LIR_OpVisitState* state) {
@ -188,8 +188,8 @@ private:
const bool _on_non_strong;
public:
LIR_OpZLoadBarrier(LIR_Opr opr, ZLoadBarrierStubC1* stub, bool on_non_strong) :
LIR_Op(),
LIR_OpZLoadBarrier(LIR_Opr opr, ZLoadBarrierStubC1* stub, bool on_non_strong)
: LIR_Op(),
_opr(opr),
_stub(stub),
_on_non_strong(on_non_strong) {
@ -223,8 +223,8 @@ static bool barrier_needed(LIRAccess& access) {
return ZBarrierSet::barrier_needed(access.decorators(), access.type());
}
ZBarrierSetC1::ZBarrierSetC1() :
_load_barrier_on_oop_field_preloaded_runtime_stub(nullptr),
ZBarrierSetC1::ZBarrierSetC1()
: _load_barrier_on_oop_field_preloaded_runtime_stub(nullptr),
_load_barrier_on_weak_oop_field_preloaded_runtime_stub(nullptr),
_store_barrier_on_oop_field_with_healing(nullptr),
_store_barrier_on_oop_field_without_healing(nullptr) {}
@ -255,8 +255,8 @@ private:
LIR_Opr _opr;
public:
LIR_OpZColor(LIR_Opr opr) :
LIR_Op(lir_none, opr, nullptr /* info */),
LIR_OpZColor(LIR_Opr opr)
: LIR_Op(lir_none, opr, nullptr /* info */),
_opr(opr) {}
virtual void visit(LIR_OpVisitState* state) {
@ -294,8 +294,8 @@ public:
LIR_Opr new_zaddress,
LIR_Opr new_zpointer,
CodeStub* stub,
CodeEmitInfo* info) :
LIR_Op(lir_none, new_zpointer, nullptr /* info */),
CodeEmitInfo* info)
: LIR_Op(lir_none, new_zpointer, nullptr /* info */),
_addr(addr),
_new_zaddress(new_zaddress),
_new_zpointer(new_zpointer),
@ -498,8 +498,8 @@ private:
const DecoratorSet _decorators;
public:
ZLoadBarrierRuntimeStubCodeGenClosure(DecoratorSet decorators) :
_decorators(decorators) {}
ZLoadBarrierRuntimeStubCodeGenClosure(DecoratorSet decorators)
: _decorators(decorators) {}
virtual OopMapSet* generate_code(StubAssembler* sasm) {
ZBarrierSet::assembler()->generate_c1_load_barrier_runtime_stub(sasm, _decorators);
@ -518,8 +518,8 @@ private:
const bool _self_healing;
public:
ZStoreBarrierRuntimeStubCodeGenClosure(bool self_healing) :
_self_healing(self_healing) {}
ZStoreBarrierRuntimeStubCodeGenClosure(bool self_healing)
: _self_healing(self_healing) {}
virtual OopMapSet* generate_code(StubAssembler* sasm) {
ZBarrierSet::assembler()->generate_c1_store_barrier_runtime_stub(sasm, _self_healing);


@ -66,8 +66,8 @@ public:
size_t _current_index;
public:
Iterator(ZArenaHashtable* table) :
_table(table),
Iterator(ZArenaHashtable* table)
: _table(table),
_current_entry(table->_table[0]),
_current_index(0) {
if (_current_entry == nullptr) {
@ -89,8 +89,8 @@ public:
}
};
ZArenaHashtable(Arena* arena) :
_arena(arena),
ZArenaHashtable(Arena* arena)
: _arena(arena),
_table() {
Copy::zero_to_bytes(&_table, sizeof(_table));
}
@ -127,8 +127,8 @@ private:
int _stubs_start_offset;
public:
ZBarrierSetC2State(Arena* arena) :
_stubs(new (arena) GrowableArray<ZBarrierStubC2*>(arena, 8, 0, nullptr)),
ZBarrierSetC2State(Arena* arena)
: _stubs(new (arena) GrowableArray<ZBarrierStubC2*>(arena, 8, 0, nullptr)),
_live(arena),
_trampoline_stubs_count(0),
_stubs_start_offset(0) {}
@ -200,8 +200,8 @@ int ZBarrierStubC2::stubs_start_offset() {
return barrier_set_state()->stubs_start_offset();
}
ZBarrierStubC2::ZBarrierStubC2(const MachNode* node) :
_node(node),
ZBarrierStubC2::ZBarrierStubC2(const MachNode* node)
: _node(node),
_entry(),
_continuation() {}
@ -232,8 +232,8 @@ ZLoadBarrierStubC2* ZLoadBarrierStubC2::create(const MachNode* node, Address ref
return stub;
}
ZLoadBarrierStubC2::ZLoadBarrierStubC2(const MachNode* node, Address ref_addr, Register ref) :
ZBarrierStubC2(node),
ZLoadBarrierStubC2::ZLoadBarrierStubC2(const MachNode* node, Address ref_addr, Register ref)
: ZBarrierStubC2(node),
_ref_addr(ref_addr),
_ref(ref) {
assert_different_registers(ref, ref_addr.base());
@ -281,14 +281,13 @@ ZStoreBarrierStubC2* ZStoreBarrierStubC2::create(const MachNode* node, Address r
return stub;
}
ZStoreBarrierStubC2::ZStoreBarrierStubC2(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer, bool is_native, bool is_atomic) :
ZBarrierStubC2(node),
ZStoreBarrierStubC2::ZStoreBarrierStubC2(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer, bool is_native, bool is_atomic)
: ZBarrierStubC2(node),
_ref_addr(ref_addr),
_new_zaddress(new_zaddress),
_new_zpointer(new_zpointer),
_is_native(is_native),
_is_atomic(is_atomic) {
}
_is_atomic(is_atomic) {}
Address ZStoreBarrierStubC2::ref_addr() const {
return _ref_addr;


@ -25,8 +25,8 @@
#include "gc/z/vmStructs_z.hpp"
#include "gc/z/zAddress.hpp"
ZGlobalsForVMStructs::ZGlobalsForVMStructs() :
_ZAddressOffsetMask(&ZAddressOffsetMask),
ZGlobalsForVMStructs::ZGlobalsForVMStructs()
: _ZAddressOffsetMask(&ZAddressOffsetMask),
_ZPointerLoadGoodMask(&ZPointerLoadGoodMask),
_ZPointerLoadBadMask(&ZPointerLoadBadMask),
_ZPointerLoadShift(const_cast<size_t*>(&ZPointerLoadShift)),
@ -35,8 +35,7 @@ ZGlobalsForVMStructs::ZGlobalsForVMStructs() :
_ZPointerStoreGoodMask(&ZPointerStoreGoodMask),
_ZPointerStoreBadMask(&ZPointerStoreBadMask),
_ZObjectAlignmentSmallShift(&ZObjectAlignmentSmallShift),
_ZObjectAlignmentSmall(&ZObjectAlignmentSmall) {
}
_ZObjectAlignmentSmall(&ZObjectAlignmentSmall) {}
ZGlobalsForVMStructs ZGlobalsForVMStructs::_instance;
ZGlobalsForVMStructs* ZGlobalsForVMStructs::_instance_p = &ZGlobalsForVMStructs::_instance;


@ -54,8 +54,8 @@ private:
uint8_t _flags;
public:
ZAllocationFlags() :
_flags(0) {}
ZAllocationFlags()
: _flags(0) {}
void set_non_blocking() {
_flags |= field_non_blocking::encode(true);


@ -28,15 +28,15 @@
ZAllocatorEden* ZAllocator::_eden;
ZAllocatorForRelocation* ZAllocator::_relocation[ZAllocator::_relocation_allocators];
ZAllocator::ZAllocator(ZPageAge age) :
_object_allocator(age) {}
ZAllocator::ZAllocator(ZPageAge age)
: _object_allocator(age) {}
void ZAllocator::retire_pages() {
_object_allocator.retire_pages();
}
ZAllocatorEden::ZAllocatorEden() :
ZAllocator(ZPageAge::eden) {
ZAllocatorEden::ZAllocatorEden()
: ZAllocator(ZPageAge::eden) {
ZAllocator::_eden = this;
}
@ -60,9 +60,8 @@ ZPageAge ZAllocatorForRelocation::install() {
return ZPageAge::eden;
}
ZAllocatorForRelocation::ZAllocatorForRelocation() :
ZAllocator(install()) {
}
ZAllocatorForRelocation::ZAllocatorForRelocation()
: ZAllocator(install()) {}
zaddress ZAllocatorForRelocation::alloc_object(size_t size) {
return _object_allocator.alloc_object_for_relocation(size);


@ -54,14 +54,14 @@ inline bool ZArrayIteratorImpl<T, Parallel>::next_parallel(size_t* index) {
}
template <typename T, bool Parallel>
inline ZArrayIteratorImpl<T, Parallel>::ZArrayIteratorImpl(const T* array, size_t length) :
_next(0),
inline ZArrayIteratorImpl<T, Parallel>::ZArrayIteratorImpl(const T* array, size_t length)
: _next(0),
_end(length),
_array(array) {}
template <typename T, bool Parallel>
inline ZArrayIteratorImpl<T, Parallel>::ZArrayIteratorImpl(const ZArray<T>* array) :
ZArrayIteratorImpl<T, Parallel>(array->is_empty() ? nullptr : array->adr_at(0), array->length()) {}
inline ZArrayIteratorImpl<T, Parallel>::ZArrayIteratorImpl(const ZArray<T>* array)
: ZArrayIteratorImpl<T, Parallel>(array->is_empty() ? nullptr : array->adr_at(0), array->length()) {}
template <typename T, bool Parallel>
inline bool ZArrayIteratorImpl<T, Parallel>::next(T* elem) {
@ -90,8 +90,8 @@ inline T ZArrayIteratorImpl<T, Parallel>::index_to_elem(size_t index) {
}
template <typename T>
ZActivatedArray<T>::ZActivatedArray(bool locked) :
_lock(locked ? new ZLock() : nullptr),
ZActivatedArray<T>::ZActivatedArray(bool locked)
: _lock(locked ? new ZLock() : nullptr),
_count(0),
_array() {}


@ -70,8 +70,8 @@ inline void ZAttachedArray<ObjectT, ArrayT>::free(ObjectT* obj) {
}
template <typename ObjectT, typename ArrayT>
inline ZAttachedArray<ObjectT, ArrayT>::ZAttachedArray(size_t length) :
_length(length) {}
inline ZAttachedArray<ObjectT, ArrayT>::ZAttachedArray(size_t length)
: _length(length) {}
template <typename ObjectT, typename ArrayT>
inline size_t ZAttachedArray<ObjectT, ArrayT>::length() const {


@ -47,8 +47,8 @@
class ZBarrierSetC1;
class ZBarrierSetC2;
ZBarrierSet::ZBarrierSet() :
BarrierSet(make_barrier_set_assembler<ZBarrierSetAssembler>(),
ZBarrierSet::ZBarrierSet()
: BarrierSet(make_barrier_set_assembler<ZBarrierSetAssembler>(),
make_barrier_set_c1<ZBarrierSetC1>(),
make_barrier_set_c2<ZBarrierSetC2>(),
new ZBarrierSetNMethod(),


@ -30,20 +30,20 @@
#include "utilities/bitMap.inline.hpp"
#include "utilities/debug.hpp"
inline ZMovableBitMap::ZMovableBitMap() :
CHeapBitMap(mtGC) {}
inline ZMovableBitMap::ZMovableBitMap()
: CHeapBitMap(mtGC) {}
inline ZMovableBitMap::ZMovableBitMap(ZMovableBitMap&& bitmap) :
CHeapBitMap(mtGC) {
inline ZMovableBitMap::ZMovableBitMap(ZMovableBitMap&& bitmap)
: CHeapBitMap(mtGC) {
update(bitmap.map(), bitmap.size());
bitmap.update(nullptr, 0);
}
inline ZBitMap::ZBitMap(idx_t size_in_bits) :
CHeapBitMap(size_in_bits, mtGC, false /* clear */) {}
inline ZBitMap::ZBitMap(idx_t size_in_bits)
: CHeapBitMap(size_in_bits, mtGC, false /* clear */) {}
inline ZBitMap::ZBitMap(const ZBitMap& other) :
CHeapBitMap(other.size(), mtGC, false /* clear */) {
inline ZBitMap::ZBitMap(const ZBitMap& other)
: CHeapBitMap(other.size(), mtGC, false /* clear */) {
memcpy(map(), other.map(), size_in_bytes());
}
@ -91,11 +91,11 @@ inline bool ZBitMap::par_set_bit_pair(idx_t bit, bool finalizable, bool& inc_liv
}
}
inline ZBitMap::ReverseIterator::ReverseIterator(BitMap* bitmap) :
ZBitMap::ReverseIterator(bitmap, 0, bitmap->size()) {}
inline ZBitMap::ReverseIterator::ReverseIterator(BitMap* bitmap)
: ZBitMap::ReverseIterator(bitmap, 0, bitmap->size()) {}
inline ZBitMap::ReverseIterator::ReverseIterator(BitMap* bitmap, BitMap::idx_t beg, BitMap::idx_t end) :
_bitmap(bitmap),
inline ZBitMap::ReverseIterator::ReverseIterator(BitMap* bitmap, BitMap::idx_t beg, BitMap::idx_t end)
: _bitmap(bitmap),
_beg(beg),
_end(end) {}


@ -58,8 +58,8 @@ ZCollectedHeap* ZCollectedHeap::heap() {
return named_heap<ZCollectedHeap>(CollectedHeap::Z);
}
ZCollectedHeap::ZCollectedHeap() :
_soft_ref_policy(),
ZCollectedHeap::ZCollectedHeap()
: _soft_ref_policy(),
_barrier_set(),
_initialize(&_barrier_set),
_heap(),


@ -82,9 +82,8 @@ oop ZContinuation::load_oop(stackChunkOop chunk, void* addr) {
return to_oop(ZBarrier::load_barrier_on_oop_field_preloaded(nullptr /* p */, zptr));
}
ZContinuation::ZColorStackOopClosure::ZColorStackOopClosure(stackChunkOop chunk) :
_color(ZStackChunkGCData::color(chunk)) {
}
ZContinuation::ZColorStackOopClosure::ZColorStackOopClosure(stackChunkOop chunk)
: _color(ZStackChunkGCData::color(chunk)) {}
void ZContinuation::ZColorStackOopClosure::do_oop(oop* p) {
// Convert zaddress to zpointer


@ -70,8 +70,8 @@ struct ZDirectorStats {
ZDirectorGenerationStats _old_stats;
};
ZDirector::ZDirector() :
_monitor(),
ZDirector::ZDirector()
: _monitor(),
_stopped(false) {
_director = this;
set_name("ZDirector");


@ -46,8 +46,8 @@ private:
DriverT* _driver;
public:
ZGCCauseSetter(DriverT* driver, GCCause::Cause cause) :
GCCauseSetter(ZCollectedHeap::heap(), cause),
ZGCCauseSetter(DriverT* driver, GCCause::Cause cause)
: GCCauseSetter(ZCollectedHeap::heap(), cause),
_driver(driver) {
_driver->set_gc_cause(cause);
}
@ -105,9 +105,8 @@ ZDriverUnlocker::~ZDriverUnlocker() {
ZDriver::lock();
}
ZDriver::ZDriver() :
_gc_cause(GCCause::_no_gc) {
}
ZDriver::ZDriver()
: _gc_cause(GCCause::_no_gc) {}
void ZDriver::set_gc_cause(GCCause::Cause cause) {
_gc_cause = cause;
@ -117,8 +116,8 @@ GCCause::Cause ZDriver::gc_cause() {
return _gc_cause;
}
ZDriverMinor::ZDriverMinor() :
ZDriver(),
ZDriverMinor::ZDriverMinor()
: ZDriver(),
_port(),
_gc_timer(),
_jfr_tracer(),
@ -175,8 +174,8 @@ private:
ZServiceabilityCycleTracer _tracer;
public:
ZDriverScopeMinor(const ZDriverRequest& request, ConcurrentGCTimer* gc_timer) :
_gc_id(),
ZDriverScopeMinor(const ZDriverRequest& request, ConcurrentGCTimer* gc_timer)
: _gc_id(),
_gc_cause(request.cause()),
_gc_cause_setter(ZDriver::minor(), _gc_cause),
_stat_timer(ZPhaseCollectionMinor, gc_timer),
@ -307,8 +306,8 @@ static bool should_preclean_young(GCCause::Cause cause) {
return ScavengeBeforeFullGC;
}
ZDriverMajor::ZDriverMajor() :
ZDriver(),
ZDriverMajor::ZDriverMajor()
: ZDriver(),
_port(),
_gc_timer(),
_jfr_tracer(),
@ -380,8 +379,8 @@ private:
ZServiceabilityCycleTracer _tracer;
public:
ZDriverScopeMajor(const ZDriverRequest& request, ConcurrentGCTimer* gc_timer) :
_gc_id(),
ZDriverScopeMajor(const ZDriverRequest& request, ConcurrentGCTimer* gc_timer)
: _gc_id(),
_gc_cause(request.cause()),
_gc_cause_setter(ZDriver::major(), _gc_cause),
_stat_timer(ZPhaseCollectionMajor, gc_timer),


@ -28,11 +28,11 @@
#include "gc/z/zLock.inline.hpp"
#include "utilities/debug.hpp"
ZDriverRequest::ZDriverRequest() :
ZDriverRequest(GCCause::_no_gc, 0, 0) {}
ZDriverRequest::ZDriverRequest()
: ZDriverRequest(GCCause::_no_gc, 0, 0) {}
ZDriverRequest::ZDriverRequest(GCCause::Cause cause, uint young_nworkers, uint old_nworkers) :
_cause(cause),
ZDriverRequest::ZDriverRequest(GCCause::Cause cause, uint young_nworkers, uint old_nworkers)
: _cause(cause),
_young_nworkers(young_nworkers),
_old_nworkers(old_nworkers) {}
@ -62,8 +62,8 @@ private:
ZListNode<ZDriverPortEntry> _node;
public:
ZDriverPortEntry(const ZDriverRequest& message) :
_message(message),
ZDriverPortEntry(const ZDriverRequest& message)
: _message(message),
_seqnum(0) {}
void set_seqnum(uint64_t seqnum) {
@ -88,8 +88,8 @@ public:
}
};
ZDriverPort::ZDriverPort() :
_lock(),
ZDriverPort::ZDriverPort()
: _lock(),
_has_message(false),
_seqnum(0),
_queue() {}


@ -28,11 +28,11 @@
#include <errno.h>
#include <string.h>
ZErrno::ZErrno() :
_error(errno) {}
ZErrno::ZErrno()
: _error(errno) {}
ZErrno::ZErrno(int error) :
_error(error) {}
ZErrno::ZErrno(int error)
: _error(error) {}
ZErrno::operator bool() const {
return _error != 0;


@ -56,8 +56,8 @@ inline ZForwarding* ZForwarding::alloc(ZForwardingAllocator* allocator, ZPage* p
return ::new (addr) ZForwarding(page, to_age, nentries);
}
inline ZForwarding::ZForwarding(ZPage* page, ZPageAge to_age, size_t nentries) :
_virtual(page->virtual_memory()),
inline ZForwarding::ZForwarding(ZPage* page, ZPageAge to_age, size_t nentries)
: _virtual(page->virtual_memory()),
_object_alignment_shift(page->object_alignment_shift()),
_entries(nentries),
_page(page),


@ -25,8 +25,8 @@
#include "gc/z/zForwardingAllocator.hpp"
#include "memory/allocation.inline.hpp"
ZForwardingAllocator::ZForwardingAllocator() :
_start(nullptr),
ZForwardingAllocator::ZForwardingAllocator()
: _start(nullptr),
_end(nullptr),
_top(nullptr) {}


@ -59,11 +59,11 @@ private:
uint64_t _entry;
public:
ZForwardingEntry() :
_entry(0) {}
ZForwardingEntry()
: _entry(0) {}
ZForwardingEntry(size_t from_index, size_t to_offset) :
_entry(field_populated::encode(true) |
ZForwardingEntry(size_t from_index, size_t to_offset)
: _entry(field_populated::encode(true) |
field_to_offset::encode(to_offset) |
field_from_index::encode(from_index)) {}


@ -33,8 +33,8 @@
#include "gc/z/zIndexDistributor.inline.hpp"
#include "utilities/debug.hpp"
inline ZForwardingTable::ZForwardingTable() :
_map(ZAddressOffsetMax) {}
inline ZForwardingTable::ZForwardingTable()
: _map(ZAddressOffsetMax) {}
inline ZForwarding* ZForwardingTable::at(size_t index) const {
return _map.at(index);


@ -30,8 +30,8 @@
#include "runtime/semaphore.inline.hpp"
template <typename T>
inline ZFuture<T>::ZFuture() :
_value() {}
inline ZFuture<T>::ZFuture()
: _value() {}
template <typename T>
inline void ZFuture<T>::set(T value) {


@ -56,8 +56,8 @@ size_t ZGCIdPrinter::print_gc_id(uint gc_id, char* buf, size_t len) {
return (size_t)ret;
}
ZGCIdPrinter::ZGCIdPrinter() :
_minor_gc_id(GCId::undefined()),
ZGCIdPrinter::ZGCIdPrinter()
: _minor_gc_id(GCId::undefined()),
_major_gc_id(GCId::undefined()),
_major_tag('-') { }


@ -109,8 +109,8 @@ static const ZStatSampler ZSamplerJavaThreads("System", "Java Threads", ZStatUni
ZGenerationYoung* ZGeneration::_young;
ZGenerationOld* ZGeneration::_old;
ZGeneration::ZGeneration(ZGenerationId id, ZPageTable* page_table, ZPageAllocator* page_allocator) :
_id(id),
ZGeneration::ZGeneration(ZGenerationId id, ZPageTable* page_table, ZPageAllocator* page_allocator)
: _id(id),
_page_allocator(page_allocator),
_page_table(page_table),
_forwarding_table(),
@ -128,8 +128,7 @@ ZGeneration::ZGeneration(ZGenerationId id, ZPageTable* page_table, ZPageAllocato
_stat_workers(),
_stat_mark(),
_stat_relocation(),
_gc_timer(nullptr) {
}
_gc_timer(nullptr) {}
bool ZGeneration::is_initialized() const {
return _mark.is_initialized();
@ -403,8 +402,8 @@ private:
bool _success;
public:
VM_ZOperation() :
_gc_id(GCId::current()),
VM_ZOperation()
: _gc_id(GCId::current()),
_success(false) {}
virtual bool block_jni_critical() const {
@ -475,8 +474,8 @@ ZYoungTypeSetter::~ZYoungTypeSetter() {
ZGenerationYoung::ZGenerationYoung(ZPageTable* page_table,
const ZForwardingTable* old_forwarding_table,
ZPageAllocator* page_allocator) :
ZGeneration(ZGenerationId::young, page_table, page_allocator),
ZPageAllocator* page_allocator)
: ZGeneration(ZGenerationId::young, page_table, page_allocator),
_active_type(ZYoungType::none),
_tenuring_threshold(0),
_remembered(page_table, old_forwarding_table, page_allocator),
@ -494,8 +493,8 @@ private:
ZStatTimer _stat_timer;
public:
ZGenerationCollectionScopeYoung(ZYoungType type, ConcurrentGCTimer* gc_timer) :
_type_setter(type),
ZGenerationCollectionScopeYoung(ZYoungType type, ConcurrentGCTimer* gc_timer)
: _type_setter(type),
_stat_timer(ZPhaseGenerationYoung[(int)type], gc_timer) {
// Update statistics and set the GC timer
ZGeneration::young()->at_collection_start(gc_timer);
@ -933,8 +932,8 @@ ZGenerationTracer* ZGenerationYoung::jfr_tracer() {
return &_jfr_tracer;
}
ZGenerationOld::ZGenerationOld(ZPageTable* page_table, ZPageAllocator* page_allocator) :
ZGeneration(ZGenerationId::old, page_table, page_allocator),
ZGenerationOld::ZGenerationOld(ZPageTable* page_table, ZPageAllocator* page_allocator)
: ZGeneration(ZGenerationId::old, page_table, page_allocator),
_reference_processor(&_workers),
_weak_roots_processor(&_workers),
_unload(&_workers),
@ -950,8 +949,8 @@ private:
ZDriverUnlocker _unlocker;
public:
ZGenerationCollectionScopeOld(ConcurrentGCTimer* gc_timer) :
_stat_timer(ZPhaseGenerationOld, gc_timer),
ZGenerationCollectionScopeOld(ConcurrentGCTimer* gc_timer)
: _stat_timer(ZPhaseGenerationOld, gc_timer),
_unlocker() {
// Update statistics and set the GC timer
ZGeneration::old()->at_collection_start(gc_timer);
@ -1248,8 +1247,8 @@ void ZGenerationOld::set_soft_reference_policy(bool clear) {
class ZRendezvousHandshakeClosure : public HandshakeClosure {
public:
ZRendezvousHandshakeClosure() :
HandshakeClosure("ZRendezvous") {}
ZRendezvousHandshakeClosure()
: HandshakeClosure("ZRendezvous") {}
void do_thread(Thread* thread) {
// Does nothing
@ -1377,8 +1376,8 @@ private:
ZBarrierSetNMethod* const _bs_nm;
public:
ZRemapNMethodClosure() :
_bs_nm(static_cast<ZBarrierSetNMethod*>(BarrierSet::barrier_set()->barrier_set_nmethod())) {}
ZRemapNMethodClosure()
: _bs_nm(static_cast<ZBarrierSetNMethod*>(BarrierSet::barrier_set()->barrier_set_nmethod())) {}
virtual void do_nmethod(nmethod* nm) {
ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
@ -1414,8 +1413,8 @@ private:
ZRemapNMethodClosure _nm_cl;
public:
ZRemapYoungRootsTask(ZPageTable* page_table, ZPageAllocator* page_allocator) :
ZTask("ZRemapYoungRootsTask"),
ZRemapYoungRootsTask(ZPageTable* page_table, ZPageAllocator* page_allocator)
: ZTask("ZRemapYoungRootsTask"),
_old_pages_parallel_iterator(page_table, ZGenerationId::old, page_allocator),
_roots_colored(ZGenerationIdOptional::old),
_roots_uncolored(ZGenerationIdOptional::old),


@ -35,8 +35,8 @@
#include "utilities/debug.hpp"
template <typename T>
inline ZGranuleMap<T>::ZGranuleMap(size_t max_offset) :
_size(max_offset >> ZGranuleSizeShift),
inline ZGranuleMap<T>::ZGranuleMap(size_t max_offset)
: _size(max_offset >> ZGranuleSizeShift),
_map(MmapArrayAllocator<T>::allocate(_size, mtGC)) {
assert(is_aligned(max_offset, ZGranuleSize), "Misaligned");
}
@ -102,7 +102,7 @@ inline void ZGranuleMap<T>::release_put(zoffset offset, size_t size, T value) {
}
template <typename T, bool Parallel>
inline ZGranuleMapIterator<T, Parallel>::ZGranuleMapIterator(const ZGranuleMap<T>* granule_map) :
ZArrayIteratorImpl<T, Parallel>(granule_map->_map, granule_map->_size) {}
inline ZGranuleMapIterator<T, Parallel>::ZGranuleMapIterator(const ZGranuleMap<T>* granule_map)
: ZArrayIteratorImpl<T, Parallel>(granule_map->_map, granule_map->_size) {}
#endif // SHARE_GC_Z_ZGRANULEMAP_INLINE_HPP


@ -54,8 +54,8 @@ static const ZStatCounter ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUn
ZHeap* ZHeap::_heap = nullptr;
ZHeap::ZHeap() :
_page_allocator(MinHeapSize, InitialHeapSize, SoftMaxHeapSize, MaxHeapSize),
ZHeap::ZHeap()
: _page_allocator(MinHeapSize, InitialHeapSize, SoftMaxHeapSize, MaxHeapSize),
_page_table(),
_allocator_eden(),
_allocator_relocation(),


@ -45,8 +45,8 @@ private:
CHeapBitMap _bitmap;
public:
ZHeapIteratorBitMap(size_t size_in_bits) :
_bitmap(size_in_bits, mtGC) {}
ZHeapIteratorBitMap(size_t size_in_bits)
: _bitmap(size_in_bits, mtGC) {}
bool try_set_bit(size_t index) {
return _bitmap.par_set_bit(index);
@ -63,8 +63,8 @@ private:
OopFieldClosure* _field_cl;
public:
ZHeapIteratorContext(ZHeapIterator* iter, ObjectClosure* object_cl, OopFieldClosure* field_cl, uint worker_id) :
_iter(iter),
ZHeapIteratorContext(ZHeapIterator* iter, ObjectClosure* object_cl, OopFieldClosure* field_cl, uint worker_id)
: _iter(iter),
_queue(_iter->_queues.queue(worker_id)),
_array_queue(_iter->_array_queues.queue(worker_id)),
_worker_id(worker_id),
@ -127,8 +127,8 @@ private:
}
public:
ZHeapIteratorColoredRootOopClosure(const ZHeapIteratorContext& context) :
_context(context) {}
ZHeapIteratorColoredRootOopClosure(const ZHeapIteratorContext& context)
: _context(context) {}
virtual void do_oop(oop* p) {
_context.visit_field(nullptr, p);
@ -152,8 +152,8 @@ private:
}
public:
ZHeapIteratorUncoloredRootOopClosure(const ZHeapIteratorContext& context) :
_context(context) {}
ZHeapIteratorUncoloredRootOopClosure(const ZHeapIteratorContext& context)
: _context(context) {}
virtual void do_oop(oop* p) {
_context.visit_field(nullptr, p);
@ -183,8 +183,8 @@ private:
}
public:
ZHeapIteratorOopClosure(const ZHeapIteratorContext& context, oop base) :
OopIterateClosure(),
ZHeapIteratorOopClosure(const ZHeapIteratorContext& context, oop base)
: OopIterateClosure(),
_context(context),
_base(base) {}
@ -217,8 +217,8 @@ public:
const ZHeapIteratorContext& _context;
public:
explicit NativeAccessClosure(const ZHeapIteratorContext& context) :
_context(context) {}
explicit NativeAccessClosure(const ZHeapIteratorContext& context)
: _context(context) {}
virtual void do_oop(oop* p) {
assert(!ZCollectedHeap::heap()->is_in(p), "Should not be in heap");
@ -240,8 +240,8 @@ public:
virtual void do_method(Method* m) {}
};
ZHeapIterator::ZHeapIterator(uint nworkers, bool visit_weaks) :
_visit_weaks(visit_weaks),
ZHeapIterator::ZHeapIterator(uint nworkers, bool visit_weaks)
: _visit_weaks(visit_weaks),
_bitmaps(ZAddressOffsetMax),
_bitmaps_lock(),
_queues(nworkers),
@ -330,8 +330,8 @@ private:
BarrierSetNMethod* const _bs_nm;
public:
ZHeapIteratorNMethodClosure(OopClosure* cl) :
_cl(cl),
ZHeapIteratorNMethodClosure(OopClosure* cl)
: _cl(cl),
_bs_nm(BarrierSet::barrier_set()->barrier_set_nmethod()) {}
virtual void do_nmethod(nmethod* nm) {
@ -351,8 +351,8 @@ private:
CodeBlobToNMethodClosure _cb_cl;
public:
ZHeapIteratorThreadClosure(OopClosure* cl, NMethodClosure* nm_cl) :
_cl(cl),
ZHeapIteratorThreadClosure(OopClosure* cl, NMethodClosure* nm_cl)
: _cl(cl),
_cb_cl(nm_cl) {}
void do_thread(Thread* thread) {


@ -51,8 +51,8 @@ class ZIndexDistributorStriped : public CHeapObj<mtGC> {
}
public:
ZIndexDistributorStriped(int max_index) :
_max_index(max_index),
ZIndexDistributorStriped(int max_index)
: _max_index(max_index),
_claim_stripe(0),
_mem() {
memset(_mem, 0, MemSize + ZCacheLineSize);
@ -266,8 +266,8 @@ private:
}
public:
ZIndexDistributorClaimTree(int count) :
_last_level_segment_size_shift(last_level_segment_size_shift(count)),
ZIndexDistributorClaimTree(int count)
: _last_level_segment_size_shift(last_level_segment_size_shift(count)),
_malloced((char*)os::malloc(claim_variables_size() + os::vm_page_size(), mtGC)),
_claim_array((volatile int*)align_up(_malloced, os::vm_page_size())) {
@ -303,8 +303,8 @@ inline void* ZIndexDistributor::create_strategy(int count) {
};
}
inline ZIndexDistributor::ZIndexDistributor(int count) :
_strategy(create_strategy(count)) {}
inline ZIndexDistributor::ZIndexDistributor(int count)
: _strategy(create_strategy(count)) {}
inline ZIndexDistributor::~ZIndexDistributor() {
switch (ZIndexDistributorStrategy) {


@ -67,8 +67,8 @@ private:
Function _function;
public:
ZBasicOopIterateClosure(Function function) :
_function(function) {}
ZBasicOopIterateClosure(Function function)
: _function(function) {}
virtual void do_oop(oop* p) {
_function((volatile zpointer*)p);
@ -93,8 +93,8 @@ void ZIterator::basic_oop_iterate(oop obj, Function function) {
}
template <typename Function>
ZObjectClosure<Function>::ZObjectClosure(Function function) :
_function(function) {}
ZObjectClosure<Function>::ZObjectClosure(Function function)
: _function(function) {}
template <typename Function>
void ZObjectClosure<Function>::do_object(oop obj) {


@ -29,8 +29,8 @@
#include "utilities/debug.hpp"
template <typename T>
inline ZListNode<T>::ZListNode() :
_next(this),
inline ZListNode<T>::ZListNode()
: _next(this),
_prev(this) {}
template <typename T>
@ -91,8 +91,8 @@ inline T* ZList<T>::cast_to_outer(ZListNode<T>* node) const {
}
template <typename T>
inline ZList<T>::ZList() :
_head(),
inline ZList<T>::ZList()
: _head(),
_size(0) {
verify_head();
}
@ -209,8 +209,8 @@ inline T* ZList<T>::remove_last() {
}
template <typename T, bool Forward>
inline ZListIteratorImpl<T, Forward>::ZListIteratorImpl(const ZList<T>* list) :
_list(list),
inline ZListIteratorImpl<T, Forward>::ZListIteratorImpl(const ZList<T>* list)
: _list(list),
_next(Forward ? list->first() : list->last()) {}
template <typename T, bool Forward>
@ -226,8 +226,8 @@ inline bool ZListIteratorImpl<T, Forward>::next(T** elem) {
}
template <typename T, bool Forward>
inline ZListRemoveIteratorImpl<T, Forward>::ZListRemoveIteratorImpl(ZList<T>* list) :
_list(list) {}
inline ZListRemoveIteratorImpl<T, Forward>::ZListRemoveIteratorImpl(ZList<T>* list)
: _list(list) {}
template <typename T, bool Forward>
inline bool ZListRemoveIteratorImpl<T, Forward>::next(T** elem) {


@ -40,8 +40,8 @@ static size_t bitmap_size(uint32_t size, size_t nsegments) {
return MAX2<size_t>(size, nsegments) * 2;
}
ZLiveMap::ZLiveMap(uint32_t size) :
_seqnum(0),
ZLiveMap::ZLiveMap(uint32_t size)
: _seqnum(0),
_live_objects(0),
_live_bytes(0),
_segment_live_bits(0),


@ -43,8 +43,8 @@ inline void ZLock::unlock() {
_lock.unlock();
}
inline ZReentrantLock::ZReentrantLock() :
_lock(),
inline ZReentrantLock::ZReentrantLock()
: _lock(),
_owner(nullptr),
_count(0) {}
@ -103,8 +103,8 @@ inline void ZConditionLock::notify_all() {
}
template <typename T>
inline ZLocker<T>::ZLocker(T* lock) :
_lock(lock) {
inline ZLocker<T>::ZLocker(T* lock)
: _lock(lock) {
if (_lock != nullptr) {
_lock->lock();
}


@ -79,8 +79,8 @@ static const ZStatSubPhase ZSubPhaseConcurrentMarkRootColoredYoung("Concurrent M
static const ZStatSubPhase ZSubPhaseConcurrentMarkRootUncoloredOld("Concurrent Mark Root Uncolored", ZGenerationId::old);
static const ZStatSubPhase ZSubPhaseConcurrentMarkRootColoredOld("Concurrent Mark Root Colored", ZGenerationId::old);
ZMark::ZMark(ZGeneration* generation, ZPageTable* page_table) :
_generation(generation),
ZMark::ZMark(ZGeneration* generation, ZPageTable* page_table)
: _generation(generation),
_page_table(page_table),
_allocator(),
_stripes(_allocator.start()),
@ -298,8 +298,8 @@ private:
const bool _visit_metadata;
public:
ZMarkBarrierFollowOopClosure() :
OopIterateClosure(discoverer()),
ZMarkBarrierFollowOopClosure()
: OopIterateClosure(discoverer()),
_visit_metadata(visit_metadata()) {}
virtual void do_oop(oop* p) {
@ -561,8 +561,8 @@ private:
bool _flushed;
public:
ZMarkFlushAndFreeStacksClosure(ZMark* mark) :
HandshakeClosure("ZMarkFlushAndFreeStacks"),
ZMarkFlushAndFreeStacksClosure(ZMark* mark)
: HandshakeClosure("ZMarkFlushAndFreeStacks"),
_mark(mark),
_flushed(false) {}
@ -585,8 +585,8 @@ private:
ThreadClosure* _cl;
public:
VM_ZMarkFlushOperation(ThreadClosure* cl) :
_cl(cl) {}
VM_ZMarkFlushOperation(ThreadClosure* cl)
: _cl(cl) {}
virtual bool evaluate_at_safepoint() const {
return false;
@ -733,8 +733,8 @@ private:
ZBarrierSetNMethod* const _bs_nm;
public:
ZMarkNMethodClosure() :
_bs_nm(static_cast<ZBarrierSetNMethod*>(BarrierSet::barrier_set()->barrier_set_nmethod())) {}
ZMarkNMethodClosure()
: _bs_nm(static_cast<ZBarrierSetNMethod*>(BarrierSet::barrier_set()->barrier_set_nmethod())) {}
virtual void do_nmethod(nmethod* nm) {
ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
@ -762,8 +762,8 @@ private:
ZBarrierSetNMethod* const _bs_nm;
public:
ZMarkYoungNMethodClosure() :
_bs_nm(static_cast<ZBarrierSetNMethod*>(BarrierSet::barrier_set()->barrier_set_nmethod())) {}
ZMarkYoungNMethodClosure()
: _bs_nm(static_cast<ZBarrierSetNMethod*>(BarrierSet::barrier_set()->barrier_set_nmethod())) {}
virtual void do_nmethod(nmethod* nm) {
ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
@ -821,8 +821,8 @@ private:
ZMarkNMethodClosure _nm_cl;
public:
ZMarkOldRootsTask(ZMark* mark) :
ZTask("ZMarkOldRootsTask"),
ZMarkOldRootsTask(ZMark* mark)
: ZTask("ZMarkOldRootsTask"),
_mark(mark),
_roots_colored(ZGenerationIdOptional::old),
_roots_uncolored(ZGenerationIdOptional::old),
@ -868,8 +868,8 @@ public:
ClaimingCLDToOopClosure<ClassLoaderData::_claim_none>::do_cld(cld);
}
ZMarkYoungCLDClosure(OopClosure* cl) :
ClaimingCLDToOopClosure<ClassLoaderData::_claim_none>(cl) {}
ZMarkYoungCLDClosure(OopClosure* cl)
: ClaimingCLDToOopClosure<ClassLoaderData::_claim_none>(cl) {}
};
class ZMarkYoungRootsTask : public ZTask {
@ -885,8 +885,8 @@ private:
ZMarkYoungNMethodClosure _nm_cl;
public:
ZMarkYoungRootsTask(ZMark* mark) :
ZTask("ZMarkYoungRootsTask"),
ZMarkYoungRootsTask(ZMark* mark)
: ZTask("ZMarkYoungRootsTask"),
_mark(mark),
_roots_colored(ZGenerationIdOptional::young),
_roots_uncolored(ZGenerationIdOptional::young),
@ -921,8 +921,8 @@ private:
ZMark* const _mark;
public:
ZMarkTask(ZMark* mark) :
ZRestartableTask("ZMarkTask"),
ZMarkTask(ZMark* mark)
: ZRestartableTask("ZMarkTask"),
_mark(mark) {
_mark->prepare_work();
}
@ -1043,8 +1043,8 @@ private:
const ZGenerationId _generation_id;
public:
ZVerifyMarkStacksEmptyClosure(const ZMarkStripeSet* stripes, ZGenerationId id) :
_stripes(stripes),
ZVerifyMarkStacksEmptyClosure(const ZMarkStripeSet* stripes, ZGenerationId id)
: _stripes(stripes),
_generation_id(id) {}
void do_thread(Thread* thread) {


@ -30,13 +30,13 @@ static size_t shift_for_stripes(size_t nstripes) {
return ZMarkStripeShift + exact_log2(nstripes);
}
ZMarkCacheEntry::ZMarkCacheEntry() :
_page(nullptr),
ZMarkCacheEntry::ZMarkCacheEntry()
: _page(nullptr),
_objects(0),
_bytes(0) {}
ZMarkCache::ZMarkCache(size_t nstripes) :
_shift(shift_for_stripes(nstripes)) {}
ZMarkCache::ZMarkCache(size_t nstripes)
: _shift(shift_for_stripes(nstripes)) {}
ZMarkCache::~ZMarkCache() {
// Evict all entries


@ -28,8 +28,8 @@
inline ZMarkContext::ZMarkContext(size_t nstripes,
ZMarkStripe* stripe,
ZMarkThreadLocalStacks* stacks) :
_cache(nstripes),
ZMarkThreadLocalStacks* stacks)
: _cache(nstripes),
_stripe(stripe),
_stacks(stacks),
_nstripes(nstripes),


@ -30,12 +30,12 @@
#include "utilities/debug.hpp"
#include "utilities/powerOfTwo.hpp"
ZMarkStripe::ZMarkStripe(uintptr_t base) :
_published(base),
ZMarkStripe::ZMarkStripe(uintptr_t base)
: _published(base),
_overflowed(base) {}
ZMarkStripeSet::ZMarkStripeSet(uintptr_t base) :
_nstripes_mask(0),
ZMarkStripeSet::ZMarkStripeSet(uintptr_t base)
: _nstripes_mask(0),
_stripes() {
// Re-construct array elements with the correct base
@ -93,8 +93,8 @@ ZMarkStripe* ZMarkStripeSet::stripe_for_worker(uint nworkers, uint worker_id) {
return &_stripes[index];
}
ZMarkThreadLocalStacks::ZMarkThreadLocalStacks() :
_magazine(nullptr) {
ZMarkThreadLocalStacks::ZMarkThreadLocalStacks()
: _magazine(nullptr) {
for (size_t i = 0; i < ZMarkStripesMax; i++) {
_stacks[i] = nullptr;
}


@ -31,8 +31,8 @@
#include "utilities/debug.hpp"
template <typename T, size_t S>
inline ZStack<T, S>::ZStack() :
_top(0),
inline ZStack<T, S>::ZStack()
: _top(0),
_next(nullptr) {}
template <typename T, size_t S>
@ -76,8 +76,8 @@ inline ZStack<T, S>** ZStack<T, S>::next_addr() {
}
template <typename T>
inline ZStackList<T>::ZStackList(uintptr_t base) :
_base(base),
inline ZStackList<T>::ZStackList(uintptr_t base)
: _base(base),
_head(encode_versioned_pointer(nullptr, 0)) {}
template <typename T>


@ -32,8 +32,8 @@
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
ZMarkStackSpace::ZMarkStackSpace() :
_expand_lock(),
ZMarkStackSpace::ZMarkStackSpace()
: _expand_lock(),
_start(0),
_top(0),
_end(0) {
@ -168,8 +168,8 @@ void ZMarkStackSpace::free() {
_top = _start;
}
ZMarkStackAllocator::ZMarkStackAllocator() :
_space(),
ZMarkStackAllocator::ZMarkStackAllocator()
: _space(),
_freelist(_space.start()),
_expanded_recently(false) {}


@ -92,16 +92,16 @@ public:
// what _entry is initialized to.
}
ZMarkStackEntry(uintptr_t object_address, bool mark, bool inc_live, bool follow, bool finalizable) :
_entry(field_object_address::encode(object_address) |
ZMarkStackEntry(uintptr_t object_address, bool mark, bool inc_live, bool follow, bool finalizable)
: _entry(field_object_address::encode(object_address) |
field_mark::encode(mark) |
field_inc_live::encode(inc_live) |
field_follow::encode(follow) |
field_partial_array::encode(false) |
field_finalizable::encode(finalizable)) {}
ZMarkStackEntry(size_t partial_array_offset, size_t partial_array_length, bool finalizable) :
_entry(field_partial_array_offset::encode(partial_array_offset) |
ZMarkStackEntry(size_t partial_array_offset, size_t partial_array_length, bool finalizable)
: _entry(field_partial_array_offset::encode(partial_array_offset) |
field_partial_array_length::encode(partial_array_length) |
field_partial_array::encode(true) |
field_finalizable::encode(finalizable)) {}


@ -33,8 +33,8 @@
#include "runtime/osThread.hpp"
#include "runtime/thread.inline.hpp"
inline ZMarkTerminate::ZMarkTerminate() :
_nworkers(0),
inline ZMarkTerminate::ZMarkTerminate()
: _nworkers(0),
_nworking(0),
_nawakening(0),
_resurrected(false),


@ -69,16 +69,16 @@ void ZMemoryManager::grow_from_back(ZMemory* area, size_t size) {
area->grow_from_back(size);
}
ZMemoryManager::Callbacks::Callbacks() :
_create(nullptr),
ZMemoryManager::Callbacks::Callbacks()
: _create(nullptr),
_destroy(nullptr),
_shrink_from_front(nullptr),
_shrink_from_back(nullptr),
_grow_from_front(nullptr),
_grow_from_back(nullptr) {}
ZMemoryManager::ZMemoryManager() :
_freelist(),
ZMemoryManager::ZMemoryManager()
: _freelist(),
_callbacks() {}
void ZMemoryManager::register_callbacks(const Callbacks& callbacks) {


@ -30,8 +30,8 @@
#include "gc/z/zList.inline.hpp"
#include "utilities/debug.hpp"
inline ZMemory::ZMemory(zoffset start, size_t size) :
_start(start),
inline ZMemory::ZMemory(zoffset start, size_t size)
: _start(start),
_end(to_zoffset_end(start, size)) {}
inline zoffset ZMemory::start() const {


@ -27,8 +27,8 @@
#include "runtime/timer.hpp"
#include "utilities/ticks.hpp"
ZMetronome::ZMetronome(uint64_t hz) :
_monitor(Monitor::nosafepoint, "ZMetronome_lock"),
ZMetronome::ZMetronome(uint64_t hz)
: _monitor(Monitor::nosafepoint, "ZMetronome_lock"),
_interval_ms(MILLIUNITS / hz),
_start_ms(0),
_nticks(0),


@ -341,8 +341,8 @@ private:
}
public:
ZNMethodUnlinkClosure(bool unloading_occurred) :
_unloading_occurred(unloading_occurred),
ZNMethodUnlinkClosure(bool unloading_occurred)
: _unloading_occurred(unloading_occurred),
_failed(false) {}
virtual void do_nmethod(nmethod* nm) {
@ -401,8 +401,8 @@ private:
ICRefillVerifier* _verifier;
public:
ZNMethodUnlinkTask(bool unloading_occurred, ICRefillVerifier* verifier) :
ZTask("ZNMethodUnlinkTask"),
ZNMethodUnlinkTask(bool unloading_occurred, ICRefillVerifier* verifier)
: ZTask("ZNMethodUnlinkTask"),
_cl(unloading_occurred),
_verifier(verifier) {
ZNMethodTable::nmethods_do_begin(false /* secondary */);


@ -26,8 +26,8 @@
#include "gc/z/zNMethodData.hpp"
#include "utilities/debug.hpp"
ZNMethodData::ZNMethodData() :
_lock(),
ZNMethodData::ZNMethodData()
: _lock(),
_barriers(),
_immediate_oops(),
_has_non_immediate_oops(false) {}


@ -55,13 +55,13 @@ private:
uint64_t _entry;
public:
explicit ZNMethodTableEntry(bool unregistered = false) :
_entry(field_registered::encode(false) |
explicit ZNMethodTableEntry(bool unregistered = false)
: _entry(field_registered::encode(false) |
field_unregistered::encode(unregistered) |
field_method::encode(nullptr)) {}
explicit ZNMethodTableEntry(nmethod* method) :
_entry(field_registered::encode(true) |
explicit ZNMethodTableEntry(nmethod* method)
: _entry(field_registered::encode(true) |
field_unregistered::encode(false) |
field_method::encode(method)) {}


@ -29,8 +29,8 @@
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
ZNMethodTableIteration::ZNMethodTableIteration() :
_table(nullptr),
ZNMethodTableIteration::ZNMethodTableIteration()
: _table(nullptr),
_size(0),
_claimed(0) {}


@ -29,8 +29,8 @@
#include "runtime/interfaceSupport.inline.hpp"
#include "utilities/debug.hpp"
ZObjArrayAllocator::ZObjArrayAllocator(Klass* klass, size_t word_size, int length, bool do_zero, Thread* thread) :
ObjArrayAllocator(klass, word_size, length, do_zero, thread) {}
ZObjArrayAllocator::ZObjArrayAllocator(Klass* klass, size_t word_size, int length, bool do_zero, Thread* thread)
: ObjArrayAllocator(klass, word_size, length, do_zero, thread) {}
void ZObjArrayAllocator::yield_for_safepoint() const {
ThreadBlockInVM tbivm(JavaThread::cast(_thread));


@ -40,8 +40,8 @@
static const ZStatCounter ZCounterUndoObjectAllocationSucceeded("Memory", "Undo Object Allocation Succeeded", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterUndoObjectAllocationFailed("Memory", "Undo Object Allocation Failed", ZStatUnitOpsPerSecond);
ZObjectAllocator::ZObjectAllocator(ZPageAge age) :
_age(age),
ZObjectAllocator::ZObjectAllocator(ZPageAge age)
: _age(age),
_use_per_cpu_shared_small_pages(ZHeuristics::use_per_cpu_shared_small_pages()),
_used(0),
_undone(0),


@ -33,8 +33,8 @@
#include "utilities/debug.hpp"
#include "utilities/growableArray.hpp"
ZPage::ZPage(ZPageType type, const ZVirtualMemory& vmem, const ZPhysicalMemory& pmem) :
_type(type),
ZPage::ZPage(ZPageType type, const ZVirtualMemory& vmem, const ZPhysicalMemory& pmem)
: _type(type),
_generation_id(ZGenerationId::young),
_age(ZPageAge::eden),
_numa_id((uint8_t)-1),
@ -231,8 +231,8 @@ private:
oop _result;
public:
ZFindBaseOopClosure(volatile zpointer* p) :
_p(p),
ZFindBaseOopClosure(volatile zpointer* p)
: _p(p),
_result(nullptr) {}
virtual void do_object(oop obj) {


@ -54,8 +54,8 @@ static const ZStatCounter ZCounterPageCacheFlush("Memory", "Page Cache Flu
static const ZStatCounter ZCounterDefragment("Memory", "Defragment", ZStatUnitOpsPerSecond);
static const ZStatCriticalPhase ZCriticalPhaseAllocationStall("Allocation Stall");
ZSafePageRecycle::ZSafePageRecycle(ZPageAllocator* page_allocator) :
_page_allocator(page_allocator),
ZSafePageRecycle::ZSafePageRecycle(ZPageAllocator* page_allocator)
: _page_allocator(page_allocator),
_unsafe_to_recycle() {}
void ZSafePageRecycle::activate() {
@ -112,8 +112,8 @@ private:
ZFuture<bool> _stall_result;
public:
ZPageAllocation(ZPageType type, size_t size, ZAllocationFlags flags) :
_type(type),
ZPageAllocation(ZPageType type, size_t size, ZAllocationFlags flags)
: _type(type),
_size(size),
_flags(flags),
_young_seqnum(ZGeneration::young()->seqnum()),
@ -180,8 +180,8 @@ public:
ZPageAllocator::ZPageAllocator(size_t min_capacity,
size_t initial_capacity,
size_t soft_max_capacity,
size_t max_capacity) :
_lock(),
size_t max_capacity)
: _lock(),
_cache(),
_virtual(max_capacity),
_physical(max_capacity),
@ -237,8 +237,8 @@ private:
const zoffset_end _end;
public:
ZPreTouchTask(const ZPhysicalMemoryManager* physical, zoffset start, zoffset_end end) :
ZTask("ZPreTouchTask"),
ZPreTouchTask(const ZPhysicalMemoryManager* physical, zoffset start, zoffset_end end)
: ZTask("ZPreTouchTask"),
_physical(physical),
_start(start),
_end(end) {}


@ -37,8 +37,8 @@ inline ZPageAllocatorStats::ZPageAllocatorStats(size_t min_capacity,
size_t freed,
size_t promoted,
size_t compacted,
size_t allocation_stalls) :
_min_capacity(min_capacity),
size_t allocation_stalls)
: _min_capacity(min_capacity),
_max_capacity(max_capacity),
_soft_max_capacity(soft_max_capacity),
_capacity(capacity),


@ -50,12 +50,12 @@ public:
virtual bool do_page(const ZPage* page) = 0;
};
ZPageCacheFlushClosure::ZPageCacheFlushClosure(size_t requested) :
_requested(requested),
ZPageCacheFlushClosure::ZPageCacheFlushClosure(size_t requested)
: _requested(requested),
_flushed(0) {}
ZPageCache::ZPageCache() :
_small(),
ZPageCache::ZPageCache()
: _small(),
_medium(),
_large(),
_last_commit(0) {}
@ -254,8 +254,8 @@ void ZPageCache::flush(ZPageCacheFlushClosure* cl, ZList<ZPage>* to) {
class ZPageCacheFlushForAllocationClosure : public ZPageCacheFlushClosure {
public:
ZPageCacheFlushForAllocationClosure(size_t requested) :
ZPageCacheFlushClosure(requested) {}
ZPageCacheFlushForAllocationClosure(size_t requested)
: ZPageCacheFlushClosure(requested) {}
virtual bool do_page(const ZPage* page) {
if (_flushed < _requested) {
@ -280,8 +280,8 @@ private:
uint64_t* _timeout;
public:
ZPageCacheFlushForUncommitClosure(size_t requested, uint64_t now, uint64_t* timeout) :
ZPageCacheFlushClosure(requested),
ZPageCacheFlushForUncommitClosure(size_t requested, uint64_t now, uint64_t* timeout)
: ZPageCacheFlushClosure(requested),
_now(now),
_timeout(timeout) {
// Set initial timeout


@ -29,8 +29,8 @@
#include "runtime/orderAccess.hpp"
#include "utilities/debug.hpp"
ZPageTable::ZPageTable() :
_map(ZAddressOffsetMax) {}
ZPageTable::ZPageTable()
: _map(ZAddressOffsetMax) {}
void ZPageTable::insert(ZPage* page) {
const zoffset offset = page->start();
@ -68,8 +68,8 @@ void ZPageTable::replace(ZPage* old_page, ZPage* new_page) {
}
}
ZGenerationPagesParallelIterator::ZGenerationPagesParallelIterator(const ZPageTable* page_table, ZGenerationId id, ZPageAllocator* page_allocator) :
_iterator(page_table),
ZGenerationPagesParallelIterator::ZGenerationPagesParallelIterator(const ZPageTable* page_table, ZGenerationId id, ZPageAllocator* page_allocator)
: _iterator(page_table),
_generation_id(id),
_page_allocator(page_allocator) {
_page_allocator->enable_safe_destroy();
@ -81,8 +81,8 @@ ZGenerationPagesParallelIterator::~ZGenerationPagesParallelIterator() {
_page_allocator->disable_safe_destroy();
}
ZGenerationPagesIterator::ZGenerationPagesIterator(const ZPageTable* page_table, ZGenerationId id, ZPageAllocator* page_allocator) :
_iterator(page_table),
ZGenerationPagesIterator::ZGenerationPagesIterator(const ZPageTable* page_table, ZGenerationId id, ZPageAllocator* page_allocator)
: _iterator(page_table),
_generation_id(id),
_page_allocator(page_allocator) {
_page_allocator->enable_safe_destroy();


@ -45,8 +45,8 @@ inline ZPage* ZPageTable::at(size_t index) const {
return _map.at(index);
}
inline ZPageTableIterator::ZPageTableIterator(const ZPageTable* table) :
_iter(&table->_map),
inline ZPageTableIterator::ZPageTableIterator(const ZPageTable* table)
: _iter(&table->_map),
_prev(nullptr) {}
inline bool ZPageTableIterator::next(ZPage** page) {
@ -62,8 +62,8 @@ inline bool ZPageTableIterator::next(ZPage** page) {
return false;
}
inline ZPageTableParallelIterator::ZPageTableParallelIterator(const ZPageTable* table) :
_table(table),
inline ZPageTableParallelIterator::ZPageTableParallelIterator(const ZPageTable* table)
: _table(table),
_index_distributor(int(ZAddressOffsetMax >> ZGranuleSizeShift)) {}
template <typename Function>


@ -40,16 +40,16 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
ZPhysicalMemory::ZPhysicalMemory() :
_segments() {}
ZPhysicalMemory::ZPhysicalMemory()
: _segments() {}
ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemorySegment& segment) :
_segments() {
ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemorySegment& segment)
: _segments() {
add_segment(segment);
}
ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemory& pmem) :
_segments() {
ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemory& pmem)
: _segments() {
add_segments(pmem);
}
@ -231,8 +231,8 @@ ZPhysicalMemory ZPhysicalMemory::split_committed() {
return pmem;
}
ZPhysicalMemoryManager::ZPhysicalMemoryManager(size_t max_capacity) :
_backing(max_capacity) {
ZPhysicalMemoryManager::ZPhysicalMemoryManager(size_t max_capacity)
: _backing(max_capacity) {
// Make the whole range free
_manager.free(zoffset(0), max_capacity);
}


@ -29,13 +29,13 @@
#include "gc/z/zAddress.inline.hpp"
#include "utilities/debug.hpp"
inline ZPhysicalMemorySegment::ZPhysicalMemorySegment() :
_start(zoffset(UINTPTR_MAX)),
inline ZPhysicalMemorySegment::ZPhysicalMemorySegment()
: _start(zoffset(UINTPTR_MAX)),
_end(zoffset(UINTPTR_MAX)),
_committed(false) {}
inline ZPhysicalMemorySegment::ZPhysicalMemorySegment(zoffset start, size_t size, bool committed) :
_start(start),
inline ZPhysicalMemorySegment::ZPhysicalMemorySegment(zoffset start, size_t size, bool committed)
: _start(start),
_end(start + size),
_committed(committed) {}


@ -110,8 +110,8 @@ static void list_append(zaddress& head, zaddress& tail, zaddress reference) {
tail = reference;
}
ZReferenceProcessor::ZReferenceProcessor(ZWorkers* workers) :
_workers(workers),
ZReferenceProcessor::ZReferenceProcessor(ZWorkers* workers)
: _workers(workers),
_soft_reference_policy(nullptr),
_encountered_count(),
_discovered_count(),
@ -426,8 +426,8 @@ private:
ZReferenceProcessor* const _reference_processor;
public:
ZReferenceProcessorTask(ZReferenceProcessor* reference_processor) :
ZTask("ZReferenceProcessorTask"),
ZReferenceProcessorTask(ZReferenceProcessor* reference_processor)
: ZTask("ZReferenceProcessorTask"),
_reference_processor(reference_processor) {}
virtual void work() {


@ -81,8 +81,8 @@ static zaddress forwarding_insert(ZForwarding* forwarding, zaddress from_addr, z
return forwarding_insert(forwarding, ZAddress::offset(from_addr), to_addr, cursor);
}
ZRelocateQueue::ZRelocateQueue() :
_lock(),
ZRelocateQueue::ZRelocateQueue()
: _lock(),
_queue(),
_nworkers(0),
_nsynchronized(0),
@ -208,8 +208,8 @@ private:
ZRelocateQueue* const _queue;
public:
ZRelocateQueueSynchronizeThread(ZRelocateQueue* queue) :
_queue(queue) {
ZRelocateQueueSynchronizeThread(ZRelocateQueue* queue)
: _queue(queue) {
_queue->synchronize_thread();
}
@ -318,8 +318,8 @@ void ZRelocateQueue::desynchronize() {
_lock.notify_all();
}
ZRelocate::ZRelocate(ZGeneration* generation) :
_generation(generation),
ZRelocate::ZRelocate(ZGeneration* generation)
: _generation(generation),
_queue() {}
ZWorkers* ZRelocate::workers() const {
@ -436,8 +436,8 @@ private:
volatile size_t _in_place_count;
public:
ZRelocateSmallAllocator(ZGeneration* generation) :
_generation(generation),
ZRelocateSmallAllocator(ZGeneration* generation)
: _generation(generation),
_in_place_count(0) {}
ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) {
@ -487,8 +487,8 @@ private:
volatile size_t _in_place_count;
public:
ZRelocateMediumAllocator(ZGeneration* generation) :
_generation(generation),
ZRelocateMediumAllocator(ZGeneration* generation)
: _generation(generation),
_lock(),
_shared(),
_in_place(false),
@ -897,8 +897,8 @@ private:
}
public:
ZRelocateWork(Allocator* allocator, ZGeneration* generation) :
_allocator(allocator),
ZRelocateWork(Allocator* allocator, ZGeneration* generation)
: _allocator(allocator),
_forwarding(nullptr),
_target(),
_generation(generation),
@ -1059,9 +1059,9 @@ private:
ZJavaThreadsIterator _threads_iter;
public:
ZRelocateStoreBufferInstallBasePointersTask(ZGeneration* generation) :
ZTask("ZRelocateStoreBufferInstallBasePointersTask"),
_threads_iter(generation->id_optional()) {}
ZRelocateStoreBufferInstallBasePointersTask(ZGeneration* generation)
: ZTask("ZRelocateStoreBufferInstallBasePointersTask"),
_threads_iter(generation->id_optional()) {}
virtual void work() {
ZRelocateStoreBufferInstallBasePointersThreadClosure fix_store_buffer_cl;
@ -1078,8 +1078,8 @@ private:
ZRelocateMediumAllocator _medium_allocator;
public:
ZRelocateTask(ZRelocationSet* relocation_set, ZRelocateQueue* queue) :
ZRestartableTask("ZRelocateTask"),
ZRelocateTask(ZRelocationSet* relocation_set, ZRelocateQueue* queue)
: ZRestartableTask("ZRelocateTask"),
_iter(relocation_set),
_generation(relocation_set->generation()),
_queue(queue),
@ -1193,8 +1193,8 @@ private:
ZArrayParallelIterator<ZPage*> _iter;
public:
ZRelocateAddRemsetForFlipPromoted(ZArray<ZPage*>* pages) :
ZRestartableTask("ZRelocateAddRemsetForFlipPromoted"),
ZRelocateAddRemsetForFlipPromoted(ZArray<ZPage*>* pages)
: ZRestartableTask("ZRelocateAddRemsetForFlipPromoted"),
_timer(ZSubPhaseConcurrentRelocateRememberedSetFlipPromotedYoung),
_iter(pages) {}
@ -1254,8 +1254,8 @@ private:
ZArrayParallelIterator<ZPage*> _iter;
public:
ZFlipAgePagesTask(const ZArray<ZPage*>* pages) :
ZTask("ZPromotePagesTask"),
ZFlipAgePagesTask(const ZArray<ZPage*>* pages)
: ZTask("ZPromotePagesTask"),
_iter(pages) {}
virtual void work() {


@ -80,8 +80,8 @@ private:
}
public:
ZRelocationSetInstallTask(ZForwardingAllocator* allocator, const ZRelocationSetSelector* selector) :
ZTask("ZRelocationSetInstallTask"),
ZRelocationSetInstallTask(ZForwardingAllocator* allocator, const ZRelocationSetSelector* selector)
: ZTask("ZRelocationSetInstallTask"),
_allocator(allocator),
_forwardings(nullptr),
_nforwardings(selector->selected_small()->length() + selector->selected_medium()->length()),
@ -130,8 +130,8 @@ public:
}
};
ZRelocationSet::ZRelocationSet(ZGeneration* generation) :
_generation(generation),
ZRelocationSet::ZRelocationSet(ZGeneration* generation)
: _generation(generation),
_allocator(),
_forwardings(nullptr),
_nforwardings(0),


@ -29,7 +29,7 @@
#include "gc/z/zArray.inline.hpp"
template <bool Parallel>
inline ZRelocationSetIteratorImpl<Parallel>::ZRelocationSetIteratorImpl(ZRelocationSet* relocation_set) :
ZArrayIteratorImpl<ZForwarding*, Parallel>(relocation_set->_forwardings, relocation_set->_nforwardings) {}
inline ZRelocationSetIteratorImpl<Parallel>::ZRelocationSetIteratorImpl(ZRelocationSet* relocation_set)
: ZArrayIteratorImpl<ZForwarding*, Parallel>(relocation_set->_forwardings, relocation_set->_nforwardings) {}
#endif // SHARE_GC_Z_ZRELOCATIONSET_INLINE_HPP


@ -33,8 +33,8 @@
#include "utilities/debug.hpp"
#include "utilities/powerOfTwo.hpp"
ZRelocationSetSelectorGroupStats::ZRelocationSetSelectorGroupStats() :
_npages_candidates(0),
ZRelocationSetSelectorGroupStats::ZRelocationSetSelectorGroupStats()
: _npages_candidates(0),
_total(0),
_live(0),
_empty(0),
@ -45,8 +45,8 @@ ZRelocationSetSelectorGroup::ZRelocationSetSelectorGroup(const char* name,
ZPageType page_type,
size_t page_size,
size_t object_size_limit,
double fragmentation_limit) :
_name(name),
double fragmentation_limit)
: _name(name),
_page_type(page_type),
_page_size(page_size),
_object_size_limit(object_size_limit),
@ -210,8 +210,8 @@ void ZRelocationSetSelectorGroup::select() {
event.commit((u8)_page_type, s._npages_candidates, s._total, s._empty, s._npages_selected, s._relocate);
}
ZRelocationSetSelector::ZRelocationSetSelector(double fragmentation_limit) :
_small("Small", ZPageType::small, ZPageSizeSmall, ZObjectSizeLimitSmall, fragmentation_limit),
ZRelocationSetSelector::ZRelocationSetSelector(double fragmentation_limit)
: _small("Small", ZPageType::small, ZPageSizeSmall, ZObjectSizeLimitSmall, fragmentation_limit),
_medium("Medium", ZPageType::medium, ZPageSizeMedium, ZObjectSizeLimitMedium, fragmentation_limit),
_large("Large", ZPageType::large, 0 /* page_size */, 0 /* object_size_limit */, fragmentation_limit),
_empty_pages() {}


@ -41,12 +41,11 @@
ZRemembered::ZRemembered(ZPageTable* page_table,
const ZForwardingTable* old_forwarding_table,
ZPageAllocator* page_allocator) :
_page_table(page_table),
ZPageAllocator* page_allocator)
: _page_table(page_table),
_old_forwarding_table(old_forwarding_table),
_page_allocator(page_allocator),
_found_old() {
}
_found_old() {}
template <typename Function>
void ZRemembered::oops_do_forwarded_via_containing(GrowableArrayView<ZRememberedSetContaining>* array, Function function) const {
@ -176,8 +175,8 @@ struct ZRememberedScanForwardingContext {
Tickspan _max_durations[NumRecords];
int _max_count;
Where() :
_duration(),
Where()
: _duration(),
_count(),
_max_durations(),
_max_count() {}
@ -215,8 +214,8 @@ struct ZRememberedScanForwardingContext {
Where _where[2];
ZRememberedScanForwardingContext() :
_containing_array(),
ZRememberedScanForwardingContext()
: _containing_array(),
_where() {}
~ZRememberedScanForwardingContext() {
@ -241,8 +240,8 @@ struct ZRememberedScanForwardingMeasureRetained {
ZRememberedScanForwardingContext* _context;
Ticks _start;
ZRememberedScanForwardingMeasureRetained(ZRememberedScanForwardingContext* context) :
_context(context),
ZRememberedScanForwardingMeasureRetained(ZRememberedScanForwardingContext* context)
: _context(context),
_start(Ticks::now()) {
}
@ -257,8 +256,8 @@ struct ZRememberedScanForwardingMeasureReleased {
ZRememberedScanForwardingContext* _context;
Ticks _start;
ZRememberedScanForwardingMeasureReleased(ZRememberedScanForwardingContext* context) :
_context(context),
ZRememberedScanForwardingMeasureReleased(ZRememberedScanForwardingContext* context)
: _context(context),
_start(Ticks::now()) {
}
@ -332,11 +331,11 @@ bool ZRemembered::scan_forwarding(ZForwarding* forwarding, void* context_void) c
// slots that were found to actually contain old pages are registered in the
// active set.
ZRemembered::FoundOld::FoundOld() :
ZRemembered::FoundOld::FoundOld()
// Array initialization requires copy constructors, which CHeapBitMap
// doesn't provide. Instantiate two instances, and populate an array
// with pointers to the two instances.
_allocated_bitmap_0{ZAddressOffsetMax >> ZGranuleSizeShift, mtGC, true /* clear */},
: _allocated_bitmap_0{ZAddressOffsetMax >> ZGranuleSizeShift, mtGC, true /* clear */},
_allocated_bitmap_1{ZAddressOffsetMax >> ZGranuleSizeShift, mtGC, true /* clear */},
_bitmaps{&_allocated_bitmap_0, &_allocated_bitmap_1},
_current{0} {}
@ -388,8 +387,8 @@ private:
volatile BitMap::idx_t _claimed;
public:
ZRemsetTableIterator(ZRemembered* remembered) :
_remembered(remembered),
ZRemsetTableIterator(ZRemembered* remembered)
: _remembered(remembered),
_page_table(remembered->_page_table),
_old_forwarding_table(remembered->_old_forwarding_table),
_claimed(0) {}
@ -454,8 +453,8 @@ private:
ZRemsetTableIterator _remset_table_iterator;
public:
ZRememberedScanMarkFollowTask(ZRemembered* remembered, ZMark* mark) :
ZRestartableTask("ZRememberedScanMarkFollowTask"),
ZRememberedScanMarkFollowTask(ZRemembered* remembered, ZMark* mark)
: ZRestartableTask("ZRememberedScanMarkFollowTask"),
_remembered(remembered),
_mark(mark),
_remset_table_iterator(remembered) {


@ -38,8 +38,8 @@ void ZRememberedSet::flip() {
_current ^= 1;
}
ZRememberedSet::ZRememberedSet() :
_bitmap{ZMovableBitMap(), ZMovableBitMap()} {
ZRememberedSet::ZRememberedSet()
: _bitmap{ZMovableBitMap(), ZMovableBitMap()} {
// Defer initialization of the bitmaps until the owning
// page becomes old and its remembered set is initialized.
}
@ -127,8 +127,8 @@ zaddress_unsafe ZRememberedSetContainingIterator::to_addr(BitMap::idx_t index) {
return ZOffset::address_unsafe(_page->global_offset(local_offset));
}
ZRememberedSetContainingIterator::ZRememberedSetContainingIterator(ZPage* page) :
_page(page),
ZRememberedSetContainingIterator::ZRememberedSetContainingIterator(ZPage* page)
: _page(page),
_remset_iter(page->remset_reverse_iterator_previous()),
_obj(zaddress_unsafe::null),
_obj_remset_iter(page->remset_reverse_iterator_previous()) {}
@ -190,8 +190,8 @@ bool ZRememberedSetContainingIterator::next(ZRememberedSetContaining* containing
return false;
}
ZRememberedSetContainingInLiveIterator::ZRememberedSetContainingInLiveIterator(ZPage* page) :
_iter(page),
ZRememberedSetContainingInLiveIterator::ZRememberedSetContainingInLiveIterator(ZPage* page)
: _iter(page),
_addr(zaddress::null),
_addr_size(0),
_count(0),


@ -41,9 +41,9 @@ private:
ZStatSubPhase _old;
public:
ZRootStatSubPhase(const char* name) :
_young(name, ZGenerationId::young),
_old(name, ZGenerationId::old) {}
ZRootStatSubPhase(const char* name)
: _young(name, ZGenerationId::young),
_old(name, ZGenerationId::old) {}
const ZStatSubPhase& young() const { return _young; }
const ZStatSubPhase& old() const { return _old; }
@ -60,8 +60,8 @@ private:
const ZStatPhase* _phase;
const Ticks _start;
ZRootStatTimer(const ZStatPhase* phase) :
_phase(phase),
ZRootStatTimer(const ZStatPhase* phase)
: _phase(phase),
_start(Ticks::now()) {
if (phase != nullptr) {
_phase->register_start(nullptr /* timer */, _start);
@ -85,8 +85,8 @@ public:
}
public:
ZRootStatTimer(const ZRootStatSubPhase& subphase, const ZGenerationIdOptional generation) :
ZRootStatTimer(calculate_subphase(generation, subphase)) {}
ZRootStatTimer(const ZRootStatSubPhase& subphase, const ZGenerationIdOptional generation)
: ZRootStatTimer(calculate_subphase(generation, subphase)) {}
};
template <typename Iterator>
@ -137,8 +137,8 @@ void ZJavaThreadsIterator::apply(ThreadClosure* cl) {
}
}
ZNMethodsIteratorImpl::ZNMethodsIteratorImpl(ZGenerationIdOptional generation, bool enabled, bool secondary) :
_enabled(enabled),
ZNMethodsIteratorImpl::ZNMethodsIteratorImpl(ZGenerationIdOptional generation, bool enabled, bool secondary)
: _enabled(enabled),
_secondary(secondary),
_generation(generation) {
if (_enabled) {


@ -37,8 +37,8 @@ private:
volatile bool _completed;
public:
ZParallelApply(ZGenerationIdOptional generation) :
_iter(generation),
ZParallelApply(ZGenerationIdOptional generation)
: _iter(generation),
_completed(false) {}
template <typename ClosureType>
@ -55,8 +55,8 @@ private:
const ZGenerationIdOptional _generation;
public:
ZOopStorageSetIteratorStrong(ZGenerationIdOptional generation) :
_iter(),
ZOopStorageSetIteratorStrong(ZGenerationIdOptional generation)
: _iter(),
_generation(generation) {}
void apply(OopClosure* cl);
@ -68,8 +68,8 @@ private:
const ZGenerationIdOptional _generation;
public:
ZOopStorageSetIteratorWeak(ZGenerationIdOptional generation) :
_iter(),
ZOopStorageSetIteratorWeak(ZGenerationIdOptional generation)
: _iter(),
_generation(generation) {}
void apply(OopClosure* cl);
@ -82,8 +82,8 @@ private:
const ZGenerationIdOptional _generation;
public:
ZCLDsIteratorStrong(ZGenerationIdOptional generation) :
_generation(generation) {}
ZCLDsIteratorStrong(ZGenerationIdOptional generation)
: _generation(generation) {}
void apply(CLDClosure* cl);
};
@ -93,8 +93,8 @@ private:
const ZGenerationIdOptional _generation;
public:
ZCLDsIteratorWeak(ZGenerationIdOptional generation) :
_generation(generation) {}
ZCLDsIteratorWeak(ZGenerationIdOptional generation)
: _generation(generation) {}
void apply(CLDClosure* cl);
};
@ -104,8 +104,8 @@ private:
const ZGenerationIdOptional _generation;
public:
ZCLDsIteratorAll(ZGenerationIdOptional generation) :
_generation(generation) {}
ZCLDsIteratorAll(ZGenerationIdOptional generation)
: _generation(generation) {}
void apply(CLDClosure* cl);
};
@ -119,8 +119,8 @@ private:
uint claim();
public:
ZJavaThreadsIterator(ZGenerationIdOptional generation) :
_threads(),
ZJavaThreadsIterator(ZGenerationIdOptional generation)
: _threads(),
_claimed(0),
_generation(generation) {}
@ -143,20 +143,20 @@ public:
class ZNMethodsIteratorStrong : public ZNMethodsIteratorImpl {
public:
ZNMethodsIteratorStrong(ZGenerationIdOptional generation) :
ZNMethodsIteratorImpl(generation, !ClassUnloading /* enabled */, false /* secondary */) {}
ZNMethodsIteratorStrong(ZGenerationIdOptional generation)
: ZNMethodsIteratorImpl(generation, !ClassUnloading /* enabled */, false /* secondary */) {}
};
class ZNMethodsIteratorWeak : public ZNMethodsIteratorImpl {
public:
ZNMethodsIteratorWeak(ZGenerationIdOptional generation) :
ZNMethodsIteratorImpl(generation, true /* enabled */, true /* secondary */) {}
ZNMethodsIteratorWeak(ZGenerationIdOptional generation)
: ZNMethodsIteratorImpl(generation, true /* enabled */, true /* secondary */) {}
};
class ZNMethodsIteratorAll : public ZNMethodsIteratorImpl {
public:
ZNMethodsIteratorAll(ZGenerationIdOptional generation) :
ZNMethodsIteratorImpl(generation, true /* enabled */, true /* secondary */) {}
ZNMethodsIteratorAll(ZGenerationIdOptional generation)
: ZNMethodsIteratorImpl(generation, true /* enabled */, true /* secondary */) {}
};
class ZRootsIteratorStrongUncolored {
@ -165,8 +165,8 @@ private:
ZParallelApply<ZNMethodsIteratorStrong> _nmethods_strong;
public:
ZRootsIteratorStrongUncolored(ZGenerationIdOptional generation) :
_java_threads(generation),
ZRootsIteratorStrongUncolored(ZGenerationIdOptional generation)
: _java_threads(generation),
_nmethods_strong(generation) {}
void apply(ThreadClosure* thread_cl,
@ -178,8 +178,8 @@ private:
ZParallelApply<ZNMethodsIteratorWeak> _nmethods_weak;
public:
ZRootsIteratorWeakUncolored(ZGenerationIdOptional generation) :
_nmethods_weak(generation) {}
ZRootsIteratorWeakUncolored(ZGenerationIdOptional generation)
: _nmethods_weak(generation) {}
void apply(NMethodClosure* nm_cl);
};
@ -190,8 +190,8 @@ private:
ZParallelApply<ZNMethodsIteratorAll> _nmethods_all;
public:
ZRootsIteratorAllUncolored(ZGenerationIdOptional generation) :
_java_threads(generation),
ZRootsIteratorAllUncolored(ZGenerationIdOptional generation)
: _java_threads(generation),
_nmethods_all(generation) {}
void apply(ThreadClosure* thread_cl,
@ -204,8 +204,8 @@ private:
ZParallelApply<ZCLDsIteratorStrong> _clds_strong;
public:
ZRootsIteratorStrongColored(ZGenerationIdOptional generation) :
_oop_storage_set_strong(generation),
ZRootsIteratorStrongColored(ZGenerationIdOptional generation)
: _oop_storage_set_strong(generation),
_clds_strong(generation) {}
void apply(OopClosure* cl,
@ -217,8 +217,8 @@ private:
ZParallelApply<ZOopStorageSetIteratorWeak> _oop_storage_set_weak;
public:
ZRootsIteratorWeakColored(ZGenerationIdOptional generation) :
_oop_storage_set_weak(generation) {}
ZRootsIteratorWeakColored(ZGenerationIdOptional generation)
: _oop_storage_set_weak(generation) {}
void apply(OopClosure* cl);
@ -232,8 +232,8 @@ private:
ZParallelApply<ZCLDsIteratorAll> _clds_all;
public:
ZRootsIteratorAllColored(ZGenerationIdOptional generation) :
_oop_storage_set_strong(generation),
ZRootsIteratorAllColored(ZGenerationIdOptional generation)
: _oop_storage_set_strong(generation),
_oop_storage_set_weak(generation),
_clds_all(generation) {}


@ -27,9 +27,8 @@
#include "gc/z/zRuntimeWorkers.hpp"
#include "runtime/java.hpp"
ZRuntimeWorkers::ZRuntimeWorkers() :
_workers("RuntimeWorker",
ParallelGCThreads) {
ZRuntimeWorkers::ZRuntimeWorkers()
: _workers("RuntimeWorker", ParallelGCThreads) {
log_info_p(gc, init)("Runtime Workers: %u", _workers.max_workers());


@ -31,8 +31,8 @@
#include <type_traits>
template <typename T>
ZSafeDelete<T>::ZSafeDelete(bool locked) :
_deferred(locked) {}
ZSafeDelete<T>::ZSafeDelete(bool locked)
: _deferred(locked) {}
template <typename T>
void ZSafeDelete<T>::immediate_delete(ItemT* item) {


@ -53,10 +53,18 @@ static ZMemoryUsageInfo compute_memory_usage_info() {
class ZGenerationCounters : public GenerationCounters {
public:
ZGenerationCounters(const char* name, int ordinal, int spaces,
size_t min_capacity, size_t max_capacity, size_t curr_capacity) :
GenerationCounters(name, ordinal, spaces,
min_capacity, max_capacity, curr_capacity) {}
ZGenerationCounters(const char* name,
int ordinal,
int spaces,
size_t min_capacity,
size_t max_capacity,
size_t curr_capacity)
: GenerationCounters(name,
ordinal,
spaces,
min_capacity,
max_capacity,
curr_capacity) {}
void update_capacity(size_t capacity) {
_current_size->set_value(capacity);
@ -81,8 +89,8 @@ public:
void update_sizes();
};
ZServiceabilityCounters::ZServiceabilityCounters(size_t initial_capacity, size_t min_capacity, size_t max_capacity) :
// generation.0
ZServiceabilityCounters::ZServiceabilityCounters(size_t initial_capacity, size_t min_capacity, size_t max_capacity)
: // generation.0
_generation_young_counters(
"young" /* name */,
0 /* ordinal */,
@ -141,8 +149,8 @@ void ZServiceabilityCounters::update_sizes() {
}
}
ZServiceabilityMemoryPool::ZServiceabilityMemoryPool(const char* name, ZGenerationId id, size_t min_capacity, size_t max_capacity) :
CollectedMemoryPool(name,
ZServiceabilityMemoryPool::ZServiceabilityMemoryPool(const char* name, ZGenerationId id, size_t min_capacity, size_t max_capacity)
: CollectedMemoryPool(name,
min_capacity,
max_capacity,
id == ZGenerationId::old /* support_usage_threshold */),
@ -164,16 +172,16 @@ MemoryUsage ZServiceabilityMemoryPool::get_memory_usage() {
ZServiceabilityMemoryManager::ZServiceabilityMemoryManager(const char* name,
MemoryPool* young_memory_pool,
MemoryPool* old_memory_pool) :
GCMemoryManager(name) {
MemoryPool* old_memory_pool)
: GCMemoryManager(name) {
add_pool(young_memory_pool);
add_pool(old_memory_pool);
}
ZServiceability::ZServiceability(size_t initial_capacity,
size_t min_capacity,
size_t max_capacity) :
_initial_capacity(initial_capacity),
size_t max_capacity)
: _initial_capacity(initial_capacity),
_min_capacity(min_capacity),
_max_capacity(max_capacity),
_young_memory_pool("ZGC Young Generation", ZGenerationId::young, _min_capacity, _max_capacity),
@ -182,8 +190,7 @@ ZServiceability::ZServiceability(size_t initial_capacity,
_major_cycle_memory_manager("ZGC Major Cycles", &_young_memory_pool, &_old_memory_pool),
_minor_pause_memory_manager("ZGC Minor Pauses", &_young_memory_pool, &_old_memory_pool),
_major_pause_memory_manager("ZGC Major Pauses", &_young_memory_pool, &_old_memory_pool),
_counters(nullptr) {
}
_counters(nullptr) {}
void ZServiceability::initialize() {
_counters = new ZServiceabilityCounters(_initial_capacity, _min_capacity, _max_capacity);
@ -213,8 +220,8 @@ ZServiceabilityCounters* ZServiceability::counters() {
bool ZServiceabilityCycleTracer::_minor_is_active;
ZServiceabilityCycleTracer::ZServiceabilityCycleTracer(bool minor) :
_memory_manager_stats(ZHeap::heap()->serviceability_cycle_memory_manager(minor),
ZServiceabilityCycleTracer::ZServiceabilityCycleTracer(bool minor)
: _memory_manager_stats(ZHeap::heap()->serviceability_cycle_memory_manager(minor),
minor ? ZDriver::minor()->gc_cause() : ZDriver::major()->gc_cause(),
"end of GC cycle",
true /* allMemoryPoolsAffected */,
@ -244,8 +251,8 @@ bool ZServiceabilityPauseTracer::minor_is_active() const {
return ZServiceabilityCycleTracer::minor_is_active();
}
ZServiceabilityPauseTracer::ZServiceabilityPauseTracer() :
_svc_gc_marker(SvcGCMarker::CONCURRENT),
ZServiceabilityPauseTracer::ZServiceabilityPauseTracer()
: _svc_gc_marker(SvcGCMarker::CONCURRENT),
_counters_stats(ZHeap::heap()->serviceability_counters()->collector_counters(minor_is_active())),
_memory_manager_stats(ZHeap::heap()->serviceability_pause_memory_manager(minor_is_active()),
minor_is_active() ? ZDriver::minor()->gc_cause() : ZDriver::major()->gc_cause(),


@ -37,8 +37,8 @@
#include "runtime/thread.hpp"
#include "utilities/preserveException.hpp"
ZOnStackCodeBlobClosure::ZOnStackCodeBlobClosure() :
_bs_nm(BarrierSet::barrier_set()->barrier_set_nmethod()) {}
ZOnStackCodeBlobClosure::ZOnStackCodeBlobClosure()
: _bs_nm(BarrierSet::barrier_set()->barrier_set_nmethod()) {}
void ZOnStackCodeBlobClosure::do_code_blob(CodeBlob* cb) {
nmethod* const nm = cb->as_nmethod_or_null();
@ -56,8 +56,8 @@ uint32_t ZStackWatermark::epoch_id() const {
return *ZPointerStoreGoodMaskLowOrderBitsAddr;
}
ZStackWatermark::ZStackWatermark(JavaThread* jt) :
StackWatermark(jt, StackWatermarkKind::gc, *ZPointerStoreGoodMaskLowOrderBitsAddr),
ZStackWatermark::ZStackWatermark(JavaThread* jt)
: StackWatermark(jt, StackWatermarkKind::gc, *ZPointerStoreGoodMaskLowOrderBitsAddr),
// First watermark is fake and setup to be replaced at next phase shift
_old_watermarks{{ZPointerStoreBadMask, 1}, {}, {}},
_old_watermarks_newest(0),
@ -154,8 +154,8 @@ private:
}
public:
ZStackWatermarkProcessOopClosure(void* context, uintptr_t color) :
_function(select_function(context)), _color(color) {}
ZStackWatermarkProcessOopClosure(void* context, uintptr_t color)
: _function(select_function(context)), _color(color) {}
virtual void do_root(zaddress_unsafe* p) {
_function(p, _color);


@ -62,10 +62,10 @@ struct ZStatSamplerData {
uint64_t _sum;
uint64_t _max;
ZStatSamplerData() :
_nsamples(0),
_sum(0),
_max(0) {}
ZStatSamplerData()
: _nsamples(0),
_sum(0),
_max(0) {}
void add(const ZStatSamplerData& new_sample) {
_nsamples += new_sample._nsamples;
@ -77,8 +77,8 @@ struct ZStatSamplerData {
struct ZStatCounterData {
uint64_t _counter;
ZStatCounterData() :
_counter(0) {}
ZStatCounterData()
: _counter(0) {}
};
//
@ -93,8 +93,8 @@ private:
ZStatSamplerData _total;
public:
ZStatSamplerHistoryInterval() :
_next(0),
ZStatSamplerHistoryInterval()
: _next(0),
_samples(),
_accumulated(),
_total() {}
@ -164,8 +164,8 @@ private:
}
public:
ZStatSamplerHistory() :
_10seconds(),
ZStatSamplerHistory()
: _10seconds(),
_10minutes(),
_10hours(),
_total() {}
@ -341,8 +341,8 @@ uint32_t ZStatValue::_cpu_offset = 0;
ZStatValue::ZStatValue(const char* group,
const char* name,
uint32_t id,
uint32_t size) :
_group(group),
uint32_t size)
: _group(group),
_name(name),
_id(id),
_offset(_cpu_offset) {
@ -428,8 +428,8 @@ void ZStatIterableValue<T>::sort() {
//
// Stat sampler
//
ZStatSampler::ZStatSampler(const char* group, const char* name, ZStatUnitPrinter printer) :
ZStatIterableValue<ZStatSampler>(group, name, sizeof(ZStatSamplerData)),
ZStatSampler::ZStatSampler(const char* group, const char* name, ZStatUnitPrinter printer)
: ZStatIterableValue<ZStatSampler>(group, name, sizeof(ZStatSamplerData)),
_printer(printer) {}
ZStatSamplerData* ZStatSampler::get() const {
@ -464,8 +464,8 @@ ZStatUnitPrinter ZStatSampler::printer() const {
//
// Stat counter
//
ZStatCounter::ZStatCounter(const char* group, const char* name, ZStatUnitPrinter printer) :
ZStatIterableValue<ZStatCounter>(group, name, sizeof(ZStatCounterData)),
ZStatCounter::ZStatCounter(const char* group, const char* name, ZStatUnitPrinter printer)
: ZStatIterableValue<ZStatCounter>(group, name, sizeof(ZStatCounterData)),
_sampler(group, name, printer) {}
ZStatCounterData* ZStatCounter::get() const {
@ -487,8 +487,8 @@ void ZStatCounter::sample_and_reset() const {
//
// Stat unsampled counter
//
ZStatUnsampledCounter::ZStatUnsampledCounter(const char* name) :
ZStatIterableValue<ZStatUnsampledCounter>("Unsampled", name, sizeof(ZStatCounterData)) {}
ZStatUnsampledCounter::ZStatUnsampledCounter(const char* name)
: ZStatIterableValue<ZStatUnsampledCounter>("Unsampled", name, sizeof(ZStatCounterData)) {}
ZStatCounterData* ZStatUnsampledCounter::get() const {
return get_cpu_local<ZStatCounterData>(ZCPU::id());
@ -509,12 +509,12 @@ ZStatCounterData ZStatUnsampledCounter::collect_and_reset() const {
//
// Stat MMU (Minimum Mutator Utilization)
//
ZStatMMUPause::ZStatMMUPause() :
_start(0.0),
ZStatMMUPause::ZStatMMUPause()
: _start(0.0),
_end(0.0) {}
ZStatMMUPause::ZStatMMUPause(const Ticks& start, const Ticks& end) :
_start(TimeHelper::counter_to_millis(start.value())),
ZStatMMUPause::ZStatMMUPause(const Ticks& start, const Ticks& end)
: _start(TimeHelper::counter_to_millis(start.value())),
_end(TimeHelper::counter_to_millis(end.value())) {}
double ZStatMMUPause::end() const {
@ -593,8 +593,8 @@ void ZStatMMU::print() {
// Stat phases
//
ZStatPhase::ZStatPhase(const char* group, const char* name) :
_sampler(group, name, ZStatUnitTime) {}
ZStatPhase::ZStatPhase(const char* group, const char* name)
: _sampler(group, name, ZStatUnitTime) {}
void ZStatPhase::log_start(LogTargetHandle log, bool thread) const {
if (!log.is_enabled()) {
@ -626,8 +626,8 @@ const char* ZStatPhase::name() const {
return _sampler.name();
}
ZStatPhaseCollection::ZStatPhaseCollection(const char* name, bool minor) :
ZStatPhase(minor ? "Minor Collection" : "Major Collection", name),
ZStatPhaseCollection::ZStatPhaseCollection(const char* name, bool minor)
: ZStatPhase(minor ? "Minor Collection" : "Major Collection", name),
_minor(minor) {}
GCTracer* ZStatPhaseCollection::jfr_tracer() const {
@ -689,8 +689,8 @@ void ZStatPhaseCollection::register_end(ConcurrentGCTimer* timer, const Ticks& s
duration.seconds());
}
ZStatPhaseGeneration::ZStatPhaseGeneration(const char* name, ZGenerationId id) :
ZStatPhase(id == ZGenerationId::old ? "Old Generation" : "Young Generation", name),
ZStatPhaseGeneration::ZStatPhaseGeneration(const char* name, ZGenerationId id)
: ZStatPhase(id == ZGenerationId::old ? "Old Generation" : "Young Generation", name),
_id(id) {}
ZGenerationTracer* ZStatPhaseGeneration::jfr_tracer() const {
@ -748,8 +748,8 @@ void ZStatPhaseGeneration::register_end(ConcurrentGCTimer* timer, const Ticks& s
Tickspan ZStatPhasePause::_max;
ZStatPhasePause::ZStatPhasePause(const char* name, ZGenerationId id) :
ZStatPhase(id == ZGenerationId::young ? "Young Pause" : "Old Pause", name) {}
ZStatPhasePause::ZStatPhasePause(const char* name, ZGenerationId id)
: ZStatPhase(id == ZGenerationId::young ? "Young Pause" : "Old Pause", name) {}
const Tickspan& ZStatPhasePause::max() {
return _max;
@ -780,8 +780,8 @@ void ZStatPhasePause::register_end(ConcurrentGCTimer* timer, const Ticks& start,
log_end(log, duration);
}
ZStatPhaseConcurrent::ZStatPhaseConcurrent(const char* name, ZGenerationId id) :
ZStatPhase(id == ZGenerationId::young ? "Young Phase" : "Old Phase", name) {}
ZStatPhaseConcurrent::ZStatPhaseConcurrent(const char* name, ZGenerationId id)
: ZStatPhase(id == ZGenerationId::young ? "Young Phase" : "Old Phase", name) {}
void ZStatPhaseConcurrent::register_start(ConcurrentGCTimer* timer, const Ticks& start) const {
timer->register_gc_concurrent_start(name(), start);
@ -804,8 +804,8 @@ void ZStatPhaseConcurrent::register_end(ConcurrentGCTimer* timer, const Ticks& s
log_end(log, duration);
}
ZStatSubPhase::ZStatSubPhase(const char* name, ZGenerationId id) :
ZStatPhase(id == ZGenerationId::young ? "Young Subphase" : "Old Subphase", name) {}
ZStatSubPhase::ZStatSubPhase(const char* name, ZGenerationId id)
: ZStatPhase(id == ZGenerationId::young ? "Young Subphase" : "Old Subphase", name) {}
void ZStatSubPhase::register_start(ConcurrentGCTimer* timer, const Ticks& start) const {
if (timer != nullptr) {
@ -846,8 +846,8 @@ void ZStatSubPhase::register_end(ConcurrentGCTimer* timer, const Ticks& start, c
}
}
ZStatCriticalPhase::ZStatCriticalPhase(const char* name, bool verbose) :
ZStatPhase("Critical", name),
ZStatCriticalPhase::ZStatCriticalPhase(const char* name, bool verbose)
: ZStatPhase("Critical", name),
_counter("Critical", name, ZStatUnitOpsPerSecond),
_verbose(verbose) {}
@ -874,14 +874,14 @@ void ZStatCriticalPhase::register_end(ConcurrentGCTimer* timer, const Ticks& sta
}
}
ZStatTimerYoung::ZStatTimerYoung(const ZStatPhase& phase) :
ZStatTimer(phase, ZGeneration::young()->gc_timer()) {}
ZStatTimerYoung::ZStatTimerYoung(const ZStatPhase& phase)
: ZStatTimer(phase, ZGeneration::young()->gc_timer()) {}
ZStatTimerOld::ZStatTimerOld(const ZStatPhase& phase) :
ZStatTimer(phase, ZGeneration::old()->gc_timer()) {}
ZStatTimerOld::ZStatTimerOld(const ZStatPhase& phase)
: ZStatTimer(phase, ZGeneration::old()->gc_timer()) {}
ZStatTimerWorker::ZStatTimerWorker(const ZStatPhase& phase) :
ZStatTimer(phase, nullptr /* gc_timer */) {
ZStatTimerWorker::ZStatTimerWorker(const ZStatPhase& phase)
: ZStatTimer(phase, nullptr /* gc_timer */) {
assert(Thread::current()->is_Worker_thread(), "Should only be called by worker thread");
}
@ -1014,8 +1014,8 @@ ZStatMutatorAllocRateStats ZStatMutatorAllocRate::stats() {
//
// Stat thread
//
ZStat::ZStat() :
_metronome(sample_hz) {
ZStat::ZStat()
: _metronome(sample_hz) {
set_name("ZStat");
create_and_start();
ZStatMutatorAllocRate::initialize();
@ -1124,8 +1124,8 @@ public:
}
public:
ZColumn(char* buffer, size_t position, size_t width, size_t width_next) :
_buffer(buffer),
ZColumn(char* buffer, size_t position, size_t width, size_t width_next)
: _buffer(buffer),
_position(position),
_width(width),
_width_next(width_next) {}
@ -1206,8 +1206,8 @@ public:
};
public:
ZStatTablePrinter(size_t column0_width, size_t columnN_width) :
_column0_width(column0_width),
ZStatTablePrinter(size_t column0_width, size_t columnN_width)
: _column0_width(column0_width),
_columnN_width(columnN_width) {}
ZColumn operator()() {
@ -1218,8 +1218,8 @@ public:
//
// Stat cycle
//
ZStatCycle::ZStatCycle() :
_stat_lock(),
ZStatCycle::ZStatCycle()
: _stat_lock(),
_nwarmup_cycles(0),
_start_of_last(),
_end_of_last(),
@ -1227,8 +1227,7 @@ ZStatCycle::ZStatCycle() :
_serial_time(0.7 /* alpha */),
_parallelizable_time(0.7 /* alpha */),
_parallelizable_duration(0.7 /* alpha */),
_last_active_workers(0.0) {
}
_last_active_workers(0.0) {}
void ZStatCycle::at_start() {
ZLocker<ZLock> locker(&_stat_lock);
@ -1323,8 +1322,8 @@ ZStatCycleStats ZStatCycle::stats() {
//
// Stat workers
//
ZStatWorkers::ZStatWorkers() :
_stat_lock(),
ZStatWorkers::ZStatWorkers()
: _stat_lock(),
_active_workers(0),
_start_of_last(),
_accumulated_duration(),
@ -1415,14 +1414,13 @@ void ZStatLoad::print() {
//
// Stat mark
//
ZStatMark::ZStatMark() :
_nstripes(),
ZStatMark::ZStatMark()
: _nstripes(),
_nproactiveflush(),
_nterminateflush(),
_ntrycomplete(),
_ncontinue(),
_mark_stack_usage() {
}
_mark_stack_usage() {}
void ZStatMark::at_mark_start(size_t nstripes) {
_nstripes = nstripes;
@ -1461,14 +1459,13 @@ void ZStatMark::print() {
//
// Stat relocation
//
ZStatRelocation::ZStatRelocation() :
_selector_stats(),
ZStatRelocation::ZStatRelocation()
: _selector_stats(),
_forwarding_usage(),
_small_selected(),
_small_in_place_count(),
_medium_selected(),
_medium_in_place_count() {
}
_medium_in_place_count() {}
void ZStatRelocation::at_select_relocation_set(const ZRelocationSetSelectorStats& selector_stats) {
_selector_stats = selector_stats;
@ -1706,15 +1703,14 @@ void ZStatReferences::print() {
// Stat heap
//
ZStatHeap::ZStatHeap() :
_stat_lock(),
ZStatHeap::ZStatHeap()
: _stat_lock(),
_at_collection_start(),
_at_mark_start(),
_at_mark_end(),
_at_relocate_start(),
_at_relocate_end(),
_reclaimed_bytes(0.7 /* alpha */) {
}
_reclaimed_bytes(0.7 /* alpha */) {}
ZStatHeap::ZAtInitialize ZStatHeap::_at_initialize;


@ -305,19 +305,19 @@ private:
const Ticks _start;
public:
ZStatTimer(const ZStatPhase& phase, ConcurrentGCTimer* gc_timer) :
_gc_timer(gc_timer),
ZStatTimer(const ZStatPhase& phase, ConcurrentGCTimer* gc_timer)
: _gc_timer(gc_timer),
_phase(phase),
_start(Ticks::now()) {
_phase.register_start(_gc_timer, _start);
}
ZStatTimer(const ZStatSubPhase& phase) :
ZStatTimer(phase, nullptr /* timer */) {
ZStatTimer(const ZStatSubPhase& phase)
: ZStatTimer(phase, nullptr /* timer */) {
}
ZStatTimer(const ZStatCriticalPhase& phase) :
ZStatTimer(phase, nullptr /* timer */) {
ZStatTimer(const ZStatCriticalPhase& phase)
: ZStatTimer(phase, nullptr /* timer */) {
}
~ZStatTimer() {


@ -49,14 +49,13 @@ ByteSize ZStoreBarrierBuffer::current_offset() {
return byte_offset_of(ZStoreBarrierBuffer, _current);
}
ZStoreBarrierBuffer::ZStoreBarrierBuffer() :
_buffer(),
ZStoreBarrierBuffer::ZStoreBarrierBuffer()
: _buffer(),
_last_processed_color(),
_last_installed_color(),
_base_pointer_lock(),
_base_pointers(),
_current(ZBufferStoreBarriers ? _buffer_size_bytes : 0) {
}
_current(ZBufferStoreBarriers ? _buffer_size_bytes : 0) {}
void ZStoreBarrierBuffer::initialize() {
_last_processed_color = ZPointerStoreGoodMask;
@ -247,8 +246,8 @@ private:
ZStoreBarrierBuffer* _buffer;
public:
OnError(ZStoreBarrierBuffer* buffer) :
_buffer(buffer) {}
OnError(ZStoreBarrierBuffer* buffer)
: _buffer(buffer) {}
virtual void call(outputStream* st) {
_buffer->on_error(st);


@ -24,16 +24,16 @@
#include "precompiled.hpp"
#include "gc/z/zTask.hpp"
ZTask::Task::Task(ZTask* task, const char* name) :
WorkerTask(name),
ZTask::Task::Task(ZTask* task, const char* name)
: WorkerTask(name),
_task(task) {}
void ZTask::Task::work(uint worker_id) {
_task->work();
}
ZTask::ZTask(const char* name) :
_worker_task(this, name) {}
ZTask::ZTask(const char* name)
: _worker_task(this, name) {}
const char* ZTask::name() const {
return _worker_task.name();
@ -43,7 +43,7 @@ WorkerTask* ZTask::worker_task() {
return &_worker_task;
}
ZRestartableTask::ZRestartableTask(const char* name) :
ZTask(name) {}
ZRestartableTask::ZRestartableTask(const char* name)
: ZTask(name) {}
void ZRestartableTask::resize_workers(uint nworkers) {}


@ -45,8 +45,8 @@ private:
ZMarkThreadLocalStacks _mark_stacks[2];
zaddress_unsafe* _invisible_root;
ZThreadLocalData() :
_load_good_mask(0),
ZThreadLocalData()
: _load_good_mask(0),
_load_bad_mask(0),
_mark_bad_mask(0),
_store_good_mask(0),


@ -87,12 +87,11 @@ static void register_jfr_type_serializers() {
#endif // INCLUDE_JFR
ZMinorTracer::ZMinorTracer() :
GCTracer(ZMinor) {
}
ZMinorTracer::ZMinorTracer()
: GCTracer(ZMinor) {}
ZMajorTracer::ZMajorTracer() :
GCTracer(ZMajor) {}
ZMajorTracer::ZMajorTracer()
: GCTracer(ZMajor) {}
void ZGenerationTracer::report_start(const Ticks& timestamp) {
_start = timestamp;


@ -62,8 +62,8 @@ protected:
Ticks _start;
public:
ZGenerationTracer() :
_start() {}
ZGenerationTracer()
: _start() {}
void report_start(const Ticks& timestamp);
virtual void report_end(const Ticks& timestamp) = 0;


@ -52,8 +52,8 @@ inline void ZTracer::report_thread_debug(const char* name, const Ticks& start, c
}
}
inline ZTraceThreadDebug::ZTraceThreadDebug(const char* name) :
_start(Ticks::now()),
inline ZTraceThreadDebug::ZTraceThreadDebug(const char* name)
: _start(Ticks::now()),
_name(name) {}
inline ZTraceThreadDebug::~ZTraceThreadDebug() {


@ -115,36 +115,36 @@ inline zaddress_unsafe* ZUncoloredRoot::cast(oop* p) {
return root;
}
inline ZUncoloredRootMarkOopClosure::ZUncoloredRootMarkOopClosure(uintptr_t color) :
_color(color) {}
inline ZUncoloredRootMarkOopClosure::ZUncoloredRootMarkOopClosure(uintptr_t color)
: _color(color) {}
inline void ZUncoloredRootMarkOopClosure::do_root(zaddress_unsafe* p) {
ZUncoloredRoot::mark(p, _color);
}
inline ZUncoloredRootMarkYoungOopClosure::ZUncoloredRootMarkYoungOopClosure(uintptr_t color) :
_color(color) {}
inline ZUncoloredRootMarkYoungOopClosure::ZUncoloredRootMarkYoungOopClosure(uintptr_t color)
: _color(color) {}
inline void ZUncoloredRootMarkYoungOopClosure::do_root(zaddress_unsafe* p) {
ZUncoloredRoot::mark_young(p, _color);
}
inline ZUncoloredRootProcessOopClosure::ZUncoloredRootProcessOopClosure(uintptr_t color) :
_color(color) {}
inline ZUncoloredRootProcessOopClosure::ZUncoloredRootProcessOopClosure(uintptr_t color)
: _color(color) {}
inline void ZUncoloredRootProcessOopClosure::do_root(zaddress_unsafe* p) {
ZUncoloredRoot::process(p, _color);
}
inline ZUncoloredRootProcessWeakOopClosure::ZUncoloredRootProcessWeakOopClosure(uintptr_t color) :
_color(color) {}
inline ZUncoloredRootProcessWeakOopClosure::ZUncoloredRootProcessWeakOopClosure(uintptr_t color)
: _color(color) {}
inline void ZUncoloredRootProcessWeakOopClosure::do_root(zaddress_unsafe* p) {
ZUncoloredRoot::process_weak(p, _color);
}
inline ZUncoloredRootProcessNoKeepaliveOopClosure::ZUncoloredRootProcessNoKeepaliveOopClosure(uintptr_t color) :
_color(color) {}
inline ZUncoloredRootProcessNoKeepaliveOopClosure::ZUncoloredRootProcessNoKeepaliveOopClosure(uintptr_t color)
: _color(color) {}
inline void ZUncoloredRootProcessNoKeepaliveOopClosure::do_root(zaddress_unsafe* p) {
ZUncoloredRoot::process_no_keepalive(p, _color);


@ -32,8 +32,8 @@
static const ZStatCounter ZCounterUncommit("Memory", "Uncommit", ZStatUnitBytesPerSecond);
ZUncommitter::ZUncommitter(ZPageAllocator* page_allocator) :
_page_allocator(page_allocator),
ZUncommitter::ZUncommitter(ZPageAllocator* page_allocator)
: _page_allocator(page_allocator),
_lock(),
_stop(false) {
set_name("ZUncommitter");


@ -49,8 +49,8 @@ private:
bool _is_unloading;
public:
ZIsUnloadingOopClosure(nmethod* nm) :
_color(ZNMethod::color(nm)),
ZIsUnloadingOopClosure(nmethod* nm)
: _color(ZNMethod::color(nm)),
_is_unloading(false) {}
virtual void do_oop(oop* p) {
@ -114,8 +114,8 @@ public:
}
};
ZUnload::ZUnload(ZWorkers* workers) :
_workers(workers) {
ZUnload::ZUnload(ZWorkers* workers)
: _workers(workers) {
if (!ClassUnloading) {
return;


@ -31,8 +31,8 @@
#include "jfr/jfrEvents.hpp"
#include "runtime/globals.hpp"
ZUnmapper::ZUnmapper(ZPageAllocator* page_allocator) :
_page_allocator(page_allocator),
ZUnmapper::ZUnmapper(ZPageAllocator* page_allocator)
: _page_allocator(page_allocator),
_lock(),
_queue(),
_stop(false) {


@ -123,8 +123,8 @@ inline uintptr_t ZValue<S, T>::value_addr(uint32_t value_id) const {
}
template <typename S, typename T>
inline ZValue<S, T>::ZValue() :
_addr(S::alloc(sizeof(T))) {
inline ZValue<S, T>::ZValue()
: _addr(S::alloc(sizeof(T))) {
// Initialize all instances
ZValueIterator<S, T> iter(this);
for (T* addr; iter.next(&addr);) {
@ -133,8 +133,8 @@ inline ZValue<S, T>::ZValue() :
}
template <typename S, typename T>
inline ZValue<S, T>::ZValue(const T& value) :
_addr(S::alloc(sizeof(T))) {
inline ZValue<S, T>::ZValue(const T& value)
: _addr(S::alloc(sizeof(T))) {
// Initialize all instances
ZValueIterator<S, T> iter(this);
for (T* addr; iter.next(&addr);) {
@ -180,8 +180,8 @@ inline void ZValue<S, T>::set_all(const T& value) {
//
template <typename S, typename T>
inline ZValueIterator<S, T>::ZValueIterator(ZValue<S, T>* value) :
_value(value),
inline ZValueIterator<S, T>::ZValueIterator(ZValue<S, T>* value)
: _value(value),
_value_id(0) {}
template <typename S, typename T>
@ -194,8 +194,8 @@ inline bool ZValueIterator<S, T>::next(T** value) {
}
template <typename S, typename T>
inline ZValueConstIterator<S, T>::ZValueConstIterator(const ZValue<S, T>* value) :
_value(value),
inline ZValueConstIterator<S, T>::ZValueConstIterator(const ZValue<S, T>* value)
: _value(value),
_value_id(0) {}
template <typename S, typename T>


@ -136,8 +136,8 @@ private:
const bool _verify_marked_old;
public:
ZVerifyColoredRootClosure(bool verify_marked_old) :
OopClosure(),
ZVerifyColoredRootClosure(bool verify_marked_old)
: OopClosure(),
_verify_marked_old(verify_marked_old) {}
virtual void do_oop(oop* p_) {
@ -191,8 +191,8 @@ public:
class ZVerifyCodeBlobClosure : public CodeBlobToOopClosure {
public:
ZVerifyCodeBlobClosure(OopClosure* cl) :
CodeBlobToOopClosure(cl, false /* fix_relocations */) {}
ZVerifyCodeBlobClosure(OopClosure* cl)
: CodeBlobToOopClosure(cl, false /* fix_relocations */) {}
virtual void do_code_blob(CodeBlob* cb) {
CodeBlobToOopClosure::do_code_blob(cb);
@ -204,8 +204,8 @@ private:
const bool _verify_weaks;
public:
ZVerifyOldOopClosure(bool verify_weaks) :
_verify_weaks(verify_weaks) {}
ZVerifyOldOopClosure(bool verify_weaks)
: _verify_weaks(verify_weaks) {}
virtual void do_oop(oop* p_) {
zpointer* const p = (zpointer*)p_;
@ -232,8 +232,8 @@ private:
const bool _verify_weaks;
public:
ZVerifyYoungOopClosure(bool verify_weaks) :
_verify_weaks(verify_weaks) {}
ZVerifyYoungOopClosure(bool verify_weaks)
: _verify_weaks(verify_weaks) {}
virtual void do_oop(oop* p_) {
zpointer* const p = (zpointer*)p_;
@ -267,8 +267,8 @@ private:
OopClosure* const _verify_cl;
public:
ZVerifyThreadClosure(OopClosure* verify_cl) :
_verify_cl(verify_cl) {}
ZVerifyThreadClosure(OopClosure* verify_cl)
: _verify_cl(verify_cl) {}
virtual void do_thread(Thread* thread) {
JavaThread* const jt = JavaThread::cast(thread);
@ -289,8 +289,8 @@ private:
BarrierSetNMethod* const _bs_nm;
public:
ZVerifyNMethodClosure(OopClosure* cl) :
_cl(cl),
ZVerifyNMethodClosure(OopClosure* cl)
: _cl(cl),
_bs_nm(BarrierSet::barrier_set()->barrier_set_nmethod()) {}
virtual void do_nmethod(nmethod* nm) {
@ -346,8 +346,8 @@ private:
zpointer _visited_ptr_pre_loaded;
public:
ZVerifyObjectClosure(bool verify_weaks) :
_verify_weaks(verify_weaks),
ZVerifyObjectClosure(bool verify_weaks)
: _verify_weaks(verify_weaks),
_visited_base(),
_visited_p(),
_visited_ptr_pre_loaded() {}
@ -480,8 +480,8 @@ private:
zaddress_unsafe _from_addr;
public:
ZVerifyRemsetBeforeOopClosure(ZForwarding* forwarding) :
_forwarding(forwarding),
ZVerifyRemsetBeforeOopClosure(ZForwarding* forwarding)
: _forwarding(forwarding),
_from_addr(zaddress_unsafe::null) {}
void set_from_addr(zaddress_unsafe addr) {
@ -586,8 +586,8 @@ private:
zaddress _to_addr;
public:
ZVerifyRemsetAfterOopClosure(ZForwarding* forwarding) :
_forwarding(forwarding),
ZVerifyRemsetAfterOopClosure(ZForwarding* forwarding)
: _forwarding(forwarding),
_from_addr(zaddress_unsafe::null),
_to_addr(zaddress::null) {}


@ -31,8 +31,8 @@
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
ZVirtualMemoryManager::ZVirtualMemoryManager(size_t max_capacity) :
_manager(),
ZVirtualMemoryManager::ZVirtualMemoryManager(size_t max_capacity)
: _manager(),
_reserved(0),
_initialized(false) {


@ -28,12 +28,12 @@
#include "gc/z/zMemory.inline.hpp"
inline ZVirtualMemory::ZVirtualMemory() :
_start(zoffset(UINTPTR_MAX)),
inline ZVirtualMemory::ZVirtualMemory()
: _start(zoffset(UINTPTR_MAX)),
_end(zoffset_end(UINTPTR_MAX)) {}
inline ZVirtualMemory::ZVirtualMemory(zoffset start, size_t size) :
_start(start),
inline ZVirtualMemory::ZVirtualMemory(zoffset start, size_t size)
: _start(start),
_end(to_zoffset_end(start, size)) {}
inline bool ZVirtualMemory::is_null() const {


@ -46,16 +46,16 @@ public:
}
};
ZWeakRootsProcessor::ZWeakRootsProcessor(ZWorkers* workers) :
_workers(workers) {}
ZWeakRootsProcessor::ZWeakRootsProcessor(ZWorkers* workers)
: _workers(workers) {}
class ZProcessWeakRootsTask : public ZTask {
private:
ZRootsIteratorWeakColored _roots_weak_colored;
public:
ZProcessWeakRootsTask() :
ZTask("ZProcessWeakRootsTask"),
ZProcessWeakRootsTask()
: ZTask("ZProcessWeakRootsTask"),
_roots_weak_colored(ZGenerationIdOptional::old) {}
~ZProcessWeakRootsTask() {


@ -43,8 +43,8 @@ static uint max_nworkers(ZGenerationId id) {
return id == ZGenerationId::young ? ZYoungGCThreads : ZOldGCThreads;
}
ZWorkers::ZWorkers(ZGenerationId id, ZStatWorkers* stats) :
_workers(workers_name(id),
ZWorkers::ZWorkers(ZGenerationId id, ZStatWorkers* stats)
: _workers(workers_name(id),
max_nworkers(id)),
_generation_name(generation_name(id)),
_resize_lock(),


@ -35,8 +35,8 @@ private:
ZListNode<ZTestEntry> _node;
public:
ZTestEntry(int id) :
_id(id),
ZTestEntry(int id)
: _id(id),
_node() {}
int id() const {


@ -32,8 +32,8 @@ private:
const size_t _old_mask;
public:
ZAddressOffsetMaxSetter() :
_old_max(ZAddressOffsetMax),
ZAddressOffsetMaxSetter()
: _old_max(ZAddressOffsetMax),
_old_mask(ZAddressOffsetMask) {
ZAddressOffsetMax = size_t(16) * G * 1024;
ZAddressOffsetMask = ZAddressOffsetMax - 1;