diff --git a/src/hotspot/cpu/ppc/gc/x/xBarrierSetAssembler_ppc.hpp b/src/hotspot/cpu/ppc/gc/x/xBarrierSetAssembler_ppc.hpp
index 14c338dd212..8dfd4524dfe 100644
--- a/src/hotspot/cpu/ppc/gc/x/xBarrierSetAssembler_ppc.hpp
+++ b/src/hotspot/cpu/ppc/gc/x/xBarrierSetAssembler_ppc.hpp
@@ -54,7 +54,7 @@ public:
   virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                        Register base, RegisterOrConstant ind_or_offs, Register dst,
                        Register tmp1, Register tmp2,
-                       MacroAssembler::PreservationLevel preservation_level, Label *L_handle_null = NULL);
+                       MacroAssembler::PreservationLevel preservation_level, Label *L_handle_null = nullptr);
 
 #ifdef ASSERT
   virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
diff --git a/src/hotspot/cpu/ppc/gc/x/x_ppc.ad b/src/hotspot/cpu/ppc/gc/x/x_ppc.ad
index dd46b46a3a3..644fb3def38 100644
--- a/src/hotspot/cpu/ppc/gc/x/x_ppc.ad
+++ b/src/hotspot/cpu/ppc/gc/x/x_ppc.ad
@@ -59,7 +59,7 @@ static void x_compare_and_swap(MacroAssembler& _masm, const MachNode* node,
   // z-specific load barrier requires strong CAS operations.
   // Weak CAS operations are thus only emitted if the barrier is elided.
   __ cmpxchgd(CCR0, tmp_xchg, oldval, newval, mem,
-              MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), res, NULL, true,
+              MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), res, nullptr, true,
               weak && node->barrier_data() == XLoadBarrierElided);
 
   if (node->barrier_data() != XLoadBarrierElided) {
@@ -73,7 +73,7 @@ static void x_compare_and_swap(MacroAssembler& _masm, const MachNode* node,
     x_load_barrier_slow_path(_masm, node, Address(mem), tmp_xchg, res /* used as tmp */);
 
     __ cmpxchgd(CCR0, tmp_xchg, oldval, newval, mem,
-                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), res, NULL, true, weak);
+                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), res, nullptr, true, weak);
 
     __ bind(skip_barrier);
   }
@@ -95,7 +95,7 @@ static void x_compare_and_exchange(MacroAssembler& _masm, const MachNode* node,
   // z-specific load barrier requires strong CAS operations.
   // Weak CAS operations are thus only emitted if the barrier is elided.
   __ cmpxchgd(CCR0, res, oldval, newval, mem,
-              MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, NULL, true,
+              MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, nullptr, true,
               weak && node->barrier_data() == XLoadBarrierElided);
 
   if (node->barrier_data() != XLoadBarrierElided) {
@@ -107,7 +107,7 @@ static void x_compare_and_exchange(MacroAssembler& _masm, const MachNode* node,
     x_load_barrier_slow_path(_masm, node, Address(mem), res, tmp);
 
     __ cmpxchgd(CCR0, res, oldval, newval, mem,
-                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, NULL, true, weak);
+                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, nullptr, true, weak);
 
     __ bind(skip_barrier);
   }
diff --git a/src/hotspot/share/gc/x/c1/xBarrierSetC1.cpp b/src/hotspot/share/gc/x/c1/xBarrierSetC1.cpp
index ddc997900df..caf7eb2e514 100644
--- a/src/hotspot/share/gc/x/c1/xBarrierSetC1.cpp
+++ b/src/hotspot/share/gc/x/c1/xBarrierSetC1.cpp
@@ -97,7 +97,7 @@ private:
 
 public:
   LIR_OpXLoadBarrierTest(LIR_Opr opr) :
-      LIR_Op(lir_xloadbarrier_test, LIR_OprFact::illegalOpr, NULL),
+      LIR_Op(lir_xloadbarrier_test, LIR_OprFact::illegalOpr, nullptr),
       _opr(opr) {}
 
   virtual void visit(LIR_OpVisitState* state) {
@@ -125,8 +125,8 @@ static bool barrier_needed(LIRAccess& access) {
 }
 
 XBarrierSetC1::XBarrierSetC1() :
-    _load_barrier_on_oop_field_preloaded_runtime_stub(NULL),
-    _load_barrier_on_weak_oop_field_preloaded_runtime_stub(NULL) {}
+    _load_barrier_on_oop_field_preloaded_runtime_stub(nullptr),
+    _load_barrier_on_weak_oop_field_preloaded_runtime_stub(nullptr) {}
 
 address XBarrierSetC1::load_barrier_on_oop_field_preloaded_runtime_stub(DecoratorSet decorators) const {
   assert((decorators & ON_PHANTOM_OOP_REF) == 0, "Unsupported decorator");
@@ -189,8 +189,8 @@ static void pre_load_barrier(LIRAccess& access) {
                                        access.base().item(),
                                        access.offset().opr(),
                                        access.gen()->new_register(access.type()),
-                                       NULL /* patch_emit_info */,
-                                       NULL /* load_emit_info */);
+                                       nullptr /* patch_emit_info */,
+                                       nullptr /* load_emit_info */);
 }
 
 LIR_Opr XBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) {
@@ -219,7 +219,7 @@ public:
 
   virtual OopMapSet* generate_code(StubAssembler* sasm) {
     XBarrierSet::assembler()->generate_c1_load_barrier_runtime_stub(sasm, _decorators);
-    return NULL;
+    return nullptr;
   }
 };
 
diff --git a/src/hotspot/share/gc/x/c2/xBarrierSetC2.cpp b/src/hotspot/share/gc/x/c2/xBarrierSetC2.cpp
index 5ec0558cc78..d006b37e7d2 100644
--- a/src/hotspot/share/gc/x/c2/xBarrierSetC2.cpp
+++ b/src/hotspot/share/gc/x/c2/xBarrierSetC2.cpp
@@ -51,7 +51,7 @@ private:
 
 public:
   XBarrierSetC2State(Arena* arena) :
-    _stubs(new (arena) GrowableArray<XLoadBarrierStubC2*>(arena, 8, 0, NULL)),
+    _stubs(new (arena) GrowableArray<XLoadBarrierStubC2*>(arena, 8, 0, nullptr)),
     _live(arena) {}
 
   GrowableArray<XLoadBarrierStubC2*>* stubs() {
@@ -61,17 +61,17 @@ public:
   RegMask* live(const Node* node) {
     if (!node->is_Mach()) {
       // Don't need liveness for non-MachNodes
-      return NULL;
+      return nullptr;
     }
 
     const MachNode* const mach = node->as_Mach();
     if (mach->barrier_data() == XLoadBarrierElided) {
       // Don't need liveness data for nodes without barriers
-      return NULL;
+      return nullptr;
     }
 
     RegMask* live = (RegMask*)_live[node->_idx];
-    if (live == NULL) {
+    if (live == nullptr) {
       live = new (Compile::current()->comp_arena()->AmallocWords(sizeof(RegMask))) RegMask();
       _live.map(node->_idx, (Node*)live);
     }
@@ -136,7 +136,7 @@ address XLoadBarrierStubC2::slow_path() const {
 
 RegMask& XLoadBarrierStubC2::live() const {
   RegMask* mask = barrier_set_state()->live(_node);
-  assert(mask != NULL, "must be mach-node with barrier");
+  assert(mask != nullptr, "must be mach-node with barrier");
   return *mask;
 }
 
@@ -167,7 +167,7 @@ void XBarrierSetC2::emit_stubs(CodeBuffer& cb) const {
 
   for (int i = 0; i < stubs->length(); i++) {
     // Make sure there is enough space in the code buffer
-    if (cb.insts()->maybe_expand_to_ensure_remaining(PhaseOutput::MAX_inst_size) && cb.blob() == NULL) {
+    if (cb.insts()->maybe_expand_to_ensure_remaining(PhaseOutput::MAX_inst_size) && cb.blob() == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
       return;
     }
@@ -272,7 +272,7 @@ void XBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* a
   Node* const src = ac->in(ArrayCopyNode::Src);
   const TypeAryPtr* ary_ptr = src->get_ptr_type()->isa_aryptr();
 
-  if (ac->is_clone_array() && ary_ptr != NULL) {
+  if (ac->is_clone_array() && ary_ptr != nullptr) {
     BasicType bt = ary_ptr->elem()->array_element_basic_type();
     if (is_reference_type(bt)) {
       // Clone object array
@@ -309,7 +309,7 @@ void XBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* a
   Node* payload_dst = phase->basic_plus_adr(dest, dest_offset);
 
   const char* copyfunc_name = "arraycopy";
-  address copyfunc_addr = phase->basictype2arraycopy(bt, NULL, NULL, true, copyfunc_name, true);
+  address copyfunc_addr = phase->basictype2arraycopy(bt, nullptr, nullptr, true, copyfunc_name, true);
 
   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
   const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type();
@@ -425,7 +425,7 @@ void XBarrierSetC2::analyze_dominating_barriers() const {
   // Step 2 - Find dominating accesses for each load
   for (uint i = 0; i < barrier_loads.size(); i++) {
     MachNode* const load = barrier_loads.at(i)->as_Mach();
-    const TypePtr* load_adr_type = NULL;
+    const TypePtr* load_adr_type = nullptr;
     intptr_t load_offset = 0;
     const Node* const load_obj = load->get_base_and_disp(load_offset, load_adr_type);
     Block* const load_block = cfg->get_block_for_node(load);
@@ -433,14 +433,14 @@ void XBarrierSetC2::analyze_dominating_barriers() const {
 
     for (uint j = 0; j < mem_ops.size(); j++) {
       MachNode* mem = mem_ops.at(j)->as_Mach();
-      const TypePtr* mem_adr_type = NULL;
+      const TypePtr* mem_adr_type = nullptr;
       intptr_t mem_offset = 0;
       const Node* mem_obj = mem->get_base_and_disp(mem_offset, mem_adr_type);
       Block* mem_block = cfg->get_block_for_node(mem);
       uint mem_index = block_index(mem_block, mem);
 
       if (load_obj == NodeSentinel || mem_obj == NodeSentinel ||
-          load_obj == NULL || mem_obj == NULL ||
+          load_obj == nullptr || mem_obj == nullptr ||
           load_offset < 0 || mem_offset < 0) {
         continue;
       }
@@ -547,7 +547,7 @@ void XBarrierSetC2::compute_liveness_at_stubs() const {
 
       // If this node tracks liveness, update it
       RegMask* const regs = barrier_set_state()->live(node);
-      if (regs != NULL) {
+      if (regs != nullptr) {
         regs->OR(new_live);
       }
     }
diff --git a/src/hotspot/share/gc/x/xArray.inline.hpp b/src/hotspot/share/gc/x/xArray.inline.hpp
index 9d3cfcfbc65..721e3130095 100644
--- a/src/hotspot/share/gc/x/xArray.inline.hpp
+++ b/src/hotspot/share/gc/x/xArray.inline.hpp
@@ -67,7 +67,7 @@ inline XArrayIteratorImpl<T, Parallel>::XArrayIteratorImpl(const T* array, size_
 
 template <typename T, bool Parallel>
 inline XArrayIteratorImpl<T, Parallel>::XArrayIteratorImpl(const XArray<T>* array) :
-    XArrayIteratorImpl(array->is_empty() ? NULL : array->adr_at(0), array->length()) {}
+    XArrayIteratorImpl(array->is_empty() ? nullptr : array->adr_at(0), array->length()) {}
 
 template <typename T, bool Parallel>
 inline bool XArrayIteratorImpl<T, Parallel>::next(T* elem) {
diff --git a/src/hotspot/share/gc/x/xBarrier.cpp b/src/hotspot/share/gc/x/xBarrier.cpp
index a2528a9aaf2..b565df34827 100644
--- a/src/hotspot/share/gc/x/xBarrier.cpp
+++ b/src/hotspot/share/gc/x/xBarrier.cpp
@@ -215,12 +215,12 @@ uintptr_t XBarrier::mark_barrier_on_finalizable_oop_slow_path(uintptr_t addr) {
 //
 oop XBarrier::load_barrier_on_oop_field(volatile narrowOop* p) {
   ShouldNotReachHere();
-  return NULL;
+  return nullptr;
 }
 
 oop XBarrier::load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o) {
   ShouldNotReachHere();
-  return NULL;
+  return nullptr;
 }
 
 void XBarrier::load_barrier_on_oop_array(volatile narrowOop* p, size_t length) {
@@ -229,12 +229,12 @@ void XBarrier::load_barrier_on_oop_array(volatile narrowOop* p, size_t length) {
 
 oop XBarrier::load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o) {
   ShouldNotReachHere();
-  return NULL;
+  return nullptr;
 }
 
 oop XBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o) {
   ShouldNotReachHere();
-  return NULL;
+  return nullptr;
 }
 
 oop XBarrier::weak_load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o) {
@@ -244,19 +244,19 @@ oop XBarrier::weak_load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oo
 
 oop XBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o) {
   ShouldNotReachHere();
-  return NULL;
+  return nullptr;
 }
 
 oop XBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o) {
   ShouldNotReachHere();
-  return NULL;
+  return nullptr;
 }
 
 #ifdef ASSERT
 
 // ON_WEAK barriers should only ever be applied to j.l.r.Reference.referents.
 void XBarrier::verify_on_weak(volatile oop* referent_addr) {
-  if (referent_addr != NULL) {
+  if (referent_addr != nullptr) {
     uintptr_t base = (uintptr_t)referent_addr - java_lang_ref_Reference::referent_offset();
     oop obj = cast_to_oop(base);
     assert(oopDesc::is_oop(obj), "Verification failed for: ref " PTR_FORMAT " obj: " PTR_FORMAT, (uintptr_t)referent_addr, base);
diff --git a/src/hotspot/share/gc/x/xBarrier.inline.hpp b/src/hotspot/share/gc/x/xBarrier.inline.hpp
index 70288b5daac..2319bda4d74 100644
--- a/src/hotspot/share/gc/x/xBarrier.inline.hpp
+++ b/src/hotspot/share/gc/x/xBarrier.inline.hpp
@@ -150,7 +150,7 @@ inline oop XBarrier::barrier(volatile oop* p, oop o) {
   // Slow path
   const uintptr_t good_addr = slow_path(addr);
 
-  if (p != NULL) {
+  if (p != nullptr) {
     self_heal(p, addr, good_addr);
   }
 
@@ -171,7 +171,7 @@ inline oop XBarrier::weak_barrier(volatile oop* p, oop o) {
   // Slow path
   const uintptr_t good_addr = slow_path(addr);
 
-  if (p != NULL) {
+  if (p != nullptr) {
     // The slow path returns a good/marked address or null, but we never mark
     // oops in a weak load barrier so we always heal with the remapped address.
     self_heal(p, addr, XAddress::remapped_or_null(good_addr));
@@ -226,7 +226,7 @@ inline bool XBarrier::during_relocate() {
 // Load barrier
 //
 inline oop XBarrier::load_barrier_on_oop(oop o) {
-  return load_barrier_on_oop_field_preloaded((oop*)NULL, o);
+  return load_barrier_on_oop_field_preloaded((oop*)nullptr, o);
 }
 
 inline oop XBarrier::load_barrier_on_oop_field(volatile oop* p) {
@@ -286,7 +286,7 @@ inline oop XBarrier::weak_load_barrier_on_oop_field_preloaded(volatile oop* p, o
 }
 
 inline oop XBarrier::weak_load_barrier_on_weak_oop(oop o) {
-  return weak_load_barrier_on_weak_oop_field_preloaded((oop*)NULL, o);
+  return weak_load_barrier_on_weak_oop_field_preloaded((oop*)nullptr, o);
 }
 
 inline oop XBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
@@ -300,7 +300,7 @@ inline oop XBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile oop*
 }
 
 inline oop XBarrier::weak_load_barrier_on_phantom_oop(oop o) {
-  return weak_load_barrier_on_phantom_oop_field_preloaded((oop*)NULL, o);
+  return weak_load_barrier_on_phantom_oop_field_preloaded((oop*)nullptr, o);
 }
 
 inline oop XBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
@@ -318,14 +318,14 @@ inline bool XBarrier::is_alive_barrier_on_weak_oop(oop o) {
   // Check if oop is logically non-null. This operation
   // is only valid when resurrection is blocked.
   assert(XResurrection::is_blocked(), "Invalid phase");
-  return weak_load_barrier_on_weak_oop(o) != NULL;
+  return weak_load_barrier_on_weak_oop(o) != nullptr;
 }
 
 inline bool XBarrier::is_alive_barrier_on_phantom_oop(oop o) {
   // Check if oop is logically non-null. This operation
   // is only valid when resurrection is blocked.
   assert(XResurrection::is_blocked(), "Invalid phase");
-  return weak_load_barrier_on_phantom_oop(o) != NULL;
+  return weak_load_barrier_on_phantom_oop(o) != nullptr;
 }
 
 //
diff --git a/src/hotspot/share/gc/x/xBarrierSet.inline.hpp b/src/hotspot/share/gc/x/xBarrierSet.inline.hpp
index a8ec7304e28..5f034b8328b 100644
--- a/src/hotspot/share/gc/x/xBarrierSet.inline.hpp
+++ b/src/hotspot/share/gc/x/xBarrierSet.inline.hpp
@@ -48,7 +48,7 @@ inline void XBarrierSet::AccessBarrier<decorators, BarrierSetT>::verify_decorato
 
 template <DecoratorSet decorators, typename BarrierSetT>
 inline oop* XBarrierSet::AccessBarrier<decorators, BarrierSetT>::field_addr(oop base, ptrdiff_t offset) {
-  assert(base != NULL, "Invalid base");
+  assert(base != nullptr, "Invalid base");
   return reinterpret_cast<oop*>(reinterpret_cast<intptr_t>((void*)base) + offset);
 }
 
@@ -184,7 +184,7 @@ inline bool XBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_arraycopy_i
   if (!HasDecorator<decorators, ARRAYCOPY_CHECKCAST>::value) {
     // No check cast, bulk barrier and bulk copy
     XBarrier::load_barrier_on_oop_array(src, length);
-    return Raw::oop_arraycopy_in_heap(NULL, 0, src, NULL, 0, dst, length);
+    return Raw::oop_arraycopy_in_heap(nullptr, 0, src, nullptr, 0, dst, length);
   }
 
   // Check cast and copy each elements
diff --git a/src/hotspot/share/gc/x/xBarrierSetStackChunk.cpp b/src/hotspot/share/gc/x/xBarrierSetStackChunk.cpp
index 37e36597c1d..1670a00434f 100644
--- a/src/hotspot/share/gc/x/xBarrierSetStackChunk.cpp
+++ b/src/hotspot/share/gc/x/xBarrierSetStackChunk.cpp
@@ -38,10 +38,10 @@ void XBarrierSetStackChunk::decode_gc_mode(stackChunkOop chunk, OopIterator* ite
 
 oop XBarrierSetStackChunk::load_oop(stackChunkOop chunk, oop* addr) {
   oop obj = Atomic::load(addr);
-  return XBarrier::load_barrier_on_oop_field_preloaded((volatile oop*)NULL, obj);
+  return XBarrier::load_barrier_on_oop_field_preloaded((volatile oop*)nullptr, obj);
 }
 
 oop XBarrierSetStackChunk::load_oop(stackChunkOop chunk, narrowOop* addr) {
   ShouldNotReachHere();
-  return NULL;
+  return nullptr;
 }
 
diff --git a/src/hotspot/share/gc/x/xCPU.cpp b/src/hotspot/share/gc/x/xCPU.cpp
index d212739a305..d21d32aeb35 100644
--- a/src/hotspot/share/gc/x/xCPU.cpp
+++ b/src/hotspot/share/gc/x/xCPU.cpp
@@ -32,12 +32,12 @@
 #define XCPU_UNKNOWN_AFFINITY ((Thread*)-1)
 #define XCPU_UNKNOWN_SELF ((Thread*)-2)
 
-PaddedEnd<XCPUAffinity>* XCPU::_affinity = NULL;
+PaddedEnd<XCPUAffinity>* XCPU::_affinity = nullptr;
 THREAD_LOCAL Thread* XCPU::_self = XCPU_UNKNOWN_SELF;
 THREAD_LOCAL uint32_t XCPU::_cpu = 0;
 
 void XCPU::initialize() {
-  assert(_affinity == NULL, "Already initialized");
+  assert(_affinity == nullptr, "Already initialized");
   const uint32_t ncpus = count();
 
   _affinity = PaddedArray<XCPUAffinity, mtGC>::create_unfreeable(ncpus);
diff --git a/src/hotspot/share/gc/x/xCPU.inline.hpp b/src/hotspot/share/gc/x/xCPU.inline.hpp
index ce1f4ec65c9..3cf5bfa96e0 100644
--- a/src/hotspot/share/gc/x/xCPU.inline.hpp
+++ b/src/hotspot/share/gc/x/xCPU.inline.hpp
@@ -34,7 +34,7 @@ inline uint32_t XCPU::count() {
 }
 
 inline uint32_t XCPU::id() {
-  assert(_affinity != NULL, "Not initialized");
+  assert(_affinity != nullptr, "Not initialized");
 
   // Fast path
   if (_affinity[_cpu]._thread == _self) {
diff --git a/src/hotspot/share/gc/x/xCollectedHeap.cpp b/src/hotspot/share/gc/x/xCollectedHeap.cpp
index 935441d627a..e22006dfaed 100644
--- a/src/hotspot/share/gc/x/xCollectedHeap.cpp
+++ b/src/hotspot/share/gc/x/xCollectedHeap.cpp
@@ -175,7 +175,7 @@ MetaWord* XCollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* lo
 
   // Expand and retry allocation
   MetaWord* const result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
-  if (result != NULL) {
+  if (result != nullptr) {
     return result;
   }
 
diff --git a/src/hotspot/share/gc/x/xForwarding.cpp b/src/hotspot/share/gc/x/xForwarding.cpp
index 3e8b50d0d64..aa0cd4dff0b 100644
--- a/src/hotspot/share/gc/x/xForwarding.cpp
+++ b/src/hotspot/share/gc/x/xForwarding.cpp
@@ -157,7 +157,7 @@ XPage* XForwarding::detach_page() {
 
   // Detach and return page
   XPage* const page = _page;
-  _page = NULL;
+  _page = nullptr;
   return page;
 }
 
@@ -171,7 +171,7 @@ void XForwarding::abort_page() {
 
 void XForwarding::verify() const {
   guarantee(_ref_count != 0, "Invalid reference count");
-  guarantee(_page != NULL, "Invalid page");
+  guarantee(_page != nullptr, "Invalid page");
 
   uint32_t live_objects = 0;
   size_t live_bytes = 0;
diff --git a/src/hotspot/share/gc/x/xForwardingAllocator.cpp b/src/hotspot/share/gc/x/xForwardingAllocator.cpp
index fddff50e88f..c8368fde5f5 100644
--- a/src/hotspot/share/gc/x/xForwardingAllocator.cpp
+++ b/src/hotspot/share/gc/x/xForwardingAllocator.cpp
@@ -26,9 +26,9 @@
 #include "memory/allocation.inline.hpp"
 
 XForwardingAllocator::XForwardingAllocator() :
-    _start(NULL),
-    _end(NULL),
-    _top(NULL) {}
+    _start(nullptr),
+    _end(nullptr),
+    _top(nullptr) {}
 
 XForwardingAllocator::~XForwardingAllocator() {
   FREE_C_HEAP_ARRAY(char, _start);
diff --git a/src/hotspot/share/gc/x/xForwardingTable.inline.hpp b/src/hotspot/share/gc/x/xForwardingTable.inline.hpp
index 3ea30d383ec..b65b68da4e2 100644
--- a/src/hotspot/share/gc/x/xForwardingTable.inline.hpp
+++ b/src/hotspot/share/gc/x/xForwardingTable.inline.hpp
@@ -44,7 +44,7 @@ inline void XForwardingTable::insert(XForwarding* forwarding) {
   const uintptr_t offset = forwarding->start();
   const size_t size = forwarding->size();
 
-  assert(_map.get(offset) == NULL, "Invalid entry");
+  assert(_map.get(offset) == nullptr, "Invalid entry");
   _map.put(offset, size, forwarding);
 }
 
@@ -53,7 +53,7 @@ inline void XForwardingTable::remove(XForwarding* forwarding) {
   const size_t size = forwarding->size();
 
   assert(_map.get(offset) == forwarding, "Invalid entry");
-  _map.put(offset, size, NULL);
+  _map.put(offset, size, nullptr);
 }
 
 #endif // SHARE_GC_X_XFORWARDINGTABLE_INLINE_HPP
diff --git a/src/hotspot/share/gc/x/xHeap.cpp b/src/hotspot/share/gc/x/xHeap.cpp
index 21c4d447c9a..a242a8063be 100644
--- a/src/hotspot/share/gc/x/xHeap.cpp
+++ b/src/hotspot/share/gc/x/xHeap.cpp
@@ -55,7 +55,7 @@ static const XStatCounter XCounterUndoPageAllocation("Memory", "Undo Page Allocation", XStatUnitOpsPerSecond);
 static const XStatCounter XCounterOutOfMemory("Memory", "Out Of Memory", XStatUnitOpsPerSecond);
 
-XHeap* XHeap::_heap = NULL;
+XHeap* XHeap::_heap = nullptr;
 
 XHeap::XHeap() :
     _workers(),
@@ -71,7 +71,7 @@ XHeap::XHeap() :
     _unload(&_workers),
     _serviceability(min_capacity(), max_capacity()) {
   // Install global heap instance
-  assert(_heap == NULL, "Already initialized");
+  assert(_heap == nullptr, "Already initialized");
   _heap = this;
 
   // Update statistics
@@ -142,7 +142,7 @@ bool XHeap::is_in(uintptr_t addr) const {
 
   if (XAddress::is_in(addr)) {
     const XPage* const page = _page_table.get(addr);
-    if (page != NULL) {
+    if (page != nullptr) {
       return page->is_in(addr);
     }
   }
@@ -172,7 +172,7 @@ void XHeap::out_of_memory() {
 
 XPage* XHeap::alloc_page(uint8_t type, size_t size, XAllocationFlags flags) {
   XPage* const page = _page_allocator.alloc_page(type, size, flags);
-  if (page != NULL) {
+  if (page != nullptr) {
     // Insert page table entry
     _page_table.insert(page);
   }
diff --git a/src/hotspot/share/gc/x/xHeap.inline.hpp b/src/hotspot/share/gc/x/xHeap.inline.hpp
index 5b3e06b2f4b..793a7200177 100644
--- a/src/hotspot/share/gc/x/xHeap.inline.hpp
+++ b/src/hotspot/share/gc/x/xHeap.inline.hpp
@@ -34,7 +34,7 @@
 #include "utilities/debug.hpp"
 
 inline XHeap* XHeap::heap() {
-  assert(_heap != NULL, "Not initialized");
+  assert(_heap != nullptr, "Not initialized");
   return _heap;
 }
 
@@ -89,7 +89,7 @@ inline uintptr_t XHeap::relocate_object(uintptr_t addr) {
   assert(XGlobalPhase == XPhaseRelocate, "Relocate not allowed");
 
   XForwarding* const forwarding = _forwarding_table.get(addr);
-  if (forwarding == NULL) {
+  if (forwarding == nullptr) {
     // Not forwarding
     return XAddress::good(addr);
   }
@@ -103,7 +103,7 @@ inline uintptr_t XHeap::remap_object(uintptr_t addr) {
          XGlobalPhase == XPhaseMarkCompleted, "Forward not allowed");
 
   XForwarding* const forwarding = _forwarding_table.get(addr);
-  if (forwarding == NULL) {
+  if (forwarding == nullptr) {
     // Not forwarding
     return XAddress::good(addr);
   }
diff --git a/src/hotspot/share/gc/x/xHeapIterator.cpp b/src/hotspot/share/gc/x/xHeapIterator.cpp
index 614f0089356..3d3549e7324 100644
--- a/src/hotspot/share/gc/x/xHeapIterator.cpp
+++ b/src/hotspot/share/gc/x/xHeapIterator.cpp
@@ -255,10 +255,10 @@ static size_t object_index(oop obj) {
 XHeapIteratorBitMap* XHeapIterator::object_bitmap(oop obj) {
   const uintptr_t offset = XAddress::offset(XOop::to_address(obj));
   XHeapIteratorBitMap* bitmap = _bitmaps.get_acquire(offset);
-  if (bitmap == NULL) {
+  if (bitmap == nullptr) {
     XLocker<XLock> locker(&_bitmaps_lock);
     bitmap = _bitmaps.get(offset);
-    if (bitmap == NULL) {
+    if (bitmap == nullptr) {
       // Install new bitmap
       bitmap = new XHeapIteratorBitMap(object_index_max());
       _bitmaps.release_put(offset, bitmap);
@@ -269,7 +269,7 @@ XHeapIteratorBitMap* XHeapIterator::object_bitmap(oop obj) {
 }
 
 bool XHeapIterator::mark_object(oop obj) {
-  if (obj == NULL) {
+  if (obj == nullptr) {
     return false;
   }
 
diff --git a/src/hotspot/share/gc/x/xList.inline.hpp b/src/hotspot/share/gc/x/xList.inline.hpp
index 25c28fbda43..22ca5b82059 100644
--- a/src/hotspot/share/gc/x/xList.inline.hpp
+++ b/src/hotspot/share/gc/x/xList.inline.hpp
@@ -110,12 +110,12 @@ inline bool XList<T>::is_empty() const {
 
 template <typename T>
 inline T* XList<T>::first() const {
-  return is_empty() ? NULL : cast_to_outer(_head._next);
+  return is_empty() ? nullptr : cast_to_outer(_head._next);
 }
 
 template <typename T>
 inline T* XList<T>::last() const {
-  return is_empty() ? NULL : cast_to_outer(_head._prev);
+  return is_empty() ? nullptr : cast_to_outer(_head._prev);
 }
 
 template <typename T>
@@ -128,7 +128,7 @@ inline T* XList<T>::next(T* elem) const {
   XListNode<T>* const next = node->_next;
   next->verify_links_linked();
 
-  return (next == &_head) ? NULL : cast_to_outer(next);
+  return (next == &_head) ? nullptr : cast_to_outer(next);
 }
 
 template <typename T>
@@ -141,7 +141,7 @@ inline T* XList<T>::prev(T* elem) const {
   XListNode<T>* const prev = node->_prev;
   prev->verify_links_linked();
 
-  return (prev == &_head) ? NULL : cast_to_outer(prev);
+  return (prev == &_head) ? nullptr : cast_to_outer(prev);
 }
 
 template <typename T>
@@ -191,7 +191,7 @@ inline void XList<T>::remove(T* elem) {
 template <typename T>
 inline T* XList<T>::remove_first() {
   T* elem = first();
-  if (elem != NULL) {
+  if (elem != nullptr) {
     remove(elem);
   }
 
@@ -201,7 +201,7 @@ inline T* XList<T>::remove_first() {
 template <typename T>
 inline T* XList<T>::remove_last() {
   T* elem = last();
-  if (elem != NULL) {
+  if (elem != nullptr) {
     remove(elem);
   }
 
@@ -215,7 +215,7 @@ inline XListIteratorImpl<T, Forward>::XListIteratorImpl(const XList<T>* list) :
 
 template <typename T, bool Forward>
 inline bool XListIteratorImpl<T, Forward>::next(T** elem) {
-  if (_next != NULL) {
+  if (_next != nullptr) {
     *elem = _next;
     _next = Forward ? _list->next(_next) : _list->prev(_next);
     return true;
@@ -232,7 +232,7 @@ inline XListRemoveIteratorImpl<T, Forward>::XListRemoveIteratorImpl(XList<T>* li
 
 template <typename T, bool Forward>
 inline bool XListRemoveIteratorImpl<T, Forward>::next(T** elem) {
   *elem = Forward ? _list->remove_first() : _list->remove_last();
-  return *elem != NULL;
+  return *elem != nullptr;
 }
 
 #endif // SHARE_GC_X_XLIST_INLINE_HPP
diff --git a/src/hotspot/share/gc/x/xLock.inline.hpp b/src/hotspot/share/gc/x/xLock.inline.hpp
index 07a673376a6..a72b65aa228 100644
--- a/src/hotspot/share/gc/x/xLock.inline.hpp
+++ b/src/hotspot/share/gc/x/xLock.inline.hpp
@@ -45,7 +45,7 @@ inline void XLock::unlock() {
 
 inline XReentrantLock::XReentrantLock() :
     _lock(),
-    _owner(NULL),
+    _owner(nullptr),
     _count(0) {}
 
 inline void XReentrantLock::lock() {
@@ -67,7 +67,7 @@ inline void XReentrantLock::unlock() {
   _count--;
 
   if (_count == 0) {
-    Atomic::store(&_owner, (Thread*)NULL);
+    Atomic::store(&_owner, (Thread*)nullptr);
     _lock.unlock();
   }
 }
@@ -105,14 +105,14 @@ inline void XConditionLock::notify_all() {
 template <typename T>
 inline XLocker<T>::XLocker(T* lock) :
     _lock(lock) {
-  if (_lock != NULL) {
+  if (_lock != nullptr) {
     _lock->lock();
   }
 }
 
 template <typename T>
 inline XLocker<T>::~XLocker() {
-  if (_lock != NULL) {
+  if (_lock != nullptr) {
     _lock->unlock();
   }
 }
diff --git a/src/hotspot/share/gc/x/xMark.cpp b/src/hotspot/share/gc/x/xMark.cpp
index 16574364ef9..e4e5c25f34f 100644
--- a/src/hotspot/share/gc/x/xMark.cpp
+++ b/src/hotspot/share/gc/x/xMark.cpp
@@ -254,7 +254,7 @@ public:
           ? ClassLoaderData::_claim_finalizable
           : ClassLoaderData::_claim_strong,
         finalizable
-          ? NULL
+          ? nullptr
           : XHeap::heap()->reference_discoverer()) {}
 
   virtual void do_oop(oop* p) {
@@ -403,7 +403,7 @@ bool XMark::try_steal_local(XMarkContext* context) {
        victim_stripe != stripe;
        victim_stripe = _stripes.stripe_next(victim_stripe)) {
     XMarkStack* const stack = stacks->steal(&_stripes, victim_stripe);
-    if (stack != NULL) {
+    if (stack != nullptr) {
       // Success, install the stolen stack
       stacks->install(&_stripes, stripe, stack);
       return true;
@@ -423,7 +423,7 @@ bool XMark::try_steal_global(XMarkContext* context) {
        victim_stripe != stripe;
        victim_stripe = _stripes.stripe_next(victim_stripe)) {
     XMarkStack* const stack = victim_stripe->steal_stack();
-    if (stack != NULL) {
+    if (stack != nullptr) {
       // Success, install the stolen stack
       stacks->install(&_stripes, stripe, stack);
       return true;
diff --git a/src/hotspot/share/gc/x/xMarkCache.cpp b/src/hotspot/share/gc/x/xMarkCache.cpp
index bb70683221c..60d1037a22d 100644
--- a/src/hotspot/share/gc/x/xMarkCache.cpp
+++ b/src/hotspot/share/gc/x/xMarkCache.cpp
@@ -27,7 +27,7 @@
 #include "utilities/powerOfTwo.hpp"
 
 XMarkCacheEntry::XMarkCacheEntry() :
-    _page(NULL),
+    _page(nullptr),
     _objects(0),
     _bytes(0) {}
 
diff --git a/src/hotspot/share/gc/x/xMarkCache.inline.hpp b/src/hotspot/share/gc/x/xMarkCache.inline.hpp
index 8eaf04a68fe..27dd1b93339 100644
--- a/src/hotspot/share/gc/x/xMarkCache.inline.hpp
+++ b/src/hotspot/share/gc/x/xMarkCache.inline.hpp
@@ -43,10 +43,10 @@ inline void XMarkCacheEntry::inc_live(XPage* page, size_t bytes) {
 }
 
 inline void XMarkCacheEntry::evict() {
-  if (_page != NULL) {
+  if (_page != nullptr) {
     // Write cached data out to page
     _page->inc_live(_objects, _bytes);
-    _page = NULL;
+    _page = nullptr;
   }
 }
 
diff --git a/src/hotspot/share/gc/x/xMarkStack.cpp b/src/hotspot/share/gc/x/xMarkStack.cpp
index 384dc200a95..6f7619c9a35 100644
--- a/src/hotspot/share/gc/x/xMarkStack.cpp
+++ b/src/hotspot/share/gc/x/xMarkStack.cpp
@@ -79,16 +79,16 @@ XMarkStripe* XMarkStripeSet::stripe_for_worker(uint nworkers, uint worker_id) {
 }
 
 XMarkThreadLocalStacks::XMarkThreadLocalStacks() :
-    _magazine(NULL) {
+    _magazine(nullptr) {
   for (size_t i = 0; i < XMarkStripesMax; i++) {
-    _stacks[i] = NULL;
+    _stacks[i] = nullptr;
   }
 }
 
 bool XMarkThreadLocalStacks::is_empty(const XMarkStripeSet* stripes) const {
   for (size_t i = 0; i < stripes->nstripes(); i++) {
     XMarkStack* const stack = _stacks[i];
-    if (stack != NULL) {
+    if (stack != nullptr) {
       return false;
     }
   }
@@ -97,21 +97,21 @@ bool XMarkThreadLocalStacks::is_empty(const XMarkStripeSet* stripes) const {
 }
 
 XMarkStack* XMarkThreadLocalStacks::allocate_stack(XMarkStackAllocator* allocator) {
-  if (_magazine == NULL) {
+  if (_magazine == nullptr) {
     // Allocate new magazine
     _magazine = allocator->alloc_magazine();
-    if (_magazine == NULL) {
-      return NULL;
+    if (_magazine == nullptr) {
+      return nullptr;
     }
   }
 
-  XMarkStack* stack = NULL;
+  XMarkStack* stack = nullptr;
 
   if (!_magazine->pop(stack)) {
     // Magazine is empty, convert magazine into a new stack
     _magazine->~XMarkStackMagazine();
     stack = new ((void*)_magazine) XMarkStack();
-    _magazine = NULL;
+    _magazine = nullptr;
   }
 
   return stack;
@@ -119,7 +119,7 @@ XMarkStack* XMarkThreadLocalStacks::allocate_stack(XMarkStackAllocato
 
 void XMarkThreadLocalStacks::free_stack(XMarkStackAllocator* allocator, XMarkStack* stack) {
   for (;;) {
-    if (_magazine == NULL) {
+    if (_magazine == nullptr) {
       // Convert stack into a new magazine
       stack->~XMarkStack();
       _magazine = new ((void*)stack) XMarkStackMagazine();
@@ -133,7 +133,7 @@ void XMarkThreadLocalStacks::free_stack(XMarkStackAllocator* allocator, XMarkSta
 
     // Free and uninstall full magazine
     allocator->free_magazine(_magazine);
-    _magazine = NULL;
+    _magazine = nullptr;
   }
 }
 
@@ -145,10 +145,10 @@ bool XMarkThreadLocalStacks::push_slow(XMarkStackAllocator* allocator,
   XMarkStack* stack = *stackp;
 
   for (;;) {
-    if (stack == NULL) {
+    if (stack == nullptr) {
       // Allocate and install new stack
       *stackp = stack = allocate_stack(allocator);
-      if (stack == NULL) {
+      if (stack == nullptr) {
         // Out of mark stack memory
         return false;
       }
@@ -161,7 +161,7 @@ bool XMarkThreadLocalStacks::push_slow(XMarkStackAllocator* allocator,
 
     // Publish/Overflow and uninstall stack
     stripe->publish_stack(stack, publish);
-    *stackp = stack = NULL;
+    *stackp = stack = nullptr;
   }
 }
 
@@ -172,10 +172,10 @@ bool XMarkThreadLocalStacks::pop_slow(XMarkStackAllocator* allocator,
   XMarkStack* stack = *stackp;
 
   for (;;) {
-    if (stack == NULL) {
+    if (stack == nullptr) {
      // Try steal and install stack
       *stackp = stack = stripe->steal_stack();
-      if (stack == NULL) {
+      if (stack == nullptr) {
         // Nothing to steal
         return false;
       }
@@ -188,7 +188,7 @@ bool XMarkThreadLocalStacks::pop_slow(XMarkStackAllocator* allocator,
 
     // Free and uninstall stack
     free_stack(allocator, stack);
-    *stackp = stack = NULL;
+    *stackp = stack = nullptr;
   }
 }
 
@@ -200,7 +200,7 @@ bool XMarkThreadLocalStacks::flush(XMarkStackAllocator* allocator, XMarkStripeSe
     XMarkStripe* const stripe = stripes->stripe_at(i);
     XMarkStack** const stackp = &_stacks[i];
     XMarkStack* const stack = *stackp;
-    if (stack == NULL) {
+    if (stack == nullptr) {
       continue;
     }
 
@@ -211,7 +211,7 @@ bool XMarkThreadLocalStacks::flush(XMarkStackAllocator* allocator, XMarkStripeSe
       stripe->publish_stack(stack);
       flushed = true;
     }
-    *stackp = NULL;
+    *stackp = nullptr;
   }
 
   return flushed;
@@ -219,8 +219,8 @@ bool XMarkThreadLocalStacks::flush(XMarkStackAllocator* allocator, XMarkStripeSe
 
 void XMarkThreadLocalStacks::free(XMarkStackAllocator* allocator) {
   // Free and uninstall magazine
-  if (_magazine != NULL) {
+  if (_magazine != nullptr) {
     allocator->free_magazine(_magazine);
-    _magazine = NULL;
+    _magazine = nullptr;
   }
 }
diff --git a/src/hotspot/share/gc/x/xMarkStack.inline.hpp b/src/hotspot/share/gc/x/xMarkStack.inline.hpp
index 95047c0954a..e643c1e3224 100644
--- a/src/hotspot/share/gc/x/xMarkStack.inline.hpp
+++ b/src/hotspot/share/gc/x/xMarkStack.inline.hpp
@@ -32,7 +32,7 @@ template <typename T, size_t S>
 inline XStack<T, S>::XStack() :
     _top(0),
-    _next(NULL) {}
+    _next(nullptr) {}
 
 template <typename T, size_t S>
 inline bool XStack<T, S>::is_empty() const {
@@ -76,13 +76,13 @@ inline XStack<T, S>** XStack<T, S>::next_addr() {
 
 template <typename T>
 inline XStackList<T>::XStackList() :
-    _head(encode_versioned_pointer(NULL, 0)) {}
+    _head(encode_versioned_pointer(nullptr, 0)) {}
 
 template <typename T>
 inline T* XStackList<T>::encode_versioned_pointer(const T* stack, uint32_t version) const {
   uint64_t addr;
 
-  if (stack == NULL) {
+  if (stack == nullptr) {
     addr = (uint32_t)-1;
   } else {
     addr = ((uint64_t)stack - XMarkStackSpaceStart) >> XMarkStackSizeShift;
@@ -96,7 +96,7 @@ inline void XStackList<T>::decode_versioned_pointer(const T* vstack, T** stack,
   const uint64_t addr = (uint64_t)vstack >> 32;
 
   if (addr == (uint32_t)-1) {
-    *stack = NULL;
+    *stack = nullptr;
   } else {
     *stack = (T*)((addr << XMarkStackSizeShift) + XMarkStackSpaceStart);
   }
@@ -107,11 +107,11 @@ inline void XStackList<T>::decode_versioned_pointer(const T* vstack, T** stack,
 
 template <typename T>
 inline bool XStackList<T>::is_empty() const {
   const T* vstack = _head;
-  T* stack = NULL;
+  T* stack = nullptr;
   uint32_t version = 0;
 
   decode_versioned_pointer(vstack, &stack, &version);
-  return stack == NULL;
+  return stack == nullptr;
 }
 
 template <typename T>
@@ -136,13 +136,13 @@ inline void XStackList<T>::push(T* stack) {
 template <typename T>
 inline T* XStackList<T>::pop() {
   T* vstack = _head;
-  T* stack = NULL;
+  T* stack = nullptr;
   uint32_t version = 0;
 
   for (;;) {
     decode_versioned_pointer(vstack, &stack, &version);
-    if (stack == NULL) {
-      return NULL;
+    if (stack == nullptr) {
+      return nullptr;
     }
 
     T* const new_vstack = encode_versioned_pointer(stack->next(), version + 1);
@@ -159,7 +159,7 @@ inline T* XStackList<T>::pop() {
 
 template <typename T>
 inline void XStackList<T>::clear() {
-  _head = encode_versioned_pointer(NULL, 0);
+  _head = encode_versioned_pointer(nullptr, 0);
 }
 
 inline bool XMarkStripe::is_empty() const {
@@ -183,7 +183,7 @@ inline void XMarkStripe::publish_stack(XMarkStack* stack, bool publish) {
 inline XMarkStack* XMarkStripe::steal_stack() {
   // Steal overflowed stacks first, then published stacks
   XMarkStack* const stack = _overflowed.pop();
-  if (stack != NULL) {
+  if (stack != nullptr) {
     return stack;
   }
 
@@ -221,7 +221,7 @@ inline void XMarkThreadLocalStacks::install(XMarkStripeSet* stripes,
                                             XMarkStripe* stripe,
                                             XMarkStack* stack) {
   XMarkStack** const stackp = &_stacks[stripes->stripe_id(stripe)];
-  assert(*stackp == NULL, "Should be empty");
+  assert(*stackp == nullptr, "Should be empty");
   *stackp = stack;
 }
 
@@ -229,8 +229,8 @@ inline XMarkStack* XMarkThreadLocalStacks::steal(XMarkStripeSet* stripes,
                                                  XMarkStripe* stripe) {
   XMarkStack** const stackp = &_stacks[stripes->stripe_id(stripe)];
   XMarkStack* const stack = *stackp;
-  if (stack != NULL) {
-    *stackp = NULL;
+  if (stack != nullptr) {
+    *stackp = nullptr;
   }
 
   return stack;
@@ -243,7 +243,7 @@ inline bool XMarkThreadLocalStacks::push(XMarkStackAllocator* allocator,
                                          bool publish) {
   XMarkStack** const stackp = &_stacks[stripes->stripe_id(stripe)];
   XMarkStack* const stack = *stackp;
-  if (stack != NULL && stack->push(entry)) {
+  if (stack != nullptr && stack->push(entry)) {
     return true;
   }
 
@@ -256,7 +256,7 @@ inline bool XMarkThreadLocalStacks::pop(XMarkStackAllocator* allocator,
                                         XMarkStackEntry& entry) {
   XMarkStack** const stackp = &_stacks[stripes->stripe_id(stripe)];
   XMarkStack* const stack = *stackp;
-  if (stack != NULL && stack->pop(entry)) {
+  if (stack != nullptr && stack->pop(entry)) {
     return true;
   }
 
diff --git a/src/hotspot/share/gc/x/xMarkStackAllocator.cpp b/src/hotspot/share/gc/x/xMarkStackAllocator.cpp
index 95924b35392..b5cc3ad641a 100644
--- a/src/hotspot/share/gc/x/xMarkStackAllocator.cpp
+++ b/src/hotspot/share/gc/x/xMarkStackAllocator.cpp
@@ -198,14 +198,14 @@ XMarkStackMagazine* XMarkStackAllocator::create_magazine_from_space(uintptr_t ad
 XMarkStackMagazine* XMarkStackAllocator::alloc_magazine() {
   // Try allocating from the free list first
   XMarkStackMagazine* const magazine = _freelist.pop();
-  if (magazine != NULL) {
+  if (magazine != nullptr) {
     return magazine;
   }
 
   // Allocate new magazine
   const uintptr_t addr = _space.alloc(XMarkStackMagazineSize);
   if (addr == 0) {
-    return NULL;
+    return nullptr;
   }
 
   return create_magazine_from_space(addr, XMarkStackMagazineSize);
diff --git a/src/hotspot/share/gc/x/xMemory.cpp b/src/hotspot/share/gc/x/xMemory.cpp
index bdfa45339c2..e394f580ab9 100644
--- a/src/hotspot/share/gc/x/xMemory.cpp
+++ b/src/hotspot/share/gc/x/xMemory.cpp
@@ -28,54 +28,54 @@
 
 XMemory* XMemoryManager::create(uintptr_t start, size_t size) {
   XMemory* const area = new XMemory(start, size);
-  if (_callbacks._create != NULL) {
+  if (_callbacks._create != nullptr) {
     _callbacks._create(area);
   }
 
   return area;
 }
 
 void XMemoryManager::destroy(XMemory* area) {
-  if (_callbacks._destroy != NULL) {
+  if (_callbacks._destroy != nullptr) {
     _callbacks._destroy(area);
   }
 
   delete area;
 }
 
 void XMemoryManager::shrink_from_front(XMemory* area, size_t size) {
-  if (_callbacks._shrink_from_front != NULL) {
+  if (_callbacks._shrink_from_front != nullptr) {
     _callbacks._shrink_from_front(area, size);
   }
 
   area->shrink_from_front(size);
 }
 
 void XMemoryManager::shrink_from_back(XMemory* area, size_t size) {
-  if (_callbacks._shrink_from_back != NULL) {
+  if (_callbacks._shrink_from_back != nullptr) {
     _callbacks._shrink_from_back(area, size);
   }
 
   area->shrink_from_back(size);
 }
 
 void XMemoryManager::grow_from_front(XMemory* area, size_t size) {
-  if (_callbacks._grow_from_front != NULL) {
+  if (_callbacks._grow_from_front != nullptr) {
     _callbacks._grow_from_front(area, size);
   }
 
   area->grow_from_front(size);
 }
 
 void XMemoryManager::grow_from_back(XMemory* area, size_t size) {
-  if (_callbacks._grow_from_back != NULL) {
+  if (_callbacks._grow_from_back != nullptr) {
     _callbacks._grow_from_back(area, size);
   }
 
   area->grow_from_back(size);
 }
 
 XMemoryManager::Callbacks::Callbacks() :
-    _create(NULL),
-    _destroy(NULL),
-    _shrink_from_front(NULL),
-    _shrink_from_back(NULL),
-    _grow_from_front(NULL),
-    _grow_from_back(NULL) {}
+    _create(nullptr),
+    _destroy(nullptr),
+    _shrink_from_front(nullptr),
+    _shrink_from_back(nullptr),
+    _grow_from_front(nullptr),
+    _grow_from_back(nullptr) {}
 
 XMemoryManager::XMemoryManager() :
     _freelist(),
@@ -89,7 +89,7 @@ uintptr_t XMemoryManager::peek_low_address() const {
   XLocker<XLock> locker(&_lock);
 
   const XMemory* const area = _freelist.first();
-  if (area != NULL) {
+  if (area != nullptr) {
     return area->start();
   }
 
@@ -126,7 +126,7 @@ uintptr_t XMemoryManager::alloc_low_address_at_most(size_t size, size_t* allocat
   XLocker<XLock> locker(&_lock);
 
   XMemory* area = _freelist.first();
-  if (area != NULL) {
+  if (area != nullptr) {
     if (area->size() <= size) {
       // Smaller than or equal to requested, remove area
       const uintptr_t start = area->start();
@@ -182,7 +182,7 @@ void XMemoryManager::free(uintptr_t start, size_t size) {
   for (XMemory* area; iter.next(&area);) {
     if (start < area->start()) {
       XMemory* const prev = _freelist.prev(area);
-      if (prev != NULL && start == prev->end()) {
+      if (prev != nullptr && start == prev->end()) {
         if (end == area->start()) {
           // Merge with prev and current area
           grow_from_back(prev, size + area->size());
@@ -209,7 +209,7 @@ void XMemoryManager::free(uintptr_t start, size_t size) {
 
   // Insert last
   XMemory* const last = _freelist.last();
-  if (last != NULL && start == last->end()) {
+  if (last != nullptr && start == last->end()) {
     // Merge with last area
     grow_from_back(last, size);
   } else {
diff --git a/src/hotspot/share/gc/x/xNMethod.cpp b/src/hotspot/share/gc/x/xNMethod.cpp
index 20d8982bc07..d86828aa847 100644
--- a/src/hotspot/share/gc/x/xNMethod.cpp
+++ b/src/hotspot/share/gc/x/xNMethod.cpp
@@ -74,17 +74,17 @@ void XNMethod::attach_gc_data(nmethod* nm) {
       continue;
     }
 
-    if (r->oop_value() != NULL) {
-      // Non-NULL immediate oop found. NULL oops can safely be
+    if (r->oop_value() != nullptr) {
+      // Non-null immediate oop found. Null oops can safely be
       // ignored since the method will be re-registered if they
-      // are later patched to be non-NULL.
+      // are later patched to be non-null.
       immediate_oops.push(r->oop_addr());
     }
   }
 
   // Attach GC data to nmethod
   XNMethodData* data = gc_data(nm);
-  if (data == NULL) {
+  if (data == nullptr) {
     data = new XNMethodData();
     set_gc_data(nm, data);
   }
diff --git a/src/hotspot/share/gc/x/xNMethodData.cpp b/src/hotspot/share/gc/x/xNMethodData.cpp
index f024e0d3cee..effc12b2255 100644
--- a/src/hotspot/share/gc/x/xNMethodData.cpp
+++ b/src/hotspot/share/gc/x/xNMethodData.cpp
@@ -66,7 +66,7 @@ bool XNMethodDataOops::has_non_immediates() const {
 
 XNMethodData::XNMethodData() :
     _lock(),
-    _oops(NULL) {}
+    _oops(nullptr) {}
 
 XNMethodData::~XNMethodData() {
   XNMethodDataOops::destroy(_oops);
diff --git a/src/hotspot/share/gc/x/xNMethodTable.cpp b/src/hotspot/share/gc/x/xNMethodTable.cpp
index 70ceb7a9219..f866c581684 100644
--- a/src/hotspot/share/gc/x/xNMethodTable.cpp
+++ b/src/hotspot/share/gc/x/xNMethodTable.cpp
@@ -45,7 +45,7 @@
 #include "utilities/debug.hpp"
 #include "utilities/powerOfTwo.hpp"
 
-XNMethodTableEntry* XNMethodTable::_table = NULL;
+XNMethodTableEntry* XNMethodTable::_table = nullptr;
 size_t XNMethodTable::_size = 0;
 size_t XNMethodTable::_nregistered = 0;
 size_t XNMethodTable::_nunregistered = 0;
diff --git a/src/hotspot/share/gc/x/xNMethodTableEntry.hpp b/src/hotspot/share/gc/x/xNMethodTableEntry.hpp
index 78138492ef9..9f06abb0bdb 100644
--- a/src/hotspot/share/gc/x/xNMethodTableEntry.hpp
+++ b/src/hotspot/share/gc/x/xNMethodTableEntry.hpp
@@ -58,7 +58,7 @@ public:
   explicit XNMethodTableEntry(bool unregistered = false) :
       _entry(field_registered::encode(false) |
              field_unregistered::encode(unregistered) |
-             field_method::encode(NULL)) {}
+             field_method::encode(nullptr)) {}
 
   explicit XNMethodTableEntry(nmethod* method) :
       _entry(field_registered::encode(true) |
diff --git a/src/hotspot/share/gc/x/xNMethodTableIteration.cpp b/src/hotspot/share/gc/x/xNMethodTableIteration.cpp
index fd8bd8561b4..c9248e63420 100644
--- a/src/hotspot/share/gc/x/xNMethodTableIteration.cpp
+++ b/src/hotspot/share/gc/x/xNMethodTableIteration.cpp
@@ -30,12 +30,12 @@
 #include "utilities/globalDefinitions.hpp"
 
 XNMethodTableIteration::XNMethodTableIteration() :
-    _table(NULL),
+    _table(nullptr),
     _size(0),
     _claimed(0) {}
 
 bool XNMethodTableIteration::in_progress() const {
-  return _table != NULL;
+  return _table != nullptr;
 }
 
 void XNMethodTableIteration::nmethods_do_begin(XNMethodTableEntry* table, size_t size) {
@@ -50,7 +50,7 @@ void XNMethodTableIteration::nmethods_do_end() {
   assert(_claimed >= _size, "Failed to claim all table entries");
 
   // Finish iteration
-  _table = NULL;
+  _table = nullptr;
 }
 
 void XNMethodTableIteration::nmethods_do(NMethodClosure* cl) {
diff --git a/src/hotspot/share/gc/x/xObjectAllocator.cpp b/src/hotspot/share/gc/x/xObjectAllocator.cpp
index 589e2f2feba..26981ce9131 100644
--- a/src/hotspot/share/gc/x/xObjectAllocator.cpp
+++ b/src/hotspot/share/gc/x/xObjectAllocator.cpp
@@ -46,8 +46,8 @@ XObjectAllocator::XObjectAllocator() :
     _undone(0),
     _alloc_for_relocation(0),
     _undo_alloc_for_relocation(0),
-    _shared_medium_page(NULL),
-    _shared_small_page(NULL) {}
+    _shared_medium_page(nullptr),
+    _shared_small_page(nullptr) {}
 
 XPage** XObjectAllocator::shared_small_page_addr() {
   return _use_per_cpu_shared_small_pages ? _shared_small_page.addr() : _shared_small_page.addr(0);
@@ -70,7 +70,7 @@ void XObjectAllocator::register_undo_alloc_for_relocation(const XPage* page, siz
 
 XPage* XObjectAllocator::alloc_page(uint8_t type, size_t size, XAllocationFlags flags) {
   XPage* const page = XHeap::heap()->alloc_page(type, size, flags);
-  if (page != NULL) {
+  if (page != nullptr) {
     // Increment used bytes
     Atomic::add(_used.addr(), size);
   }
@@ -93,14 +93,14 @@ uintptr_t XObjectAllocator::alloc_object_in_shared_page(XPage** shared_page,
   uintptr_t addr = 0;
   XPage* page = Atomic::load_acquire(shared_page);
 
-  if (page != NULL) {
+  if (page != nullptr) {
     addr = page->alloc_object_atomic(size);
   }
 
   if (addr == 0) {
     // Allocate new page
     XPage* const new_page = alloc_page(page_type, page_size, flags);
-    if (new_page != NULL) {
+    if (new_page != nullptr) {
       // Allocate object before installing the new page
       addr = new_page->alloc_object(size);
 
@@ -108,7 +108,7 @@ uintptr_t XObjectAllocator::alloc_object_in_shared_page(XPage** shared_page,
       // Install new page
       XPage* const prev_page = Atomic::cmpxchg(shared_page, page, new_page);
       if (prev_page != page) {
-        if (prev_page == NULL) {
+        if (prev_page == nullptr) {
          // Previous page was retired, retry installing the new page
           page = prev_page;
           goto retry;
@@ -140,7 +140,7 @@ uintptr_t XObjectAllocator::alloc_large_object(size_t size, XAllocationFlags fla
   // Allocate new large page
   const size_t page_size = align_up(size, XGranuleSize);
   XPage* const page = alloc_page(XPageTypeLarge, page_size, flags);
-  if (page != NULL) {
+  if (page != nullptr) {
     // Allocate the object
     addr = page->alloc_object(size);
   }
@@ -224,7 +224,7 @@ size_t XObjectAllocator::remaining() const {
   assert(XThread::is_java(), "Should be a Java thread");
 
   const XPage* const page = Atomic::load_acquire(shared_small_page_addr());
-  if (page != NULL) {
+  if (page != nullptr) {
     return page->remaining();
   }
 
@@ -262,6 +262,6 @@ void XObjectAllocator::retire_pages() {
   _undo_alloc_for_relocation.set_all(0);
 
   // Reset allocation pages
-  _shared_medium_page.set(NULL);
-  _shared_small_page.set_all(NULL);
+  _shared_medium_page.set(nullptr);
+  _shared_small_page.set_all(nullptr);
 }
diff --git a/src/hotspot/share/gc/x/xPage.cpp b/src/hotspot/share/gc/x/xPage.cpp
index 896adb82768..b48500ab96e 100644
--- a/src/hotspot/share/gc/x/xPage.cpp
+++ b/src/hotspot/share/gc/x/xPage.cpp
@@ -103,7 +103,7 @@ XPage* XPage::split_committed() {
   const XPhysicalMemory pmem = _physical.split_committed();
   if (pmem.is_null()) {
     // Nothing committed
-    return NULL;
+    return nullptr;
   }
 
   assert(!_physical.is_null(), "Should not be null");
diff --git a/src/hotspot/share/gc/x/xPageAllocator.cpp b/src/hotspot/share/gc/x/xPageAllocator.cpp
index 10885c86488..ccc715682c0 100644
--- a/src/hotspot/share/gc/x/xPageAllocator.cpp
+++ b/src/hotspot/share/gc/x/xPageAllocator.cpp
@@ -219,7 +219,7 @@ bool XPageAllocator::prime_cache(XWorkers* workers, size_t size) {
   flags.set_low_address();
 
   XPage* const page = alloc_page(XPageTypeLarge, size, flags);
-  if (page == NULL) {
+  if (page == nullptr) {
     return false;
   }
 
@@ -400,7 +400,7 @@ bool XPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, XList<XPage>* pages) {
 
   // Try allocate from the page cache
   XPage* const page = _cache.alloc_page(type, size);
-  if (page != NULL) {
+  if (page != nullptr) {
     // Success
     pages->insert_last(page);
     return true;
@@ -519,7 +519,7 @@ XPage* XPageAllocator::alloc_page_create(XPageAllocation* allocation) {
   const XVirtualMemory vmem = _virtual.alloc(size, allocation->flags().low_address());
   if (vmem.is_null()) {
     log_error(gc)("Out of address space");
-    return NULL;
+    return nullptr;
   }
 
   XPhysicalMemory pmem;
@@ -607,9 +607,9 @@ XPage* XPageAllocator::alloc_page_finalize(XPageAllocation* allocation) {
 
   // Slow path
   XPage* const page = alloc_page_create(allocation);
-  if (page == NULL) {
+  if (page == nullptr) {
     // Out of address space
-    return NULL;
+    return nullptr;
   }
 
   // Commit page
@@ -625,12 +625,12 @@ XPage* XPageAllocator::alloc_page_finalize(XPageAllocation* allocation) {
   XPage* const committed_page = page->split_committed();
   destroy_page(page);
 
-  if (committed_page != NULL) {
+  if (committed_page != nullptr) {
     map_page(committed_page);
     allocation->pages()->insert_last(committed_page);
   }
 
-  return NULL;
+  return nullptr;
 }
 
 void XPageAllocator::alloc_page_failed(XPageAllocation* allocation) {
@@ -667,11 +667,11 @@ retry:
   // block in a safepoint if the non-blocking flag is not set.
   if (!alloc_page_or_stall(&allocation)) {
     // Out of memory
-    return NULL;
+    return nullptr;
   }
 
   XPage* const page = alloc_page_finalize(&allocation);
-  if (page == NULL) {
+  if (page == nullptr) {
     // Failed to commit or map. Clean up and retry, in the hope that
     // we can still allocate by flushing the page cache (more aggressively).
     alloc_page_failed(&allocation);
@@ -703,7 +703,7 @@ retry:
 void XPageAllocator::satisfy_stalled() {
   for (;;) {
     XPageAllocation* const allocation = _stalled.first();
-    if (allocation == NULL) {
+    if (allocation == nullptr) {
       // Allocation queue is empty
       return;
     }
@@ -850,7 +850,7 @@ void XPageAllocator::check_out_of_memory() {
 
   // Fail allocation requests that were enqueued before the
   // last GC cycle started, otherwise start a new GC cycle.
-  for (XPageAllocation* allocation = _stalled.first(); allocation != NULL; allocation = _stalled.first()) {
+  for (XPageAllocation* allocation = _stalled.first(); allocation != nullptr; allocation = _stalled.first()) {
     if (allocation->seqnum() == XGlobalSeqNum) {
       // Start a new GC cycle, keep allocation requests enqueued
       allocation->satisfy(XPageAllocationStallStartGC);
diff --git a/src/hotspot/share/gc/x/xPageCache.cpp b/src/hotspot/share/gc/x/xPageCache.cpp
index 8f8a6636369..d38b0646a8a 100644
--- a/src/hotspot/share/gc/x/xPageCache.cpp
+++ b/src/hotspot/share/gc/x/xPageCache.cpp
@@ -66,7 +66,7 @@ XPage* XPageCache::alloc_small_page() {
 
     // Try NUMA local page cache
     XPage* const l1_page = _small.get(numa_id).remove_first();
-    if (l1_page != NULL) {
+    if (l1_page != nullptr) {
       XStatInc(XCounterPageCacheHitL1);
       return l1_page;
     }
@@ -80,7 +80,7 @@ XPage* XPageCache::alloc_small_page() {
       }
 
       XPage* const l2_page = _small.get(remote_numa_id).remove_first();
-      if (l2_page != NULL) {
+      if (l2_page != nullptr) {
         XStatInc(XCounterPageCacheHitL2);
         return l2_page;
       }
@@ -88,17 +88,17 @@ XPage* XPageCache::alloc_small_page() {
       remote_numa_id++;
     }
 
-  return NULL;
+  return nullptr;
 }
 
 XPage* XPageCache::alloc_medium_page() {
   XPage* const page = _medium.remove_first();
-  if (page != NULL) {
+  if (page != nullptr) {
     XStatInc(XCounterPageCacheHitL1);
     return page;
   }
 
-  return NULL;
+  return nullptr;
 }
 
 XPage* XPageCache::alloc_large_page(size_t size) {
@@ -113,7 +113,7 @@ XPage* XPageCache::alloc_large_page(size_t size) {
     }
   }
 
-  return NULL;
+  return nullptr;
 }
 
 XPage* XPageCache::alloc_oversized_medium_page(size_t size) {
@@ -121,7 +121,7 @@ XPage* XPageCache::alloc_oversized_medium_page(size_t size) {
     return _medium.remove_first();
   }
 
-  return NULL;
+  return nullptr;
 }
 
 XPage* XPageCache::alloc_oversized_large_page(size_t size) {
@@ -135,16 +135,16 @@ XPage* XPageCache::alloc_oversized_large_page(size_t size) {
     }
   }
 
-  return NULL;
+  return nullptr;
 }
 
 XPage* XPageCache::alloc_oversized_page(size_t size) {
   XPage* page = alloc_oversized_large_page(size);
-  if (page == NULL) {
+  if (page == nullptr) {
     page = alloc_oversized_medium_page(size);
   }
 
-  if (page != NULL) {
+  if (page != nullptr) {
     XStatInc(XCounterPageCacheHitL3);
   }
 
@@ -163,10 +163,10 @@ XPage* XPageCache::alloc_page(uint8_t type, size_t size) {
     page = alloc_large_page(size);
   }
 
-  if (page == NULL) {
+  if (page == nullptr) {
     // Try allocate potentially oversized page
     XPage* const oversized = alloc_oversized_page(size);
-    if (oversized != NULL) {
+    if (oversized != nullptr) {
       if (size < oversized->size()) {
         // Split oversized page
         page = oversized->split(type, size);
@@ -180,7 +180,7 @@ XPage* XPageCache::alloc_page(uint8_t type, size_t size) {
     }
   }
 
-  if (page == NULL) {
+  if (page == nullptr) {
     XStatInc(XCounterPageCacheMiss);
   }
 
@@ -200,7 +200,7 @@ void XPageCache::free_page(XPage* page) {
 bool XPageCache::flush_list_inner(XPageCacheFlushClosure* cl, XList<XPage>* from, XList<XPage>* to) {
   XPage* const page = from->last();
-  if (page == NULL || !cl->do_page(page)) {
+  if (page == nullptr || !cl->do_page(page)) {
     // Don't flush page
     return false;
   }
diff --git a/src/hotspot/share/gc/x/xPageTable.cpp b/src/hotspot/share/gc/x/xPageTable.cpp
index 6cdb7c929e1..c3103e808ca 100644
--- a/src/hotspot/share/gc/x/xPageTable.cpp
+++ b/src/hotspot/share/gc/x/xPageTable.cpp
@@ -40,7 +40,7 @@ void XPageTable::insert(XPage* page) {
   // visible before updating the page table.
   OrderAccess::storestore();
 
-  assert(_map.get(offset) == NULL, "Invalid entry");
+  assert(_map.get(offset) == nullptr, "Invalid entry");
   _map.put(offset, size, page);
 }
 
@@ -49,5 +49,5 @@ void XPageTable::remove(XPage* page) {
   const size_t size = page->size();
 
   assert(_map.get(offset) == page, "Invalid entry");
-  _map.put(offset, size, NULL);
+  _map.put(offset, size, nullptr);
 }
diff --git a/src/hotspot/share/gc/x/xPageTable.inline.hpp b/src/hotspot/share/gc/x/xPageTable.inline.hpp
index c4f30d3e9c3..49fe8ecfddc 100644
--- a/src/hotspot/share/gc/x/xPageTable.inline.hpp
+++ b/src/hotspot/share/gc/x/xPageTable.inline.hpp
@@ -36,7 +36,7 @@ inline XPage* XPageTable::get(uintptr_t addr) const {
 
 inline XPageTableIterator::XPageTableIterator(const XPageTable* page_table) :
     _iter(&page_table->_map),
-    _prev(NULL) {}
+    _prev(nullptr) {}
 
 inline bool XPageTableIterator::next(XPage** page) {
   for (XPage* entry; _iter.next(&entry);) {
diff --git a/src/hotspot/share/gc/x/xReferenceProcessor.cpp b/src/hotspot/share/gc/x/xReferenceProcessor.cpp
index 4d6f05e5922..acbb96eaf41 100644
--- a/src/hotspot/share/gc/x/xReferenceProcessor.cpp
+++ b/src/hotspot/share/gc/x/xReferenceProcessor.cpp
@@ -106,12 +106,12 @@ static void soft_reference_update_clock() {
 
 XReferenceProcessor::XReferenceProcessor(XWorkers* workers) :
     _workers(workers),
-    _soft_reference_policy(NULL),
+    _soft_reference_policy(nullptr),
     _encountered_count(),
     _discovered_count(),
     _enqueued_count(),
-    _discovered_list(NULL),
-    _pending_list(NULL),
+    _discovered_list(nullptr),
+    _pending_list(nullptr),
     _pending_list_tail(_pending_list.addr()) {}
 
 void XReferenceProcessor::set_soft_reference_policy(bool clear) {
@@ -132,11 +132,11 @@ bool XReferenceProcessor::is_inactive(oop reference, oop referent, ReferenceType
   if (type == REF_FINAL) {
     // A FinalReference is inactive if its next field is non-null. An application can't
     // call enqueue() or clear() on a FinalReference.
-    return reference_next(reference) != NULL;
+    return reference_next(reference) != nullptr;
   } else {
     // A non-FinalReference is inactive if the referent is null. The referent can only
     // be null if the application called Reference.enqueue() or Reference.clear().
-    return referent == NULL;
+    return referent == nullptr;
   }
 }
 
@@ -153,7 +153,7 @@ bool XReferenceProcessor::is_softly_live(oop reference, ReferenceType type) cons
   // Ask SoftReference policy
   const jlong clock = java_lang_ref_SoftReference::clock();
   assert(clock != 0, "Clock not initialized");
-  assert(_soft_reference_policy != NULL, "Policy not initialized");
+  assert(_soft_reference_policy != nullptr, "Policy not initialized");
   return !_soft_reference_policy->should_clear_reference(reference, clock);
 }
 
@@ -184,7 +184,7 @@ bool XReferenceProcessor::should_discover(oop reference, ReferenceType type) con
 
 bool XReferenceProcessor::should_drop(oop reference, ReferenceType type) const {
   const oop referent = reference_referent(reference);
-  if (referent == NULL) {
+  if (referent == nullptr) {
     // Reference has been cleared, by a call to Reference.enqueue()
     // or Reference.clear() from the application, which means we
     // should drop the reference.
@@ -215,7 +215,7 @@ void XReferenceProcessor::make_inactive(oop reference, ReferenceType type) const
     // to finalize(). A FinalReference is instead made inactive by self-looping the
     // next field. An application can't call FinalReference.enqueue(), so there is
     // no race to worry about when setting the next field.
-    assert(reference_next(reference) == NULL, "Already inactive");
+    assert(reference_next(reference) == nullptr, "Already inactive");
     reference_set_next(reference, reference);
   } else {
     // Clear referent
@@ -238,7 +238,7 @@ void XReferenceProcessor::discover(oop reference, ReferenceType type) {
   }
 
   // Add reference to discovered list
-  assert(reference_discovered(reference) == NULL, "Already discovered");
+  assert(reference_discovered(reference) == nullptr, "Already discovered");
   oop* const list = _discovered_list.addr();
   reference_set_discovered(reference, *list);
   *list = reference;
@@ -274,7 +274,7 @@ oop XReferenceProcessor::drop(oop reference, ReferenceType type) {
 
   // Unlink and return next in list
   const oop next = reference_discovered(reference);
-  reference_set_discovered(reference, NULL);
+  reference_set_discovered(reference, nullptr);
   return next;
 }
 
@@ -296,7 +296,7 @@ void XReferenceProcessor::work() {
   oop* const list = _discovered_list.addr();
   oop* p = list;
 
-  while (*p != NULL) {
+  while (*p != nullptr) {
     const oop reference = *p;
     const ReferenceType type = reference_type(reference);
 
@@ -308,27 +308,27 @@ void XReferenceProcessor::work() {
   }
 
   // Prepend discovered references to internal pending list
-  if (*list != NULL) {
+  if (*list != nullptr) {
     *p = Atomic::xchg(_pending_list.addr(), *list);
-    if (*p == NULL) {
+    if (*p == nullptr) {
       // First to prepend to list, record tail
       _pending_list_tail = p;
     }
 
     // Clear discovered list
-    *list = NULL;
+    *list = nullptr;
   }
 }
 
 bool XReferenceProcessor::is_empty() const {
   XPerWorkerConstIterator<oop> iter(&_discovered_list);
   for (const oop* list; iter.next(&list);) {
-    if (*list != NULL) {
+    if (*list != nullptr) {
       return false;
     }
   }
 
-  if (_pending_list.get() != NULL) {
+  if (_pending_list.get() != nullptr) {
     return false;
   }
 
@@ -437,7 +437,7 @@ void XReferenceProcessor::process_references() {
 
 void XReferenceProcessor::enqueue_references() {
   XStatTimer timer(XSubPhaseConcurrentReferencesEnqueue);
 
-  if (_pending_list.get() == NULL) {
+  if (_pending_list.get() == nullptr) {
     // Nothing to enqueue
     return;
   }
@@ -454,6 +454,6 @@ void XReferenceProcessor::enqueue_references() {
   }
 
   // Reset internal pending list
diff --git a/src/hotspot/share/gc/x/xRelocate.cpp b/src/hotspot/share/gc/x/xRelocate.cpp
index e13773242b3..c7b5287b474 100644
--- a/src/hotspot/share/gc/x/xRelocate.cpp
+++ b/src/hotspot/share/gc/x/xRelocate.cpp
@@ -129,7 +129,7 @@ static XPage* alloc_page(const XForwarding* forwarding) {
   if (ZStressRelocateInPlace) {
     // Simulate failure to allocate a new page. This will
     // cause the page being relocated to be relocated in-place.
-    return NULL;
+    return nullptr;
   }

   XAllocationFlags flags;
@@ -147,7 +147,7 @@ static bool should_free_target_page(XPage* page) {
   // page if we allocated a new target page, and then lost the race to
   // relocate the remaining objects, leaving the target page empty when
   // relocation completed.
-  return page != NULL && page->top() == page->start();
+  return page != nullptr && page->top() == page->start();
 }

 class XRelocateSmallAllocator {
@@ -160,7 +160,7 @@ public:
   XPage* alloc_target_page(XForwarding* forwarding, XPage* target) {
     XPage* const page = alloc_page(forwarding);
-    if (page == NULL) {
+    if (page == nullptr) {
       Atomic::inc(&_in_place_count);
     }

@@ -182,7 +182,7 @@ public:
   }

   uintptr_t alloc_object(XPage* page, size_t size) const {
-    return (page != NULL) ? page->alloc_object(size) : 0;
+    return (page != nullptr) ? page->alloc_object(size) : 0;
   }

   void undo_alloc_object(XPage* page, uintptr_t addr, size_t size) const {
@@ -204,7 +204,7 @@ private:
 public:
   XRelocateMediumAllocator() :
       _lock(),
-      _shared(NULL),
+      _shared(nullptr),
       _in_place(false),
       _in_place_count(0) {}

@@ -228,7 +228,7 @@ public:
     // a new page.
     if (_shared == target) {
       _shared = alloc_page(forwarding);
-      if (_shared == NULL) {
+      if (_shared == nullptr) {
         Atomic::inc(&_in_place_count);
         _in_place = true;
       }
@@ -241,8 +241,8 @@ public:
     XLocker<XConditionLock> locker(&_lock);

     assert(_in_place, "Invalid state");
-    assert(_shared == NULL, "Invalid state");
-    assert(page != NULL, "Invalid page");
+    assert(_shared == nullptr, "Invalid state");
+    assert(page != nullptr, "Invalid page");

     _shared = page;
     _in_place = false;
@@ -259,7 +259,7 @@ public:
   }

   uintptr_t alloc_object(XPage* page, size_t size) const {
-    return (page != NULL) ? page->alloc_object_atomic(size) : 0;
+    return (page != nullptr) ? page->alloc_object_atomic(size) : 0;
   }

   void undo_alloc_object(XPage* page, uintptr_t addr, size_t size) const {
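XRelocateMediumAllocator above coordinates several workers bump-allocating from one shared target page. The sketch below models the two pieces these hunks touch: lock-free allocation within the page, and the locked replacement of an exhausted page, where an allocation failure (nullptr) flips relocation into in-place mode. Everything here is a simplified stand-in (std::mutex for the X lock types, a stubbed page allocator), not HotSpot's implementation:

```cpp
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <mutex>

struct Page {
  std::atomic<uintptr_t> top;
  uintptr_t end;

  Page(uintptr_t start, uintptr_t limit) : top(start), end(limit) {}

  // Shared bump allocation; 0 means "page full" (cf. alloc_object_atomic).
  uintptr_t alloc_atomic(size_t size) {
    uintptr_t old = top.load(std::memory_order_relaxed);
    for (;;) {
      if (old + size > end) {
        return 0;
      }
      // On failure 'old' is reloaded with the current top and we retry.
      if (top.compare_exchange_weak(old, old + size,
                                    std::memory_order_relaxed)) {
        return old;
      }
    }
  }
};

// Stand-in for the real page allocator; returning nullptr models the
// allocation failure that triggers in-place relocation.
static Page* alloc_new_page() {
  return nullptr;
}

struct SharedAllocator {
  std::mutex lock;
  Page* shared = nullptr;
  bool in_place = false;

  // Only the worker that still sees the exhausted page installs a
  // replacement; latecomers just pick up the already-installed page.
  Page* alloc_target_page(Page* exhausted) {
    std::lock_guard<std::mutex> guard(lock);
    if (shared == exhausted) {
      shared = alloc_new_page();
      if (shared == nullptr) {
        in_place = true;  // cf. '_in_place = true' in the hunk above
      }
    }
    return shared;
  }
};
```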
@@ -321,7 +321,7 @@ private:
       // relocated as the new target, which will cause it to be relocated
       // in-place.
       _target = _allocator->alloc_target_page(_forwarding, _target);
-      if (_target != NULL) {
+      if (_target != nullptr) {
         continue;
       }

@@ -337,8 +337,8 @@ private:
 public:
   XRelocateClosure(Allocator* allocator) :
       _allocator(allocator),
-      _forwarding(NULL),
-      _target(NULL) {}
+      _forwarding(nullptr),
+      _target(nullptr) {}

   ~XRelocateClosure() {
     _allocator->free_target_page(_target);
diff --git a/src/hotspot/share/gc/x/xRelocationSet.cpp b/src/hotspot/share/gc/x/xRelocationSet.cpp
index aca1bb4f030..eeb42c4bf32 100644
--- a/src/hotspot/share/gc/x/xRelocationSet.cpp
+++ b/src/hotspot/share/gc/x/xRelocationSet.cpp
@@ -61,7 +61,7 @@ public:
   XRelocationSetInstallTask(XForwardingAllocator* allocator, const XRelocationSetSelector* selector) :
       XTask("XRelocationSetInstallTask"),
       _allocator(allocator),
-      _forwardings(NULL),
+      _forwardings(nullptr),
       _nforwardings(selector->small()->length() + selector->medium()->length()),
       _small_iter(selector->small()),
       _medium_iter(selector->medium()),
@@ -109,7 +109,7 @@ public:
 XRelocationSet::XRelocationSet(XWorkers* workers) :
     _workers(workers),
     _allocator(),
-    _forwardings(NULL),
+    _forwardings(nullptr),
     _nforwardings(0) {}

 void XRelocationSet::install(const XRelocationSetSelector* selector) {
diff --git a/src/hotspot/share/gc/x/xRelocationSetSelector.cpp b/src/hotspot/share/gc/x/xRelocationSetSelector.cpp
index b009443d395..514e70b8743 100644
--- a/src/hotspot/share/gc/x/xRelocationSetSelector.cpp
+++ b/src/hotspot/share/gc/x/xRelocationSetSelector.cpp
@@ -91,14 +91,14 @@ void XRelocationSetSelectorGroup::semi_sort() {

   // Allocate destination array
   const int npages = _live_pages.length();
-  XArray<XPage*> sorted_live_pages(npages, npages, NULL);
+  XArray<XPage*> sorted_live_pages(npages, npages, nullptr);

   // Sort pages into partitions
   XArrayIterator<XPage*> iter2(&_live_pages);
   for (XPage* page; iter2.next(&page);) {
     const size_t index = page->live_bytes() >> partition_size_shift;
     const int finger = partitions[index]++;
-    assert(sorted_live_pages.at(finger) == NULL, "Invalid finger");
+    assert(sorted_live_pages.at(finger) == nullptr, "Invalid finger");
     sorted_live_pages.at_put(finger, page);
   }
diff --git a/src/hotspot/share/gc/x/xSafeDelete.inline.hpp b/src/hotspot/share/gc/x/xSafeDelete.inline.hpp
index 6c8417593d4..7e428c710e8 100644
--- a/src/hotspot/share/gc/x/xSafeDelete.inline.hpp
+++ b/src/hotspot/share/gc/x/xSafeDelete.inline.hpp
@@ -95,6 +95,6 @@ XSafeDelete<T>::XSafeDelete() :

 template <typename T>
 XSafeDeleteNoLock<T>::XSafeDeleteNoLock() :
-    XSafeDeleteImpl<T>(NULL) {}
+    XSafeDeleteImpl<T>(nullptr) {}

 #endif // SHARE_GC_X_XSAFEDELETE_INLINE_HPP
diff --git a/src/hotspot/share/gc/x/xServiceability.cpp b/src/hotspot/share/gc/x/xServiceability.cpp
index 6882896dfa9..f3b51b6bb4a 100644
--- a/src/hotspot/share/gc/x/xServiceability.cpp
+++ b/src/hotspot/share/gc/x/xServiceability.cpp
@@ -121,7 +121,7 @@ XServiceability::XServiceability(size_t min_capacity, size_t max_capacity) :
     _memory_pool(_min_capacity, _max_capacity),
     _cycle_memory_manager("ZGC Cycles", &_memory_pool),
     _pause_memory_manager("ZGC Pauses", &_memory_pool),
-    _counters(NULL) {}
+    _counters(nullptr) {}

 void XServiceability::initialize() {
   _counters = new XServiceabilityCounters(_min_capacity, _max_capacity);
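The xRelocationSetSelector hunk above is the placement step of a bucket sort: partition start offsets ("fingers") have already been computed, and each page drops into the next free slot of its partition, which is why every slot is asserted to still be nullptr. A compact stand-alone version of the whole semi-sort, with illustrative types and the assumption that live_bytes >> shift always lands inside the partition array:

```cpp
#include <cstddef>
#include <vector>

struct Page {
  size_t live_bytes;
};

std::vector<Page*> semi_sort(const std::vector<Page*>& pages,
                             size_t partition_size_shift,
                             size_t npartitions) {
  // Count pages per partition (bucket).
  std::vector<size_t> fingers(npartitions, 0);
  for (Page* const p : pages) {
    fingers[p->live_bytes >> partition_size_shift]++;
  }

  // Exclusive prefix sum: each partition's starting index.
  size_t sum = 0;
  for (size_t& f : fingers) {
    const size_t count = f;
    f = sum;
    sum += count;
  }

  // Place each page at its partition's finger. Every slot is written
  // exactly once, which the original asserts via '== nullptr'.
  std::vector<Page*> sorted(pages.size(), nullptr);
  for (Page* const p : pages) {
    sorted[fingers[p->live_bytes >> partition_size_shift]++] = p;
  }
  return sorted;
}
```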
diff --git a/src/hotspot/share/gc/x/xStackWatermark.cpp b/src/hotspot/share/gc/x/xStackWatermark.cpp
index 7be799f74f0..9710b4c5b21 100644
--- a/src/hotspot/share/gc/x/xStackWatermark.cpp
+++ b/src/hotspot/share/gc/x/xStackWatermark.cpp
@@ -38,7 +38,7 @@ XOnStackCodeBlobClosure::XOnStackCodeBlobClosure() :

 void XOnStackCodeBlobClosure::do_code_blob(CodeBlob* cb) {
   nmethod* const nm = cb->as_nmethod_or_null();
-  if (nm != NULL) {
+  if (nm != nullptr) {
     const bool result = _bs_nm->nmethod_entry_barrier(nm);
     assert(result, "NMethod on-stack must be alive");
   }
@@ -59,7 +59,7 @@ XStackWatermark::XStackWatermark(JavaThread* jt) :
     _stats() {}

 OopClosure* XStackWatermark::closure_from_context(void* context) {
-  if (context != NULL) {
+  if (context != nullptr) {
     assert(XThread::is_worker(), "Unexpected thread passing in context: " PTR_FORMAT, p2i(context));
     return reinterpret_cast<OopClosure*>(context);
   } else {
diff --git a/src/hotspot/share/gc/x/xStat.cpp b/src/hotspot/share/gc/x/xStat.cpp
index beb90cc1740..c445e951397 100644
--- a/src/hotspot/share/gc/x/xStat.cpp
+++ b/src/hotspot/share/gc/x/xStat.cpp
@@ -397,16 +397,16 @@ T* XStatIterableValue<T>::insert() const {
 template <typename T>
 void XStatIterableValue<T>::sort() {
   T* first_unsorted = _first;
-  _first = NULL;
+  _first = nullptr;

-  while (first_unsorted != NULL) {
+  while (first_unsorted != nullptr) {
     T* const value = first_unsorted;
     first_unsorted = value->_next;
-    value->_next = NULL;
+    value->_next = nullptr;

     T** current = &_first;

-    while (*current != NULL) {
+    while (*current != nullptr) {
       // First sort by group, then by name
       const int group_cmp = strcmp((*current)->group(), value->group());
       if ((group_cmp > 0) || (group_cmp == 0 && strcmp((*current)->name(), value->name()) > 0)) {
@@ -881,12 +881,12 @@ XStat::XStat() :

 void XStat::sample_and_collect(XStatSamplerHistory* history) const {
   // Sample counters
-  for (const XStatCounter* counter = XStatCounter::first(); counter != NULL; counter = counter->next()) {
+  for (const XStatCounter* counter = XStatCounter::first(); counter != nullptr; counter = counter->next()) {
     counter->sample_and_reset();
   }

   // Collect samples
-  for (const XStatSampler* sampler = XStatSampler::first(); sampler != NULL; sampler = sampler->next()) {
+  for (const XStatSampler* sampler = XStatSampler::first(); sampler != nullptr; sampler = sampler->next()) {
     XStatSamplerHistory& sampler_history = history[sampler->id()];
     sampler_history.add(sampler->collect_and_reset());
   }
@@ -911,7 +911,7 @@ void XStat::print(LogTargetHandle log, const XStatSamplerHistory* history) const
   log.print(" Last 10s Last 10m Last 10h Total");
   log.print(" Avg / Max Avg / Max Avg / Max Avg / Max");

-  for (const XStatSampler* sampler = XStatSampler::first(); sampler != NULL; sampler = sampler->next()) {
+  for (const XStatSampler* sampler = XStatSampler::first(); sampler != nullptr; sampler = sampler->next()) {
     const XStatSamplerHistory& sampler_history = history[sampler->id()];
     const XStatUnitPrinter printer = sampler->printer();
     printer(log, *sampler, sampler_history);
diff --git a/src/hotspot/share/gc/x/xStat.hpp b/src/hotspot/share/gc/x/xStat.hpp
index 1ecaf9df492..4983e5fcab6 100644
--- a/src/hotspot/share/gc/x/xStat.hpp
+++ b/src/hotspot/share/gc/x/xStat.hpp
@@ -118,7 +118,7 @@ public:
 };

 template <typename T> uint32_t XStatIterableValue<T>::_count = 0;
-template <typename T> T* XStatIterableValue<T>::_first = NULL;
+template <typename T> T* XStatIterableValue<T>::_first = nullptr;

 //
 // Stat sampler
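XStatIterableValue<T>::sort() above is an insertion sort over an intrusive singly linked list, ordered by group and then by name. The same algorithm as a free function over a plain struct (illustrative, not the HotSpot type), which may make the pointer-to-pointer walk easier to follow:

```cpp
#include <cstring>

struct Value {
  const char* group;
  const char* name;
  Value* next;
};

Value* sort_values(Value* first_unsorted) {
  Value* sorted = nullptr;

  while (first_unsorted != nullptr) {
    // Detach the next unsorted node.
    Value* const value = first_unsorted;
    first_unsorted = value->next;
    value->next = nullptr;

    // Walk the sorted list via the link fields themselves, so inserting
    // at the head needs no special case.
    Value** current = &sorted;
    while (*current != nullptr) {
      // First sort by group, then by name (the comparison in the hunk).
      const int group_cmp = std::strcmp((*current)->group, value->group);
      if ((group_cmp > 0) ||
          (group_cmp == 0 && std::strcmp((*current)->name, value->name) > 0)) {
        break;
      }
      current = &(*current)->next;
    }

    // Insert before the first element that orders after 'value'.
    value->next = *current;
    *current = value;
  }

  return sorted;
}
```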
diff --git a/src/hotspot/share/gc/x/xThreadLocalAllocBuffer.cpp b/src/hotspot/share/gc/x/xThreadLocalAllocBuffer.cpp
index 594a7799d7d..7dc0a128b64 100644
--- a/src/hotspot/share/gc/x/xThreadLocalAllocBuffer.cpp
+++ b/src/hotspot/share/gc/x/xThreadLocalAllocBuffer.cpp
@@ -31,11 +31,11 @@
 #include "runtime/javaThread.hpp"
 #include "runtime/stackWatermarkSet.inline.hpp"

-XPerWorker<ThreadLocalAllocStats>* XThreadLocalAllocBuffer::_stats = NULL;
+XPerWorker<ThreadLocalAllocStats>* XThreadLocalAllocBuffer::_stats = nullptr;

 void XThreadLocalAllocBuffer::initialize() {
   if (UseTLAB) {
-    assert(_stats == NULL, "Already initialized");
+    assert(_stats == nullptr, "Already initialized");
     _stats = new XPerWorker<ThreadLocalAllocStats>();
     reset_statistics();
   }
diff --git a/src/hotspot/share/gc/x/xThreadLocalData.hpp b/src/hotspot/share/gc/x/xThreadLocalData.hpp
index b4abaadd09c..adc72f6ca76 100644
--- a/src/hotspot/share/gc/x/xThreadLocalData.hpp
+++ b/src/hotspot/share/gc/x/xThreadLocalData.hpp
@@ -39,7 +39,7 @@ private:
   XThreadLocalData() :
       _address_bad_mask(0),
       _stacks(),
-      _invisible_root(NULL) {}
+      _invisible_root(nullptr) {}

   static XThreadLocalData* data(Thread* thread) {
     return thread->gc_data<XThreadLocalData>();
   }
@@ -63,18 +63,18 @@ public:
   }

   static void set_invisible_root(Thread* thread, oop* root) {
-    assert(data(thread)->_invisible_root == NULL, "Already set");
+    assert(data(thread)->_invisible_root == nullptr, "Already set");
     data(thread)->_invisible_root = root;
   }

   static void clear_invisible_root(Thread* thread) {
-    assert(data(thread)->_invisible_root != NULL, "Should be set");
-    data(thread)->_invisible_root = NULL;
+    assert(data(thread)->_invisible_root != nullptr, "Should be set");
+    data(thread)->_invisible_root = nullptr;
   }

   template <typename T>
   static void do_invisible_root(Thread* thread, T f) {
-    if (data(thread)->_invisible_root != NULL) {
+    if (data(thread)->_invisible_root != nullptr) {
       f(data(thread)->_invisible_root);
     }
   }
diff --git a/src/hotspot/share/gc/x/xTracer.cpp b/src/hotspot/share/gc/x/xTracer.cpp
index 6db2e0bcc9a..68fbdc28356 100644
--- a/src/hotspot/share/gc/x/xTracer.cpp
+++ b/src/hotspot/share/gc/x/xTracer.cpp
@@ -53,7 +53,7 @@ class XStatisticsCounterTypeConstant : public JfrSerializer {
 public:
   virtual void serialize(JfrCheckpointWriter& writer) {
     writer.write_count(XStatCounter::count());
-    for (XStatCounter* counter = XStatCounter::first(); counter != NULL; counter = counter->next()) {
+    for (XStatCounter* counter = XStatCounter::first(); counter != nullptr; counter = counter->next()) {
       writer.write_key(counter->id());
       writer.write(counter->name());
     }
@@ -64,7 +64,7 @@ class XStatisticsSamplerTypeConstant : public JfrSerializer {
 public:
   virtual void serialize(JfrCheckpointWriter& writer) {
     writer.write_count(XStatSampler::count());
-    for (XStatSampler* sampler = XStatSampler::first(); sampler != NULL; sampler = sampler->next()) {
+    for (XStatSampler* sampler = XStatSampler::first(); sampler != nullptr; sampler = sampler->next()) {
       writer.write_key(sampler->id());
       writer.write(sampler->name());
     }
@@ -85,13 +85,13 @@ static void register_jfr_type_serializers() {

 #endif // INCLUDE_JFR

-XTracer* XTracer::_tracer = NULL;
+XTracer* XTracer::_tracer = nullptr;

 XTracer::XTracer() :
     GCTracer(Z) {}

 void XTracer::initialize() {
-  assert(_tracer == NULL, "Already initialized");
+  assert(_tracer == nullptr, "Already initialized");
   _tracer = new XTracer();
   JFR_ONLY(register_jfr_type_serializers());
 }
diff --git a/src/hotspot/share/gc/x/xUnload.cpp b/src/hotspot/share/gc/x/xUnload.cpp
index 66bdf2e222b..27d429d3635 100644
--- a/src/hotspot/share/gc/x/xUnload.cpp
+++ b/src/hotspot/share/gc/x/xUnload.cpp
@@ -59,7 +59,7 @@ public:

   virtual void do_oop(oop* p) {
     const oop o = RawAccess<>::oop_load(p);
-    if (o != NULL && !_is_alive.do_object_b(o)) {
+    if (o != nullptr && !_is_alive.do_object_b(o)) {
       _is_unloading = true;
     }
   }
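The xThreadLocalData hunks above guard a single optional "invisible root" slot per thread and apply a template functor to it only when one is installed. A toy version of that pattern; oop and the data holder are simplified stand-ins, not the HotSpot types:

```cpp
#include <cassert>

using oop = void*;  // stand-in for HotSpot's oop

struct ThreadData {
  oop* invisible_root = nullptr;

  void set_invisible_root(oop* root) {
    assert(invisible_root == nullptr && "Already set");
    invisible_root = root;
  }

  void clear_invisible_root() {
    assert(invisible_root != nullptr && "Should be set");
    invisible_root = nullptr;
  }

  // Accepts any callable (lambda, closure object, function pointer),
  // mirroring 'template <typename T> static void do_invisible_root'.
  template <typename F>
  void do_invisible_root(F f) {
    if (invisible_root != nullptr) {
      f(invisible_root);
    }
  }
};

// Usage: visit the root only if one is installed.
//   data.do_invisible_root([](oop* p) { /* process *p */ });
```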
diff --git a/src/hotspot/share/gc/x/xUnmapper.cpp b/src/hotspot/share/gc/x/xUnmapper.cpp
index baa09769074..76b4f5e594f 100644
--- a/src/hotspot/share/gc/x/xUnmapper.cpp
+++ b/src/hotspot/share/gc/x/xUnmapper.cpp
@@ -45,11 +45,11 @@ XPage* XUnmapper::dequeue() {

   for (;;) {
     if (_stop) {
-      return NULL;
+      return nullptr;
     }

     XPage* const page = _queue.remove_first();
-    if (page != NULL) {
+    if (page != nullptr) {
       return page;
     }

@@ -85,7 +85,7 @@ void XUnmapper::unmap_and_destroy_page(XPage* page) {

 void XUnmapper::run_service() {
   for (;;) {
     XPage* const page = dequeue();
-    if (page == NULL) {
+    if (page == nullptr) {
       // Stop
       return;
     }
diff --git a/src/hotspot/share/gc/x/xVerify.cpp b/src/hotspot/share/gc/x/xVerify.cpp
index ed3e224091c..525c4958d9b 100644
--- a/src/hotspot/share/gc/x/xVerify.cpp
+++ b/src/hotspot/share/gc/x/xVerify.cpp
@@ -53,7 +53,7 @@ static void z_verify_oop(oop* p) {
   const oop o = RawAccess<>::oop_load(p);

-  if (o != NULL) {
+  if (o != nullptr) {
     const uintptr_t addr = XOop::to_address(o);
     guarantee(XAddress::is_good(addr), BAD_OOP_ARG(o, p));
     guarantee(oopDesc::is_oop(XOop::from_address(addr)), BAD_OOP_ARG(o, p));
@@ -62,7 +62,7 @@ static void z_verify_possibly_weak_oop(oop* p) {
   const oop o = RawAccess<>::oop_load(p);

-  if (o != NULL) {
+  if (o != nullptr) {
     const uintptr_t addr = XOop::to_address(o);
     guarantee(XAddress::is_good(addr) || XAddress::is_finalizable_good(addr), BAD_OOP_ARG(o, p));
     guarantee(oopDesc::is_oop(XOop::from_address(XAddress::good(addr))), BAD_OOP_ARG(o, p));
@@ -220,7 +220,7 @@ public:
       _cl(cl) {}

   virtual void do_thread(Thread* thread) {
-    thread->oops_do_no_frames(_cl, NULL);
+    thread->oops_do_no_frames(_cl, nullptr);

     JavaThread* const jt = JavaThread::cast(thread);
     if (!jt->has_last_Java_frame()) {
@@ -393,12 +393,12 @@ public:

 void XVerify::verify_frame_bad(const frame& fr, RegisterMap& register_map) {
   XVerifyBadOopClosure verify_cl;
-  fr.oops_do(&verify_cl, NULL, &register_map, DerivedPointerIterationMode::_ignore);
+  fr.oops_do(&verify_cl, nullptr, &register_map, DerivedPointerIterationMode::_ignore);
 }

 void XVerify::verify_thread_head_bad(JavaThread* jt) {
   XVerifyBadOopClosure verify_cl;
-  jt->oops_do_no_frames(&verify_cl, NULL);
+  jt->oops_do_no_frames(&verify_cl, nullptr);
 }

 void XVerify::verify_thread_frames_bad(JavaThread* jt) {
@@ -407,7 +407,7 @@ void XVerify::verify_thread_frames_bad(JavaThread* jt) {
     StackWatermarkProcessingMark swpm(Thread::current());

     // Traverse the execution stack
     for (StackFrameStream fst(jt, true /* update */, false /* process_frames */); !fst.is_done(); fst.next()) {
-      fst.current()->oops_do(&verify_cl, NULL /* code_cl */, fst.register_map(), DerivedPointerIterationMode::_ignore);
+      fst.current()->oops_do(&verify_cl, nullptr /* code_cl */, fst.register_map(), DerivedPointerIterationMode::_ignore);
     }
   }
 }
diff --git a/src/hotspot/share/gc/x/xWeakRootsProcessor.cpp b/src/hotspot/share/gc/x/xWeakRootsProcessor.cpp
index 27eaead98fe..0271fcd8c3d 100644
--- a/src/hotspot/share/gc/x/xWeakRootsProcessor.cpp
+++ b/src/hotspot/share/gc/x/xWeakRootsProcessor.cpp
@@ -42,7 +42,7 @@ public:
     // oop here again (the object would be strongly live and we would
     // not consider clearing such oops), so therefore we don't have an
     // ABA problem here.
-    Atomic::cmpxchg(p, obj, oop(NULL));
+    Atomic::cmpxchg(p, obj, oop(nullptr));
   }
 }
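The xWeakRootsProcessor hunk ends the patch on the classic conditional-clear idiom: CAS the slot to nullptr only if it still holds the dead object that was loaded, so a concurrently installed healthy pointer is never wiped out; and since a dead object cannot become live again, there is no ABA hazard. A self-contained rendition with std::atomic and a stubbed liveness predicate, both stand-ins for the HotSpot APIs:

```cpp
#include <atomic>

using oop = void*;  // stand-in for HotSpot's oop

// Stand-in liveness predicate; the real code asks the GC.
static bool is_alive(oop o) {
  return o != nullptr;
}

void clear_if_dead(std::atomic<oop>* slot) {
  oop obj = slot->load(std::memory_order_relaxed);
  if (obj != nullptr && !is_alive(obj)) {
    // Succeeds only if the slot still holds 'obj'; a failure means a
    // racing thread stored a (live) value that must not be cleared.
    // This is the 'Atomic::cmpxchg(p, obj, oop(nullptr))' in the hunk above.
    slot->compare_exchange_strong(obj, nullptr, std::memory_order_relaxed);
  }
}
```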