8301495: Replace NULL with nullptr in cpu/ppc

Reviewed-by: rrich, mdoerr, tsteele
Johan Sjölen 2023-04-14 08:54:53 +00:00
parent c0c31224db
commit 0826ceee65
51 changed files with 381 additions and 381 deletions

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2015 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -94,9 +94,9 @@ int AbstractInterpreter::size_activation(int max_stack,
 //
 // Parameters:
 //
-// interpreter_frame != NULL:
+// interpreter_frame != nullptr:
 // set up the method, locals, and monitors.
-// The frame interpreter_frame, if not NULL, is guaranteed to be the
+// The frame interpreter_frame, if not null, is guaranteed to be the
 // right size, as determined by a previous call to this method.
 // It is also guaranteed to be walkable even though it is in a skeletal state
 //

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2022 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -95,7 +95,7 @@ class AddressLiteral {
 protected:
 // creation
-AddressLiteral() : _address(NULL), _rspec() {}
+AddressLiteral() : _address(nullptr), _rspec() {}
 public:
 AddressLiteral(address addr, RelocationHolder const& rspec)
@@ -1349,14 +1349,14 @@ class Assembler : public AbstractAssembler {
 inline void emit_data(int, relocInfo::relocType rtype);
 // Emit an address.
-inline address emit_addr(const address addr = NULL);
+inline address emit_addr(const address addr = nullptr);
 #if !defined(ABI_ELFv2)
 // Emit a function descriptor with the specified entry point, TOC,
-// and ENV. If the entry point is NULL, the descriptor will point
+// and ENV. If the entry point is null, the descriptor will point
 // just past the descriptor.
 // Use values from friend functions as defaults.
-inline address emit_fd(address entry = NULL,
+inline address emit_fd(address entry = nullptr,
 address toc = (address) FunctionDescriptor::friend_toc,
 address env = (address) FunctionDescriptor::friend_env);
 #endif

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2020 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -58,7 +58,7 @@ inline address Assembler::emit_addr(const address addr) {
 #if !defined(ABI_ELFv2)
 // Emit a function descriptor with the specified entry point, TOC, and
-// ENV. If the entry point is NULL, the descriptor will point just
+// ENV. If the entry point is null, the descriptor will point just
 // past the descriptor.
 inline address Assembler::emit_fd(address entry, address toc, address env) {
 FunctionDescriptor* fd = (FunctionDescriptor*)pc();
@@ -69,7 +69,7 @@ inline address Assembler::emit_fd(address entry, address toc, address env) {
 (void)emit_addr();
 (void)emit_addr();
-fd->set_entry(entry == NULL ? pc() : entry);
+fd->set_entry(entry == nullptr ? pc() : entry);
 fd->set_toc(toc);
 fd->set_env(env);

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2021 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -42,7 +42,7 @@ void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
 if (UseSIGTRAP) {
 DEBUG_ONLY( __ should_not_reach_here("C1SafepointPollStub::emit_code"); )
 } else {
-assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
+assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
 "polling page return stub not created yet");
 address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
@@ -336,12 +336,12 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
 if (_id == load_klass_id) {
 // Produce a copy of the load klass instruction for use by the being initialized case.
-AddressLiteral addrlit((address)NULL, metadata_Relocation::spec(_index));
+AddressLiteral addrlit((address)nullptr, metadata_Relocation::spec(_index));
 __ load_const(_obj, addrlit, R0);
 DEBUG_ONLY( compare_with_patch_site(__ code_section()->start() + being_initialized_entry, _pc_start, _bytes_to_copy); )
 } else if (_id == load_mirror_id || _id == load_appendix_id) {
 // Produce a copy of the load mirror instruction for use by the being initialized case.
-AddressLiteral addrlit((address)NULL, oop_Relocation::spec(_index));
+AddressLiteral addrlit((address)nullptr, oop_Relocation::spec(_index));
 __ load_const(_obj, addrlit, R0);
 DEBUG_ONLY( compare_with_patch_site(__ code_section()->start() + being_initialized_entry, _pc_start, _bytes_to_copy); )
 } else {
@@ -400,7 +400,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
 address entry = __ pc();
 NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
-address target = NULL;
+address target = nullptr;
 relocInfo::relocType reloc_type = relocInfo::none;
 switch (_id) {
 case access_field_id: target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2022 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -150,7 +150,7 @@ void LIR_Assembler::osr_entry() {
 __ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
 __ cmpdi(CCR0, R0, 0);
 __ bne(CCR0, L);
-__ stop("locked object is NULL");
+__ stop("locked object is null");
 __ bind(L);
 }
 #endif // ASSERT
@@ -171,7 +171,7 @@ int LIR_Assembler::emit_exception_handler() {
 // Generate code for the exception handler.
 address handler_base = __ start_a_stub(exception_handler_size());
-if (handler_base == NULL) {
+if (handler_base == nullptr) {
 // Not enough space left for the handler.
 bailout("exception handler overflow");
 return -1;
@@ -211,7 +211,7 @@ int LIR_Assembler::emit_unwind_handler() {
 if (preserve_exception) { __ mr(Rexception_save, Rexception); }
 // Perform needed unlocking
-MonitorExitStub* stub = NULL;
+MonitorExitStub* stub = nullptr;
 if (method()->is_synchronized()) {
 monitor_address(0, FrameMap::R4_opr);
 stub = new MonitorExitStub(FrameMap::R4_opr, true, 0);
@@ -232,7 +232,7 @@ int LIR_Assembler::emit_unwind_handler() {
 __ bctr();
 // Emit the slow path assembly.
-if (stub != NULL) {
+if (stub != nullptr) {
 stub->emit_code(this);
 }
@@ -244,7 +244,7 @@ int LIR_Assembler::emit_deopt_handler() {
 // Generate code for deopt handler.
 address handler_base = __ start_a_stub(deopt_handler_size());
-if (handler_base == NULL) {
+if (handler_base == nullptr) {
 // Not enough space left for the handler.
 bailout("deopt handler overflow");
 return -1;
@@ -261,7 +261,7 @@ int LIR_Assembler::emit_deopt_handler() {
 void LIR_Assembler::jobject2reg(jobject o, Register reg) {
-if (o == NULL) {
+if (o == nullptr) {
 __ li(reg, 0);
 } else {
 AddressLiteral addrlit = __ constant_oop_address(o);
@@ -272,10 +272,10 @@ void LIR_Assembler::jobject2reg(jobject o, Register reg) {
 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
 // Allocate a new index in table to hold the object once it's been patched.
-int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
+int oop_index = __ oop_recorder()->allocate_oop_index(nullptr);
 PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);
-AddressLiteral addrlit((address)NULL, oop_Relocation::spec(oop_index));
+AddressLiteral addrlit((address)nullptr, oop_Relocation::spec(oop_index));
 __ load_const(reg, addrlit, R0);
 patching_epilog(patch, lir_patch_normal, reg, info);
@@ -290,10 +290,10 @@ void LIR_Assembler::metadata2reg(Metadata* o, Register reg) {
 void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
 // Allocate a new index in table to hold the klass once it's been patched.
-int index = __ oop_recorder()->allocate_metadata_index(NULL);
+int index = __ oop_recorder()->allocate_metadata_index(nullptr);
 PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);
-AddressLiteral addrlit((address)NULL, metadata_Relocation::spec(index));
+AddressLiteral addrlit((address)nullptr, metadata_Relocation::spec(index));
 assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be an metadata reloc");
 __ load_const(reg, addrlit, R0);
@@ -446,10 +446,10 @@ void LIR_Assembler::emit_op3(LIR_Op3* op) {
 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
 #ifdef ASSERT
-assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
-if (op->block() != NULL) _branch_target_blocks.append(op->block());
-if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
-assert(op->info() == NULL, "shouldn't have CodeEmitInfo");
+assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label");
+if (op->block() != nullptr) _branch_target_blocks.append(op->block());
+if (op->ublock() != nullptr) _branch_target_blocks.append(op->ublock());
+assert(op->info() == nullptr, "shouldn't have CodeEmitInfo");
 #endif
 Label *L = op->label();
@@ -459,7 +459,7 @@ void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
 Label done;
 bool is_unordered = false;
 if (op->code() == lir_cond_float_branch) {
-assert(op->ublock() != NULL, "must have unordered successor");
+assert(op->ublock() != nullptr, "must have unordered successor");
 is_unordered = true;
 } else {
 assert(op->code() == lir_branch, "just checking");
@@ -636,7 +636,7 @@ bool LIR_Assembler::emit_trampoline_stub_for_call(address target, Register Rtoc)
 int start_offset = __ offset();
 // Put the entry point as a constant into the constant pool.
 const address entry_point_toc_addr = __ address_constant(target, RelocationHolder::none);
-if (entry_point_toc_addr == NULL) {
+if (entry_point_toc_addr == nullptr) {
 bailout("const section overflow");
 return false;
 }
@@ -914,7 +914,7 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
 // Null check for large offsets in LIRGenerator::do_StoreField.
 bool needs_explicit_null_check = !ImplicitNullChecks;
-if (info != NULL && needs_explicit_null_check) {
+if (info != nullptr && needs_explicit_null_check) {
 explicit_null_check(base, info);
 }
@@ -934,7 +934,7 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
 }
 case T_OBJECT: {
 tmp = FrameMap::R0_opr;
-if (UseCompressedOops && !wide && c->as_jobject() != NULL) {
+if (UseCompressedOops && !wide && c->as_jobject() != nullptr) {
 AddressLiteral oop_addr = __ constant_oop_address(c->as_jobject());
 // Don't care about sign extend (will use stw).
 __ lis(R0, 0); // Will get patched.
@@ -958,7 +958,7 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
 offset = store(tmp, base, addr->disp(), type, wide);
 }
-if (info != NULL) {
+if (info != nullptr) {
 assert(offset != -1, "offset should've been set");
 if (!needs_explicit_null_check) {
 add_debug_info_for_null_check(offset, info);
@@ -1011,7 +1011,7 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
 {
 if (to_reg->is_single_fpu()) {
 address const_addr = __ float_constant(c->as_jfloat());
-if (const_addr == NULL) {
+if (const_addr == nullptr) {
 bailout("const section overflow");
 break;
 }
@@ -1030,7 +1030,7 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
 {
 if (to_reg->is_double_fpu()) {
 address const_addr = __ double_constant(c->as_jdouble());
-if (const_addr == NULL) {
+if (const_addr == nullptr) {
 bailout("const section overflow");
 break;
 }
@@ -1123,7 +1123,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
 // null check for large offsets in LIRGenerator::do_LoadField
 bool needs_explicit_null_check = !os::zero_page_read_protected() || !ImplicitNullChecks;
-if (info != NULL && needs_explicit_null_check) {
+if (info != nullptr && needs_explicit_null_check) {
 explicit_null_check(src, info);
 }
@@ -1131,7 +1131,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
 __ verify_oop(src, FILE_AND_LINE);
 }
-PatchingStub* patch = NULL;
+PatchingStub* patch = nullptr;
 if (needs_patching) {
 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
 assert(!to_reg->is_double_cpu() ||
@@ -1165,10 +1165,10 @@ void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
 offset = load(src, disp_reg, to_reg, type, wide);
 }
-if (patch != NULL) {
+if (patch != nullptr) {
 patching_epilog(patch, patch_code, src, info);
 }
-if (info != NULL && !needs_explicit_null_check) {
+if (info != nullptr && !needs_explicit_null_check) {
 add_debug_info_for_null_check(offset, info);
 }
 }
@@ -1245,7 +1245,7 @@ void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
 // Null check for large offsets in LIRGenerator::do_StoreField.
 bool needs_explicit_null_check = !ImplicitNullChecks || use_R29;
-if (info != NULL && needs_explicit_null_check) {
+if (info != nullptr && needs_explicit_null_check) {
 explicit_null_check(src, info);
 }
@@ -1253,7 +1253,7 @@ void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
 __ verify_oop(src, FILE_AND_LINE);
 }
-PatchingStub* patch = NULL;
+PatchingStub* patch = nullptr;
 if (needs_patching) {
 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
 assert(!from_reg->is_double_cpu() ||
@@ -1296,11 +1296,11 @@ void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
 __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R0); // reinit
 }
-if (patch != NULL) {
+if (patch != nullptr) {
 patching_epilog(patch, patch_code, src, info);
 }
-if (info != NULL && !needs_explicit_null_check) {
+if (info != nullptr && !needs_explicit_null_check) {
 add_debug_info_for_null_check(offset, info);
 }
 }
@@ -1343,7 +1343,7 @@ void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 const Register poll_addr = tmp->as_register();
 __ ld(poll_addr, in_bytes(JavaThread::polling_page_offset()), R16_thread);
-if (info != NULL) {
+if (info != nullptr) {
 add_debug_info_for_branch(info);
 }
 int offset = __ offset();
@@ -1357,7 +1357,7 @@ int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 void LIR_Assembler::emit_static_call_stub() {
 address call_pc = __ pc();
 address stub = __ start_a_stub(static_call_stub_size());
-if (stub == NULL) {
+if (stub == nullptr) {
 bailout("static call stub overflow");
 return;
 }
@@ -1379,7 +1379,7 @@ void LIR_Assembler::emit_static_call_stub() {
 // - load the call target from the constant pool
 // - call
 __ calculate_address_from_global_toc(reg_scratch, __ method_toc());
-AddressLiteral ic = __ allocate_metadata_address((Metadata *)NULL);
+AddressLiteral ic = __ allocate_metadata_address((Metadata *)nullptr);
 bool success = __ load_const_from_method_toc(R19_inline_cache_reg, ic, reg_scratch, /*fixed_size*/ true);
 if (ReoptimizeCallSequences) {
@@ -1435,7 +1435,7 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
 {
 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
 jobject con = opr2->as_constant_ptr()->as_jobject();
-if (con == NULL) {
+if (con == nullptr) {
 __ cmpdi(BOOL_RESULT, opr1->as_register(), 0);
 } else {
 jobject2reg(con, R0);
@@ -1445,11 +1445,11 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
 break;
 case T_METADATA:
-// We only need, for now, comparison with NULL for metadata.
+// We only need, for now, comparison with null for metadata.
 {
 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
 Metadata* p = opr2->as_constant_ptr()->as_metadata();
-if (p == NULL) {
+if (p == nullptr) {
 __ cmpdi(BOOL_RESULT, opr1->as_register(), 0);
 } else {
 ShouldNotReachHere();
@@ -1531,7 +1531,7 @@ void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Op
 inline void load_to_reg(LIR_Assembler *lasm, LIR_Opr src, LIR_Opr dst) {
 if (src->is_constant()) {
-lasm->const2reg(src, dst, lir_patch_none, NULL);
+lasm->const2reg(src, dst, lir_patch_none, nullptr);
 } else if (src->is_register()) {
 lasm->reg2reg(src, dst);
 } else if (src->is_stack()) {
@@ -1601,7 +1601,7 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest,
 CodeEmitInfo* info, bool pop_fpu_stack) {
-assert(info == NULL, "unused on this code path");
+assert(info == nullptr, "unused on this code path");
 assert(left->is_register(), "wrong items state");
 assert(dest->is_register(), "wrong items state");
@@ -1829,7 +1829,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
 int flags = op->flags();
 ciArrayKlass* default_type = op->expected_type();
-BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
+BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
 if (basic_type == T_ARRAY) basic_type = T_OBJECT;
 // Set up the arraycopy stub information.
@@ -1840,11 +1840,11 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
 // the known type isn't loaded since the code sanity checks
 // in debug mode and the type isn't required when we know the exact type
 // also check that the type is an array type.
-if (op->expected_type() == NULL) {
+if (op->expected_type() == nullptr) {
 assert(src->is_nonvolatile() && src_pos->is_nonvolatile() && dst->is_nonvolatile() && dst_pos->is_nonvolatile() &&
 length->is_nonvolatile(), "must preserve");
 address copyfunc_addr = StubRoutines::generic_arraycopy();
-assert(copyfunc_addr != NULL, "generic arraycopy stub required");
+assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
 // 3 parms are int. Convert to long.
 __ mr(R3_ARG1, src);
@@ -1875,7 +1875,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
 return;
 }
-assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point");
+assert(default_type != nullptr && default_type->is_array_klass(), "must be true at this point");
 Label cont, slow, copyfunc;
 bool simple_check_flag_set = flags & (LIR_OpArrayCopy::src_null_check |
@@ -1998,7 +1998,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
 __ load_klass(super_klass, dst);
 __ check_klass_subtype_fast_path(sub_klass, super_klass, tmp, tmp2,
-&cont, copyfunc_addr != NULL ? &copyfunc : &slow, NULL);
+&cont, copyfunc_addr != nullptr ? &copyfunc : &slow, nullptr);
 address slow_stc = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
 //__ load_const_optimized(tmp, slow_stc, tmp2);
@@ -2007,7 +2007,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
 __ bctrl(); // sets CR0
 __ beq(CCR0, cont);
-if (copyfunc_addr != NULL) { // Use stub if available.
+if (copyfunc_addr != nullptr) { // Use stub if available.
 __ bind(copyfunc);
 // Src is not a sub class of dst so we have to do a
 // per-element check.
@@ -2348,9 +2348,9 @@ void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
 void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
 ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
 md = method->method_data_or_null();
-assert(md != NULL, "Sanity");
+assert(md != nullptr, "Sanity");
 data = md->bci_to_data(bci);
-assert(data != NULL, "need data for checkcast");
+assert(data != nullptr, "need data for checkcast");
 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
 if (!Assembler::is_simm16(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
 // The offset is large so bias the mdo by the base of the slot so
@@ -2384,12 +2384,12 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
 __ cmpdi(CCR0, obj, 0);
-ciMethodData* md = NULL;
-ciProfileData* data = NULL;
+ciMethodData* md = nullptr;
+ciProfileData* data = nullptr;
 int mdo_offset_bias = 0;
 if (should_profile) {
 ciMethod* method = op->profiled_method();
-assert(method != NULL, "Should have method");
+assert(method != nullptr, "Should have method");
 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
 Register mdo = k_RInfo;
@@ -2437,8 +2437,8 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
 need_slow_path = false;
 }
 // Perform the fast part of the checking logic.
-__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, (need_slow_path ? success_target : NULL),
-failure_target, NULL, RegisterOrConstant(k->super_check_offset()));
+__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, (need_slow_path ? success_target : nullptr),
+failure_target, nullptr, RegisterOrConstant(k->super_check_offset()));
 } else {
 // Perform the fast part of the checking logic.
 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, success_target, failure_target);
@@ -2512,12 +2512,12 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
 __ verify_oop(value, FILE_AND_LINE);
 CodeStub* stub = op->stub();
 // Check if it needs to be profiled.
-ciMethodData* md = NULL;
-ciProfileData* data = NULL;
+ciMethodData* md = nullptr;
+ciProfileData* data = nullptr;
 int mdo_offset_bias = 0;
 if (should_profile) {
 ciMethod* method = op->profiled_method();
-assert(method != NULL, "Should have method");
+assert(method != nullptr, "Should have method");
 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
 }
 Label profile_cast_success, failure, done;
@@ -2550,7 +2550,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
 // Get instance klass.
 __ ld(k_RInfo, in_bytes(ObjArrayKlass::element_klass_offset()), k_RInfo);
 // Perform the fast part of the checking logic.
-__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, success_target, &failure, NULL);
+__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, success_target, &failure, nullptr);
 // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
 const address slow_path = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
@@ -2639,7 +2639,7 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
 __ cmpxchgd(BOOL_RESULT, /*current_value=*/R0, cmp_value, new_value, addr,
 MacroAssembler::MemBarNone,
 MacroAssembler::cmpxchgx_hint_atomic_update(),
-noreg, NULL, /*check without ldarx first*/true);
+noreg, nullptr, /*check without ldarx first*/true);
 } else {
 __ cmpxchgw(BOOL_RESULT, /*current_value=*/R0, cmp_value, new_value, addr,
 MacroAssembler::MemBarNone,
@@ -2689,7 +2689,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
 if (!UseHeavyMonitors) {
 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
 // Add debug info for NullPointerException only if one is possible.
-if (op->info() != NULL) {
+if (op->info() != nullptr) {
 if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
 explicit_null_check(obj, op->info());
 } else {
@@ -2704,7 +2704,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
 // simpler and requires less duplicated code - additionally, the
 // slow locking code is the same in either case which simplifies
 // debugging.
-if (op->info() != NULL) {
+if (op->info() != nullptr) {
 add_debug_info_for_null_check_here(op->info());
 __ null_check(obj);
 }
@@ -2733,7 +2733,7 @@ void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
 Register result = op->result_opr()->as_pointer_register();
 CodeEmitInfo* info = op->info();
-if (info != NULL) {
+if (info != nullptr) {
 if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
 explicit_null_check(obj, info);
 } else {
@@ -2756,9 +2756,9 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
 // Update counter for all call types.
 ciMethodData* md = method->method_data_or_null();
-assert(md != NULL, "Sanity");
+assert(md != nullptr, "Sanity");
 ciProfileData* data = md->bci_to_data(bci);
-assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
+assert(data != nullptr && data->is_CounterData(), "need CounterData for calls");
 assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
 Register mdo = op->mdo()->as_register();
 #ifdef _LP64
@@ -2786,7 +2786,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
 assert_different_registers(mdo, tmp1, recv);
 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
 ciKlass* known_klass = op->known_holder();
-if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
+if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
 // We know the type that will be seen at this call site; we can
 // statically update the MethodData* rather than needing to do
 // dynamic tests on the receiver type.
@@ -2812,7 +2812,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
 // VirtualCallData rather than just the first time.
 for (i = 0; i < VirtualCallData::row_limit(); i++) {
 ciKlass* receiver = vc_data->receiver(i);
-if (receiver == NULL) {
+if (receiver == nullptr) {
 metadata2reg(known_klass->constant_encoding(), tmp1);
 __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - mdo_offset_bias, mdo);
@@ -2880,14 +2880,14 @@ void LIR_Assembler::rt_call(LIR_Opr result, address dest,
 __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(dest));
 __ mtctr(R0);
 __ bctrl();
-assert(info != NULL, "sanity");
+assert(info != nullptr, "sanity");
 add_call_info_here(info);
 __ post_call_nop();
 return;
 }
 __ call_c_with_frame_resize(dest, /*no resizing*/ 0);
-if (info != NULL) {
+if (info != nullptr) {
 add_call_info_here(info);
 }
 __ post_call_nop();
@@ -3072,7 +3072,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
 Label Lupdate, Ldo_update, Ldone;
 bool do_null = !not_null;
-bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
+bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
 bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
 assert(do_null || do_update, "why are we here?");
@@ -3111,7 +3111,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
 const Register klass = R29_TOC; // kill and reload
 bool klass_reg_used = false;
 #ifdef ASSERT
-if (exact_klass != NULL) {
+if (exact_klass != nullptr) {
 Label ok;
 klass_reg_used = true;
 __ load_klass(klass, obj);
@@ -3124,9 +3124,9 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
 #endif
 if (!no_conflict) {
-if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
+if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) {
 klass_reg_used = true;
-if (exact_klass != NULL) {
+if (exact_klass != nullptr) {
 __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
 metadata2reg(exact_klass->constant_encoding(), klass);
 } else {
@@ -3154,7 +3154,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
 }
 } else {
-assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
+assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
 __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
@@ -3167,7 +3167,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
 __ ori(R0, tmp, TypeEntries::type_unknown);
 } else {
 // There's a single possible klass at this profile point
-assert(exact_klass != NULL, "should be");
+assert(exact_klass != nullptr, "should be");
 __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
 if (TypeEntries::is_type_none(current_klass)) {
@@ -3192,7 +3192,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
 // First time here. Set profile type.
 __ orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
 } else {
-assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
+assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
 // Already unknown. Nothing to do anymore.

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2019 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -108,11 +108,11 @@ LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
 // PPC cannot inline all constants.
 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
-if (v->type()->as_IntConstant() != NULL) {
+if (v->type()->as_IntConstant() != nullptr) {
 return Assembler::is_simm16(v->type()->as_IntConstant()->value());
-} else if (v->type()->as_LongConstant() != NULL) {
+} else if (v->type()->as_LongConstant() != nullptr) {
 return Assembler::is_simm16(v->type()->as_LongConstant()->value());
-} else if (v->type()->as_ObjectConstant() != NULL) {
+} else if (v->type()->as_ObjectConstant() != nullptr) {
 return v->type()->as_ObjectConstant()->value()->is_null_object();
 } else {
 return false;
@@ -134,7 +134,7 @@ bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
 return Assembler::is_simm16(c->as_jlong());
 }
 if (c->type() == T_OBJECT) {
-return c->as_jobject() == NULL;
+return c->as_jobject() == nullptr;
 }
 return false;
 }
@@ -338,7 +338,7 @@ void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
 LIR_Opr scratch = FrameMap::R4_opr;
 LIR_Opr hdr = FrameMap::R6_opr;
-CodeEmitInfo* info_for_exception = NULL;
+CodeEmitInfo* info_for_exception = nullptr;
 if (x->needs_null_check()) {
 info_for_exception = state_for(x);
 }
@@ -395,7 +395,7 @@ void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
 case Bytecodes::_frem:
 case Bytecodes::_drem: {
-address entry = NULL;
+address entry = nullptr;
 switch (x->op()) {
 case Bytecodes::_frem:
 entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
@@ -406,7 +406,7 @@ void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
 default:
 ShouldNotReachHere();
 }
-LIR_Opr result = call_runtime(x->x(), x->y(), entry, x->type(), NULL);
+LIR_Opr result = call_runtime(x->x(), x->y(), entry, x->type(), nullptr);
 set_result(x, result);
 }
 break;
@@ -457,7 +457,7 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
 left.load_item();
 rlock_result(x);
 if (is_div_rem) {
-CodeEmitInfo* info = NULL; // Null check already done above.
+CodeEmitInfo* info = nullptr; // Null check already done above.
 LIR_Opr tmp = FrameMap::R0_opr;
 if (x->op() == Bytecodes::_lrem) {
 __ irem(left.result(), right.result(), x->operand(), tmp, info);
@@ -465,7 +465,7 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
 __ idiv(left.result(), right.result(), x->operand(), tmp, info);
 }
 } else {
-arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
+arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), nullptr);
 }
 }
@@ -511,7 +511,7 @@ void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
 left.load_item();
 rlock_result(x);
 if (is_div_rem) {
-CodeEmitInfo* info = NULL; // Null check already done above.
+CodeEmitInfo* info = nullptr; // Null check already done above.
 LIR_Opr tmp = FrameMap::R0_opr;
 if (x->op() == Bytecodes::_irem) {
 __ irem(left.result(), right.result(), x->operand(), tmp, info);
@@ -741,7 +741,7 @@ void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
 case vmIntrinsics::_dexp: {
 assert(x->number_of_arguments() == 1, "wrong type");
-address runtime_entry = NULL;
+address runtime_entry = nullptr;
 switch (x->id()) {
 case vmIntrinsics::_dsqrt:
 case vmIntrinsics::_dsqrt_strict:
@@ -769,14 +769,14 @@ void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
 ShouldNotReachHere();
 }
-LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL);
+LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), nullptr);
 set_result(x, result);
 break;
 }
 case vmIntrinsics::_dpow: {
 assert(x->number_of_arguments() == 2, "wrong type");
 address runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
-LIR_Opr result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_entry, x->type(), NULL);
+LIR_Opr result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_entry, x->type(), nullptr);
 set_result(x, result);
 break;
 }
@@ -829,7 +829,7 @@ void LIRGenerator::do_Convert(Convert* x) {
 if (!VM_Version::has_fcfids()) { // fcfids is >= Power7 only
 // fcfid+frsp needs fixup code to avoid rounding incompatibility.
 address entry = CAST_FROM_FN_PTR(address, SharedRuntime::l2f);
-LIR_Opr result = call_runtime(x->value(), entry, x->type(), NULL);
+LIR_Opr result = call_runtime(x->value(), entry, x->type(), nullptr);
 set_result(x, result);
 return;
 } // else fallthru
@@ -969,7 +969,7 @@ void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
 // In case of patching (i.e., object class is not yet loaded),
 // we need to reexecute the instruction and therefore provide
 // the state before the parameters have been consumed.
-CodeEmitInfo* patching_info = NULL;
+CodeEmitInfo* patching_info = nullptr;
 if (!x->klass()->is_loaded() || PatchALot) {
 patching_info = state_for(x, x->state_before());
 }
@@ -1005,14 +1005,14 @@ void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
 Values* dims = x->dims();
 int i = dims->length();
-LIRItemList* items = new LIRItemList(i, i, NULL);
+LIRItemList* items = new LIRItemList(i, i, nullptr);
 while (i-- > 0) {
 LIRItem* size = new LIRItem(dims->at(i), this);
 items->at_put(i, size);
 }
 // Evaluate state_for early since it may emit code.
-CodeEmitInfo* patching_info = NULL;
+CodeEmitInfo* patching_info = nullptr;
 if (!x->klass()->is_loaded() || PatchALot) {
 patching_info = state_for(x, x->state_before());
@@ -1069,7 +1069,7 @@ void LIRGenerator::do_BlockBegin(BlockBegin* x) {
 void LIRGenerator::do_CheckCast(CheckCast* x) {
 LIRItem obj(x->obj(), this);
-CodeEmitInfo* patching_info = NULL;
+CodeEmitInfo* patching_info = nullptr;
 if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
 // Must do this before locking the destination register as
 // an oop register, and before the obj is loaded (so x->obj()->item()
@@ -1084,11 +1084,11 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
 state_for(x, x->state_before(), true /*ignore_xhandler*/));
 if (x->is_incompatible_class_change_check()) {
-assert(patching_info == NULL, "can't patch this");
+assert(patching_info == nullptr, "can't patch this");
 stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id,
 LIR_OprFact::illegalOpr, info_for_exception);
 } else if (x->is_invokespecial_receiver_check()) {
-assert(patching_info == NULL, "can't patch this");
+assert(patching_info == nullptr, "can't patch this");
 stub = new DeoptimizeStub(info_for_exception,
 Deoptimization::Reason_class_check,
 Deoptimization::Action_none);
@@ -1107,7 +1107,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
 LIRItem obj(x->obj(), this);
-CodeEmitInfo* patching_info = NULL;
+CodeEmitInfo* patching_info = nullptr;
 if (!x->klass()->is_loaded() || PatchALot) {
 patching_info = state_for(x, x->state_before());
 }
@@ -1248,7 +1248,7 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
 offset += off.result()->as_jint();
 }
 LIR_Opr base_op = buf.result();
-LIR_Address* a = NULL;
+LIR_Address* a = nullptr;
 if (index->is_valid()) {
 LIR_Opr tmp = new_register(T_LONG);
@@ -1318,7 +1318,7 @@ void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
 offset += off.result()->as_jint();
 }
 LIR_Opr base_op = buf.result();
-LIR_Address* a = NULL;
+LIR_Address* a = nullptr;
 if (index->is_valid()) {
 LIR_Opr tmp = new_register(T_LONG);

View File

@@ -399,7 +399,7 @@ void C1_MacroAssembler::null_check(Register r, Label* Lnull) {
 trap_null_check(r);
 } else { // explicit
 //const address exception_entry = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
-assert(Lnull != NULL, "must have Label for explicit check");
+assert(Lnull != nullptr, "must have Label for explicit check");
 cmpdi(CCR0, r, 0);
 bc_far_optimized(Assembler::bcondCRbiIs1, bi0(CCR0, Assembler::equal), *Lnull);
 }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -86,7 +86,7 @@
 Label& slow_case // continuation point if fast allocation fails
 );
-void null_check(Register r, Label *Lnull = NULL);
+void null_check(Register r, Label *Lnull = nullptr);
 address call_c_with_frame_resize(address dest, int frame_resize);

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -370,7 +370,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
 // return to the deoptimization handler entry that will cause re-execution
 // of the current bytecode.
 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
-assert(deopt_blob != NULL, "deoptimization blob must have been created");
+assert(deopt_blob != nullptr, "deoptimization blob must have been created");
 // Return to the deoptimization handler entry for unpacking and rexecute.
 // If we simply returned the we'd deopt as if any call we patched had just
@@ -390,7 +390,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
 }
 OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
-OopMapSet* oop_maps = NULL;
+OopMapSet* oop_maps = nullptr;
 // For better readability.
 const bool must_gc_arguments = true;
@@ -648,7 +648,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
 oop_maps = stub_call_with_stack_parms(sasm, noreg, CAST_FROM_FN_PTR(address, deoptimize), 1, /*do_return*/ false);
 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
-assert(deopt_blob != NULL, "deoptimization blob must have been created");
+assert(deopt_blob != nullptr, "deoptimization blob must have been created");
 address stub = deopt_blob->unpack_with_reexecution();
 //__ load_const_optimized(R0, stub);
 __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
@@ -716,7 +716,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
 oop_maps->add_gc_map(call_offset, oop_map);
 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
-assert(deopt_blob != NULL, "deoptimization blob must have been created");
+assert(deopt_blob != nullptr, "deoptimization blob must have been created");
 restore_live_registers(sasm, noreg, noreg);
 address stub = deopt_blob->unpack_with_reexecution();
@@ -754,7 +754,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
 // Save registers, if required.
 OopMapSet* oop_maps = new OopMapSet();
-OopMap* oop_map = NULL;
+OopMap* oop_map = nullptr;
 const Register Rexception = R3 /*LIRGenerator::exceptionOopOpr()*/,
 Rexception_pc = R4 /*LIRGenerator::exceptionPcOpr()*/;

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2022, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -35,7 +35,7 @@ int C2SafepointPollStub::max_size() const {
}
void C2SafepointPollStub::emit(C2_MacroAssembler& masm) {
-assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
+assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
"polling page return stub not created yet");
address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -301,7 +301,7 @@ void C2_MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register
cmpd(CCR0, ary1, ary2);
beq(CCR0, Lskiploop);
-// Return false if one of them is NULL.
+// Return false if one of them is null.
cmpdi(CCR0, ary1, 0);
cmpdi(CCR1, ary2, 0);
li(result, 0);

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -79,9 +79,9 @@
const int IC_pos_in_java_to_interp_stub = 8;
#define __ _masm.
-address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = NULL*/) {
+address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = nullptr*/) {
#ifdef COMPILER2
-if (mark == NULL) {
+if (mark == nullptr) {
// Get the mark within main instrs section which is set to the address of the call.
mark = cbuf.insts_mark();
}
@@ -92,8 +92,8 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/*
// Start the stub.
address stub = __ start_a_stub(CompiledStaticCall::to_interp_stub_size());
-if (stub == NULL) {
+if (stub == nullptr) {
-return NULL; // CodeCache is full
+return nullptr; // CodeCache is full
}
// For java_to_interp stubs we use R11_scratch1 as scratch register
@@ -113,11 +113,11 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/*
// - load the call target from the constant pool
// - call
__ calculate_address_from_global_toc(reg_scratch, __ method_toc());
-AddressLiteral ic = __ allocate_metadata_address((Metadata *)NULL);
+AddressLiteral ic = __ allocate_metadata_address((Metadata *)nullptr);
bool success = __ load_const_from_method_toc(as_Register(Matcher::inline_cache_reg_encode()),
ic, reg_scratch, /*fixed_size*/ true);
if (!success) {
-return NULL; // CodeCache is full
+return nullptr; // CodeCache is full
}
if (ReoptimizeCallSequences) {
@@ -126,7 +126,7 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/*
AddressLiteral a((address)-1);
success = __ load_const_from_method_toc(reg_scratch, a, reg_scratch, /*fixed_size*/ true);
if (!success) {
-return NULL; // CodeCache is full
+return nullptr; // CodeCache is full
}
__ mtctr(reg_scratch);
__ bctr();
@@ -145,7 +145,7 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/*
return stub;
#else
ShouldNotReachHere();
-return NULL;
+return nullptr;
#endif
}
#undef __
@@ -165,7 +165,7 @@ int CompiledStaticCall::reloc_to_interp_stub() {
void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub();
-guarantee(stub != NULL, "stub not found");
+guarantee(stub != nullptr, "stub not found");
if (TraceICs) {
ResourceMark rm;
@@ -191,7 +191,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
// Reset stub.
address stub = static_stub->addr();
-assert(stub != NULL, "stub not found");
+assert(stub != nullptr, "stub not found");
assert(CompiledICLocker::is_safe(stub), "mt unsafe call");
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + IC_pos_in_java_to_interp_stub);
@@ -211,7 +211,7 @@ void CompiledDirectStaticCall::verify() {
// Verify stub.
address stub = find_stub();
-assert(stub != NULL, "no stub found for static call");
+assert(stub != nullptr, "no stub found for static call");
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + IC_pos_in_java_to_interp_stub);
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());

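A recurring shape in the stub emitter above: every allocation from the code buffer is checked, and `nullptr` is propagated upward so the compiler can bail out when the CodeCache is full. A toy sketch of that propagation; `start_a_stub`, `emit_example_stub`, and the fixed buffer are stand-ins, not HotSpot's real API:

#include <cstddef>
#include <cstdio>

using address = unsigned char*;

// Toy stand-in for the code cache: a fixed buffer that can run out of room.
static unsigned char code_cache[128];
static std::size_t   used = 0;

static address start_a_stub(std::size_t size) {
  if (used + size > sizeof(code_cache)) return nullptr;  // cache full
  address p = code_cache + used;
  used += size;
  return p;
}

static address emit_example_stub() {
  address stub = start_a_stub(64);
  if (stub == nullptr) {
    return nullptr;  // propagate "CodeCache is full" to the caller
  }
  // ... emit instructions into 'stub' here ...
  return stub;
}

int main() {
  while (address s = emit_example_stub()) {
    std::printf("stub at offset %zu\n", static_cast<std::size_t>(s - code_cache));
  }
  std::printf("code cache full, compilation bails out\n");
  return 0;
}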
View File

@@ -533,7 +533,7 @@ template<typename FKind> frame ThawBase::new_stack_frame(const frame& hf, frame&
inline intptr_t* ThawBase::align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom) {
// Unused. Alignment is done directly in new_stack_frame() / finish_thaw().
-return NULL;
+return nullptr;
}
static inline void derelativize_one(intptr_t* const fp, int offset) {

View File

@@ -30,7 +30,7 @@
template<typename FKind>
static inline intptr_t** link_address(const frame& f) {
Unimplemented();
-return NULL;
+return nullptr;
}
inline int ContinuationHelper::frame_align_words(int size) {

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -39,7 +39,7 @@
// the perfect job. In those cases, decode_instruction0 may kick in
// and do it right.
// If nothing had to be done, just return "here", otherwise return "here + instr_len(here)"
-static address decode_instruction0(address here, outputStream* st, address virtual_begin = NULL);
+static address decode_instruction0(address here, outputStream* st, address virtual_begin = nullptr);
// platform-specific instruction annotations (like value of loaded constants)
static void annotate(address pc, outputStream* st);

View File

@@ -82,7 +82,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// construct the sender and do some validation of it. This goes a long way
// toward eliminating issues when we get in frame construction code
-if (_cb != NULL) {
+if (_cb != nullptr) {
// First check if the frame is complete and the test is reliable.
// Unfortunately we can only check frame completeness for runtime stubs
@@ -111,7 +111,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
}
// At this point, there still is a chance that fp_safe is false.
-// In particular, (fp == NULL) might be true. So let's check and
+// In particular, fp might be null. So let's check and
// bail out before we actually dereference from fp.
if (!fp_safe) {
return false;
@@ -130,7 +130,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// We must always be able to find a recognizable pc.
CodeBlob* sender_blob = CodeCache::find_blob(sender_pc);
-if (sender_blob == NULL) {
+if (sender_blob == nullptr) {
return false;
}
@@ -186,7 +186,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
}
frame frame::sender_for_entry_frame(RegisterMap *map) const {
-assert(map != NULL, "map must be set");
+assert(map != nullptr, "map must be set");
// Java frame called from C; skip all C frames and return top C
// frame of that chunk as the sender.
JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
@@ -195,7 +195,7 @@ frame frame::sender_for_entry_frame(RegisterMap *map) const {
map->clear();
assert(map->include_argument_oops(), "should be set by clear");
-if (jfa->last_Java_pc() != NULL) {
+if (jfa->last_Java_pc() != nullptr) {
frame fr(jfa->last_Java_sp(), jfa->last_Java_pc());
return fr;
}
@@ -262,7 +262,7 @@ void frame::patch_pc(Thread* thread, address pc) {
own_abi()->lr = (uint64_t)pc;
_pc = pc; // must be set before call to get_deopt_original_pc
address original_pc = CompiledMethod::get_deopt_original_pc(this);
-if (original_pc != NULL) {
+if (original_pc != nullptr) {
assert(original_pc == old_pc, "expected original PC to be stored before patching");
_deopt_state = is_deoptimized;
_pc = original_pc;

View File

@@ -127,7 +127,7 @@ inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address
// Accessors
// Return unique id for this frame. The id must have a value where we
-// can distinguish identity and younger/older relationship. NULL
+// can distinguish identity and younger/older relationship. null
// represents an invalid (incomparable) frame.
inline intptr_t* frame::id(void) const {
// Use _fp. _sp or _unextended_sp wouldn't be correct due to resizing.
@@ -137,7 +137,7 @@ inline intptr_t* frame::id(void) const {
// Return true if this frame is older (less recent activation) than
// the frame represented by id.
inline bool frame::is_older(intptr_t* id) const {
-assert(this->id() != NULL && id != NULL, "NULL frame id");
+assert(this->id() != nullptr && id != nullptr, "null frame id");
// Stack grows towards smaller addresses on ppc64.
return this->id() > id;
}
@@ -320,7 +320,7 @@ inline frame frame::sender_for_compiled_frame(RegisterMap *map) const {
} else {
assert(!_cb->caller_must_gc_arguments(map->thread()), "");
assert(!map->include_argument_oops(), "");
-assert(oop_map() == NULL || !oop_map()->has_any(OopMapValue::callee_saved_value), "callee-saved value in compiled frame");
+assert(oop_map() == nullptr || !oop_map()->has_any(OopMapValue::callee_saved_value), "callee-saved value in compiled frame");
}
}
@@ -339,29 +339,29 @@ inline frame frame::sender_for_compiled_frame(RegisterMap *map) const {
inline oop frame::saved_oop_result(RegisterMap* map) const {
oop* result_adr = (oop *)map->location(R3->as_VMReg(), sp());
-guarantee(result_adr != NULL, "bad register save location");
+guarantee(result_adr != nullptr, "bad register save location");
return *result_adr;
}
inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
oop* result_adr = (oop *)map->location(R3->as_VMReg(), sp());
-guarantee(result_adr != NULL, "bad register save location");
+guarantee(result_adr != nullptr, "bad register save location");
*result_adr = obj;
}
inline const ImmutableOopMap* frame::get_oop_map() const {
-if (_cb == NULL) return NULL;
+if (_cb == nullptr) return nullptr;
-if (_cb->oop_maps() != NULL) {
+if (_cb->oop_maps() != nullptr) {
NativePostCallNop* nop = nativePostCallNop_at(_pc);
-if (nop != NULL && nop->displacement() != 0) {
+if (nop != nullptr && nop->displacement() != 0) {
int slot = ((nop->displacement() >> 24) & 0xff);
return _cb->oop_map_for_slot(slot, _pc);
}
const ImmutableOopMap* oop_map = OopMapSet::find_map(this);
return oop_map;
}
-return NULL;
+return nullptr;
}
inline int frame::compiled_frame_stack_argsize() const {
@@ -370,7 +370,7 @@ inline int frame::compiled_frame_stack_argsize() const {
}
inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const {
-assert(mask != NULL, "");
+assert(mask != nullptr, "");
Method* m = interpreter_frame_method();
int bci = interpreter_frame_bci();
m->mask_for(bci, mask); // OopMapCache::compute_one_oop_map(m, bci, mask);

View File

@@ -246,7 +246,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato
__ srdi_(tmp1, tmp1, HeapRegion::LogOfHRGrainBytes);
__ beq(CCR0, filtered);
-// Crosses regions, storing NULL?
+// Crosses regions, storing null?
if (not_null) {
#ifdef ASSERT
__ cmpdi(CCR0, new_val, 0);
@@ -257,7 +257,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato
__ beq(CCR0, filtered);
}
-// Storing region crossing non-NULL, is card already dirty?
+// Storing region crossing non-null, is card already dirty?
const Register Rcard_addr = tmp1;
Register Rbase = tmp2;
__ load_const_optimized(Rbase, (address)(ct->card_table()->byte_map_base()), /*temp*/ tmp3);
@@ -274,7 +274,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato
__ cmpwi(CCR0, tmp3 /* card value */, (int)G1CardTable::dirty_card_val());
__ beq(CCR0, filtered);
-// Storing a region crossing, non-NULL oop, card is clean.
+// Storing a region crossing, non-null oop, card is clean.
// Dirty card and log.
__ li(tmp3, (int)G1CardTable::dirty_card_val());
//release(); // G1: oops are allowed to get visible after dirty marking.
@@ -325,7 +325,7 @@ void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet deco
tmp1, tmp2, tmp3,
preservation_level);
-// No need for post barrier if storing NULL
+// No need for post barrier if storing null
if (val != noreg) {
if (precise) {
if (ind_or_offs.is_constant()) {
@@ -350,7 +350,7 @@ void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorator
bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
bool on_reference = on_weak || on_phantom;
Label done;
-if (on_oop && on_reference && L_handle_null == NULL) { L_handle_null = &done; }
+if (on_oop && on_reference && L_handle_null == nullptr) { L_handle_null = &done; }
// Load the value of the referent field.
ModRefBarrierSetAssembler::load_at(masm, decorators, type,
base, ind_or_offs, dst,
@@ -375,7 +375,7 @@ void G1BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value
MacroAssembler::PreservationLevel preservation_level) {
Label done, not_weak;
__ cmpdi(CCR0, value, 0);
-__ beq(CCR0, done); // Use NULL as-is.
+__ beq(CCR0, done); // Use null as-is.
__ clrrdi(tmp1, value, JNIHandles::tag_size);
__ andi_(tmp2, value, JNIHandles::TypeTag::weak_global);

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2021 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -71,7 +71,7 @@ public:
Register base, RegisterOrConstant ind_or_offs, Register dst,
Register tmp1, Register tmp2,
MacroAssembler::PreservationLevel preservation_level,
-Label *L_handle_null = NULL);
+Label *L_handle_null = nullptr);
virtual void resolve_jobject(MacroAssembler* masm, Register value,
Register tmp1, Register tmp2,

View File

@@ -85,7 +85,7 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators,
case T_ARRAY:
case T_OBJECT: {
if (UseCompressedOops && in_heap) {
-if (L_handle_null != NULL) { // Label provided.
+if (L_handle_null != nullptr) { // Label provided.
__ lwz(dst, ind_or_offs, base);
__ cmpwi(CCR0, dst, 0);
__ beq(CCR0, *L_handle_null);
@@ -100,7 +100,7 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators,
}
} else {
__ ld(dst, ind_or_offs, base);
-if (L_handle_null != NULL) {
+if (L_handle_null != nullptr) {
__ cmpdi(CCR0, dst, 0);
__ beq(CCR0, *L_handle_null);
}
@@ -117,7 +117,7 @@ void BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value,
MacroAssembler::PreservationLevel preservation_level) {
Label done, tagged, weak_tagged, verify;
__ cmpdi(CCR0, value, 0);
-__ beq(CCR0, done); // Use NULL as-is.
+__ beq(CCR0, done); // Use null as-is.
__ andi_(tmp1, value, JNIHandles::tag_mask);
__ bne(CCR0, tagged); // Test for tag.
@@ -151,7 +151,7 @@ void BarrierSetAssembler::resolve_global_jobject(MacroAssembler* masm, Register
Label done;
__ cmpdi(CCR0, value, 0);
-__ beq(CCR0, done); // Use NULL as-is.
+__ beq(CCR0, done); // Use null as-is.
#ifdef ASSERT
{

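The tagged-jobject decode above (test for null, mask off the tag bits, branch on the weak-global tag) can be modeled in plain C++. A hedged sketch; the tag constants here are illustrative, not the exact JNIHandles encoding:

#include <cstdint>
#include <cstdio>

constexpr std::uintptr_t kTagMask    = 0x3;  // assumed tag width
constexpr std::uintptr_t kWeakGlobal = 0x1;  // assumed weak-global tag

using oop = std::uintptr_t;

oop resolve_jobject(std::uintptr_t handle) {
  if (handle == 0) return 0;                 // use null as-is
  std::uintptr_t slot = handle & ~kTagMask;  // clrrdi: strip the tag bits
  // For a weak-global handle ((handle & kTagMask) == kWeakGlobal) the GC may
  // already have cleared the slot, so callers must tolerate a null result.
  return *reinterpret_cast<oop*>(slot);      // load through the handle
}

int main() {
  oop referent = 42;  // pretend this is the handle's referent slot
  std::uintptr_t h = reinterpret_cast<std::uintptr_t>(&referent);
  std::printf("resolved: %ju\n", static_cast<std::uintmax_t>(resolve_jobject(h)));
  std::printf("null handle: %ju\n", static_cast<std::uintmax_t>(resolve_jobject(0)));
  return 0;
}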
View File

@@ -51,7 +51,7 @@ public:
virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register base, RegisterOrConstant ind_or_offs, Register dst,
Register tmp1, Register tmp2,
-MacroAssembler::PreservationLevel preservation_level, Label *L_handle_null = NULL);
+MacroAssembler::PreservationLevel preservation_level, Label *L_handle_null = nullptr);
virtual void resolve_jobject(MacroAssembler* masm, Register value,
Register tmp1, Register tmp2,

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2021 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -97,7 +97,7 @@ void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorS
tmp1, tmp2, tmp3,
preservation_level);
-// No need for post barrier if storing NULL
+// No need for post barrier if storing null
if (val != noreg) {
if (precise) {
if (ind_or_offs.is_constant()) {

View File

@@ -82,7 +82,7 @@ void ModRefBarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register v
MacroAssembler::PreservationLevel preservation_level) {
Label done;
__ cmpdi(CCR0, value, 0);
-__ beq(CCR0, done); // Use NULL as-is.
+__ beq(CCR0, done); // Use null as-is.
__ clrrdi(tmp1, value, JNIHandles::tag_size);
__ ld(value, 0, tmp1); // Resolve (untagged) jobject.

View File

@@ -156,7 +156,7 @@ void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler *masm, Dec
}
// Invoke runtime.
-address jrt_address = NULL;
+address jrt_address = nullptr;
if (UseCompressedOops) {
jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop_entry);
} else {
@@ -686,7 +686,7 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler *masm, Register b
false, success_flag, true);
} else {
__ cmpxchgd(CCR0, current_value, expected, new_val, base_addr, MacroAssembler::MemBarNone,
-false, success_flag, NULL, true);
+false, success_flag, nullptr, true);
}
// Skip the rest of the barrier if the CAS operation succeeds immediately.
@@ -963,7 +963,7 @@ void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_s
bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
bool is_native = ShenandoahBarrierSet::is_native_access(decorators);
-address jrt_address = NULL;
+address jrt_address = nullptr;
if (is_strong) {
if (is_native) {
@@ -987,7 +987,7 @@ void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_s
assert(is_native, "phantom load reference barrier must be called off-heap");
jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
}
-assert(jrt_address != NULL, "load reference barrier runtime routine cannot be found");
+assert(jrt_address != nullptr, "load reference barrier runtime routine cannot be found");
__ save_LR_CR(R11_tmp);
__ push_frame_reg_args(nbytes_save, R11_tmp);

View File

@@ -110,7 +110,7 @@ public:
virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register base, RegisterOrConstant ind_or_offs, Register dst,
Register tmp1, Register tmp2,
-MacroAssembler::PreservationLevel preservation_level, Label* L_handle_null = NULL);
+MacroAssembler::PreservationLevel preservation_level, Label* L_handle_null = nullptr);
virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register dst, Register jni_env,
Register obj, Register tmp, Label& slowpath);

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2022 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -48,7 +48,7 @@ public:
virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register base, RegisterOrConstant ind_or_offs, Register dst,
Register tmp1, Register tmp2,
-MacroAssembler::PreservationLevel preservation_level, Label *L_handle_null = NULL);
+MacroAssembler::PreservationLevel preservation_level, Label *L_handle_null = nullptr);
#ifdef ASSERT
virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,

View File

@@ -34,7 +34,7 @@
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks.
define_pd_global(bool, TrapBasedNullChecks, true);
-define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs passed to check cast.
+define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap nulls passed to check cast.
define_pd_global(bool, DelayCompilerStubsGeneration, COMPILER2_OR_JVMCI);

View File

@@ -79,7 +79,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
// Load object from cpool->resolved_references(index).
void load_resolved_reference_at_index(Register result, Register index, Register tmp1, Register tmp2,
-Label *L_handle_null = NULL);
+Label *L_handle_null = nullptr);
// load cpool->resolved_klass_at(index)
void load_resolved_klass_at_offset(Register Rcpool, Register Roffset, Register Rklass);

View File

@@ -932,7 +932,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
// // We stored the monitor address into the object's mark word.
// } else if (THREAD->is_lock_owned((address)displaced_header))
// // Simple recursive case.
-// monitor->lock()->set_displaced_header(NULL);
+// monitor->lock()->set_displaced_header(nullptr);
// } else {
// // Slow path.
// InterpreterRuntime::monitorenter(THREAD, monitor);
@@ -993,7 +993,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
// } else if (THREAD->is_lock_owned((address)displaced_header))
// // Simple recursive case.
-// monitor->lock()->set_displaced_header(NULL);
+// monitor->lock()->set_displaced_header(nullptr);
// We did not see an unlocked object so try the fast recursive case.
@@ -1043,12 +1043,12 @@ void InterpreterMacroAssembler::unlock_object(Register monitor) {
// template code:
//
-// if ((displaced_header = monitor->displaced_header()) == NULL) {
+// if ((displaced_header = monitor->displaced_header()) == nullptr) {
-// // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
+// // Recursive unlock. Mark the monitor unlocked by setting the object field to null.
-// monitor->set_obj(NULL);
+// monitor->set_obj(nullptr);
// } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
// // We swapped the unlocked mark in displaced_header into the object's mark word.
-// monitor->set_obj(NULL);
+// monitor->set_obj(nullptr);
// } else {
// // Slow path.
// InterpreterRuntime::monitorexit(monitor);
@@ -1074,7 +1074,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor) {
// } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
// // We swapped the unlocked mark in displaced_header into the object's mark word.
-// monitor->set_obj(NULL);
+// monitor->set_obj(nullptr);
// If we still have a lightweight lock, unlock the object and be done.
@@ -1109,7 +1109,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor) {
Label done;
b(done); // Monitor register may be overwritten! Runtime has already freed the slot.
-// Exchange worked, do monitor->set_obj(NULL);
+// Exchange worked, do monitor->set_obj(nullptr);
align(32, 12);
bind(free_slot);
li(R0, 0);
@@ -1701,7 +1701,7 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
}
// In the fall-through case, we found no matching receiver, but we
-// observed the receiver[start_row] is NULL.
+// observed the receiver[start_row] is null.
// Fill in the receiver field and increment the count.
int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
@@ -2114,7 +2114,7 @@ void InterpreterMacroAssembler::check_and_forward_exception(Register Rscratch1,
li(Rtmp, 0);
mr_if_needed(R3, Rexception);
std(Rtmp, thread_(pending_exception)); // Clear exception in thread
-if (Interpreter::rethrow_exception_entry() != NULL) {
+if (Interpreter::rethrow_exception_entry() != nullptr) {
// Already got entry address.
load_dispatch_table(Rtmp, (address*)Interpreter::rethrow_exception_entry());
} else {

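The commented template in the unlock hunks above describes the displaced-header monitor exit. A hedged model of the same fast path using C++ atomics; Monitor, Object, and fast_monitor_exit are illustrative names, and the real mark-word encoding is more involved:

#include <atomic>
#include <cstdint>

struct Object  { std::atomic<std::uintptr_t> mark; };
struct Monitor {
  std::uintptr_t displaced_header;  // 0 marks a recursive (nested) lock
  Object*        obj;
};

// Returns true on the fast path, false when the slow runtime call is needed.
bool fast_monitor_exit(Monitor* monitor) {
  if (monitor->displaced_header == 0) {
    monitor->obj = nullptr;          // recursive unlock: just free the slot
    return true;
  }
  std::uintptr_t expected = reinterpret_cast<std::uintptr_t>(monitor);
  if (monitor->obj->mark.compare_exchange_strong(expected,
                                                 monitor->displaced_header)) {
    monitor->obj = nullptr;          // restored the unlocked mark word
    return true;
  }
  return false;                      // contended: caller takes the slow path
}

int main() {
  Object obj;
  obj.mark.store(0);
  Monitor mon;
  mon.displaced_header = 0;          // exercise the recursive case
  mon.obj = &obj;
  return fast_monitor_exit(&mon) ? 0 : 1;
}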
View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -99,10 +99,10 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
Register r = jni_arg.is_register() ? jni_arg.as_register() : R11_scratch1;
// The handle for a receiver will never be null.
-bool do_NULL_check = offset() != 0 || is_static();
+bool do_null_check = offset() != 0 || is_static();
Label do_null;
-if (do_NULL_check) {
+if (do_null_check) {
__ ld(R0, locals_j_arg_at(offset()));
__ cmpdi(CCR0, R0, 0);
__ li(r, 0);

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2014 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -35,10 +35,10 @@ public:
inline void clear(void) {
// clearing _last_Java_sp must be first
-_last_Java_sp = NULL;
+_last_Java_sp = nullptr;
// fence?
OrderAccess::release();
-_last_Java_pc = NULL;
+_last_Java_pc = nullptr;
}
inline void set(intptr_t* sp, address pc) {
@@ -52,10 +52,10 @@ public:
// We must clear _last_Java_sp before copying the rest of the new data.
//
// Hack Alert: Temporary bugfix for 4717480/4721647
-// To act like previous version (pd_cache_state) don't NULL _last_Java_sp
+// To act like previous version (pd_cache_state) don't null _last_Java_sp
// unless the value is changing.
if (_last_Java_sp != src->_last_Java_sp) {
-_last_Java_sp = NULL;
+_last_Java_sp = nullptr;
OrderAccess::release();
}
_last_Java_pc = src->_last_Java_pc;

View File
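The ordering rule in clear() above (invalidate _last_Java_sp first, release, then touch _last_Java_pc) keeps a concurrent observer from pairing a valid sp with a stale pc. A simplified sketch of that publication pattern with C++ atomics; the types and names are reduced from the real anchor:

#include <atomic>
#include <cstdint>

struct FrameAnchor {
  std::atomic<std::intptr_t*> last_Java_sp{nullptr};
  std::atomic<const void*>    last_Java_pc{nullptr};

  void clear() {
    // Readers test sp first, so invalidate it before touching pc.
    last_Java_sp.store(nullptr, std::memory_order_release);
    last_Java_pc.store(nullptr, std::memory_order_relaxed);
  }

  void set(std::intptr_t* sp, const void* pc) {
    last_Java_pc.store(pc, std::memory_order_relaxed);
    // Publish pc before sp becomes visible: the release store pairs with a
    // reader's acquire load of sp.
    last_Java_sp.store(sp, std::memory_order_release);
  }
};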

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -56,7 +56,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
case T_FLOAT: name = "jni_fast_GetFloatField"; break;
case T_DOUBLE: name = "jni_fast_GetDoubleField"; break;
default: ShouldNotReachHere();
-name = NULL; // unreachable
+name = nullptr; // unreachable
}
ResourceMark rm;
BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE);
@@ -149,7 +149,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
case T_FLOAT: slow_case_addr = jni_GetFloatField_addr(); break;
case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr(); break;
default: ShouldNotReachHere();
-slow_case_addr = NULL; // unreachable
+slow_case_addr = nullptr; // unreachable
}
__ load_const_optimized(R12, slow_case_addr, R0);
__ call_c_and_return_to_caller(R12); // tail call

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -61,7 +61,7 @@ void CodeInstaller::pd_relocate_poll(address pc, jint mark, TRAPS) {
// convert JVMCI register indices (as used in oop maps) to HotSpot registers
VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg, TRAPS) {
-return NULL;
+return nullptr;
}
bool CodeInstaller::is_general_purpose_reg(VMReg hotspotRegister) {

View File

@@ -274,7 +274,7 @@ bool MacroAssembler::load_const_from_method_toc(Register dst, AddressLiteral& a,
// pool entries instead of inserting it at the loads; patching of a constant
// pool entry should be less expensive.
address const_address = address_constant((address)a.value(), RelocationHolder::none);
-if (const_address == NULL) { return false; } // allocation failure
+if (const_address == nullptr) { return false; } // allocation failure
// Relocate at the pc of the load.
relocate(a.rspec());
toc_offset = (int)(const_address - code()->consts()->start());
@@ -361,27 +361,27 @@ void MacroAssembler::patch_const(address a, long x) {
}
AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
-assert(oop_recorder() != NULL, "this assembler needs a Recorder");
+assert(oop_recorder() != nullptr, "this assembler needs a Recorder");
int index = oop_recorder()->allocate_metadata_index(obj);
RelocationHolder rspec = metadata_Relocation::spec(index);
return AddressLiteral((address)obj, rspec);
}
AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
-assert(oop_recorder() != NULL, "this assembler needs a Recorder");
+assert(oop_recorder() != nullptr, "this assembler needs a Recorder");
int index = oop_recorder()->find_index(obj);
RelocationHolder rspec = metadata_Relocation::spec(index);
return AddressLiteral((address)obj, rspec);
}
AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
-assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
+assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int oop_index = oop_recorder()->allocate_oop_index(obj);
return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
}
AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
-assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
+assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder");
int oop_index = oop_recorder()->find_index(obj);
return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
}
@@ -466,7 +466,7 @@ address MacroAssembler::get_dest_of_bc_far_at(address instruction_addr) {
}
// variant 4 ???
ShouldNotReachHere();
-return NULL;
+return nullptr;
}
void MacroAssembler::set_dest_of_bc_far_at(address instruction_addr, address dest) {
@@ -706,7 +706,7 @@ address MacroAssembler::get_dest_of_bxx64_patchable_at(address instruction_addr,
instruction_addr);
} else {
ShouldNotReachHere();
-return NULL;
+return nullptr;
}
}
@@ -1086,14 +1086,14 @@ address MacroAssembler::call_c(const FunctionDescriptor* fd, relocInfo::relocTyp
// this call needs to be relocatable
if (!ReoptimizeCallSequences
|| (rt != relocInfo::runtime_call_type && rt != relocInfo::none)
-|| fd == NULL // support code-size estimation
+|| fd == nullptr // support code-size estimation
|| !fd->is_friend_function()
-|| fd->entry() == NULL) {
+|| fd->entry() == nullptr) {
// it's not a friend function as defined by class FunctionDescriptor,
// so do a full call-c here.
load_const(R11, (address)fd, R0);
-bool has_env = (fd != NULL && fd->env() != NULL);
+bool has_env = (fd != nullptr && fd->env() != nullptr);
return branch_to(R11, /*and_link=*/true,
/*save toc=*/false,
/*restore toc=*/false,
@@ -1150,12 +1150,12 @@ address MacroAssembler::call_c_using_toc(const FunctionDescriptor* fd,
|| !fd->is_friend_function()) {
// It's not a friend function as defined by class FunctionDescriptor,
// so do a full call-c here.
-assert(fd->entry() != NULL, "function must be linked");
+assert(fd->entry() != nullptr, "function must be linked");
AddressLiteral fd_entry(fd->entry());
bool success = load_const_from_method_toc(R11, fd_entry, toc, /*fixed_size*/ true);
mtctr(R11);
-if (fd->env() == NULL) {
+if (fd->env() == nullptr) {
li(R11, 0);
nop();
} else {
@@ -1167,7 +1167,7 @@ address MacroAssembler::call_c_using_toc(const FunctionDescriptor* fd,
success = success && load_const_from_method_toc(R2_TOC, fd_toc, toc, /*fixed_size*/ true);
bctrl();
_last_calls_return_pc = pc();
-if (!success) { return NULL; }
+if (!success) { return nullptr; }
} else {
// It's a friend function, load the entry point and don't care about
// toc and env. Use an optimizable call instruction, but ensure the
@@ -1306,8 +1306,8 @@ bool MacroAssembler::is_load_from_polling_page(int instruction, void* ucontext,
if (!ucontext) {
// Set polling address.
-if (polling_address_ptr != NULL) {
+if (polling_address_ptr != nullptr) {
-*polling_address_ptr = NULL;
+*polling_address_ptr = nullptr;
}
return true; // No ucontext given. Can't check value of ra. Assume true.
}
@@ -1318,12 +1318,12 @@ bool MacroAssembler::is_load_from_polling_page(int instruction, void* ucontext,
ucontext_t* uc = (ucontext_t*) ucontext;
// Set polling address.
address addr = (address)uc->uc_mcontext.regs->gpr[ra] + (ssize_t)ds;
-if (polling_address_ptr != NULL) {
+if (polling_address_ptr != nullptr) {
*polling_address_ptr = addr;
}
return SafepointMechanism::is_poll_address(addr);
#else
-// Not on Linux, ucontext must be NULL.
+// Not on Linux, ucontext must be null.
ShouldNotReachHere();
return false;
#endif
@@ -1385,14 +1385,14 @@ address MacroAssembler::get_stack_bang_address(int instruction, void *ucontext)
int rb = inv_rb_field(instruction);
address sp = (address)uc->uc_mcontext.regs->gpr[1];
long rb_val = (long)uc->uc_mcontext.regs->gpr[rb];
-return ra != 1 || rb_val >= 0 ? NULL // not a stack bang
+return ra != 1 || rb_val >= 0 ? nullptr // not a stack bang
: sp + rb_val; // banged address
}
-return NULL; // not a stack bang
+return nullptr; // not a stack bang
#else
// workaround not needed on !LINUX :-)
ShouldNotCallThis();
-return NULL;
+return nullptr;
#endif
}
@@ -1701,7 +1701,7 @@ void MacroAssembler::cmpxchgd(ConditionRegister flag,
Register int_flag_success, Label* failed_ext, bool contention_hint, bool weak) {
Label retry;
Label failed_int;
-Label& failed = (failed_ext != NULL) ? *failed_ext : failed_int;
+Label& failed = (failed_ext != nullptr) ? *failed_ext : failed_int;
Label done;
// Save one branch if result is returned via register and result register is different from the other ones.
@@ -1709,7 +1709,7 @@ void MacroAssembler::cmpxchgd(ConditionRegister flag,
bool preset_result_reg = (int_flag_success!=dest_current_value && int_flag_success!=compare_value.register_or_noreg() &&
int_flag_success!=exchange_value && int_flag_success!=addr_base);
assert(!weak || flag == CCR0, "weak only supported with CCR0");
-assert(int_flag_success == noreg || failed_ext == NULL, "cannot have both");
+assert(int_flag_success == noreg || failed_ext == nullptr, "cannot have both");
if (use_result_reg && preset_result_reg) {
li(int_flag_success, 0); // preset (assume cas failed)
@@ -1816,7 +1816,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
}
}
-// for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
+// for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
// if (scan->interface() == intf) {
// result = (klass + scan->offset() + itable_index);
// }
@@ -1901,12 +1901,12 @@ void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
Label L_fallthrough;
int label_nulls = 0;
-if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
+if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
-if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
+if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
-if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
+if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
assert(label_nulls <= 1 ||
(L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path),
-"at most one NULL in the batch, usually");
+"at most one null in the batch, usually");
// If the pointers are equal, we are done (e.g., String[] elements).
// This self-check enables sharing of secondary supertype arrays among
@@ -2020,7 +2020,7 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
bind(hit);
std(super_klass, target_offset, sub_klass); // save result to cache
if (result_reg != noreg) { li(result_reg, 0); } // load zero result (indicates a hit)
-if (L_success != NULL) { b(*L_success); }
+if (L_success != nullptr) { b(*L_success); }
else if (result_reg == noreg) { blr(); } // return with CR0.eq if neither label nor result reg provided
bind(fallthru);
@@ -2039,12 +2039,12 @@ void MacroAssembler::check_klass_subtype(Register sub_klass,
}
void MacroAssembler::clinit_barrier(Register klass, Register thread, Label* L_fast_path, Label* L_slow_path) {
-assert(L_fast_path != NULL || L_slow_path != NULL, "at least one is required");
+assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required");
Label L_fallthrough;
-if (L_fast_path == NULL) {
+if (L_fast_path == nullptr) {
L_fast_path = &L_fallthrough;
-} else if (L_slow_path == NULL) {
+} else if (L_slow_path == nullptr) {
L_slow_path = &L_fallthrough;
}
@ -2133,7 +2133,7 @@ address MacroAssembler::emit_trampoline_stub(int destination_toc_offset,
int insts_call_instruction_offset, Register Rtoc) { int insts_call_instruction_offset, Register Rtoc) {
// Start the stub. // Start the stub.
address stub = start_a_stub(64); address stub = start_a_stub(64);
if (stub == NULL) { return NULL; } // CodeCache full: bail out if (stub == nullptr) { return nullptr; } // CodeCache full: bail out
// Create a trampoline stub relocation which relates this trampoline stub // Create a trampoline stub relocation which relates this trampoline stub
// with the call instruction at insts_call_instruction_offset in the // with the call instruction at insts_call_instruction_offset in the
@ -2349,7 +2349,7 @@ void MacroAssembler::rtm_abort_ratio_calculation(Register rtm_counters_Reg,
mulli(tmpReg, tmpReg, RTMAbortRatio); // allowable range: int16 mulli(tmpReg, tmpReg, RTMAbortRatio); // allowable range: int16
cmpd(CCR0, R0, tmpReg); cmpd(CCR0, R0, tmpReg);
blt(CCR0, L_check_always_rtm1); // jump to reload blt(CCR0, L_check_always_rtm1); // jump to reload
if (method_data != NULL) { if (method_data != nullptr) {
// Set rtm_state to "no rtm" in MDO. // Set rtm_state to "no rtm" in MDO.
// Not using a metadata relocation. Method and Class Loader are kept alive anyway. // Not using a metadata relocation. Method and Class Loader are kept alive anyway.
// (See nmethod::metadata_do and CodeBuffer::finalize_oop_references.) // (See nmethod::metadata_do and CodeBuffer::finalize_oop_references.)
@ -2370,7 +2370,7 @@ void MacroAssembler::rtm_abort_ratio_calculation(Register rtm_counters_Reg,
cmpd(CCR0, tmpReg, R0); cmpd(CCR0, tmpReg, R0);
} }
blt(CCR0, L_done); blt(CCR0, L_done);
if (method_data != NULL) { if (method_data != nullptr) {
// Set rtm_state to "always rtm" in MDO. // Set rtm_state to "always rtm" in MDO.
// Not using a metadata relocation. See above. // Not using a metadata relocation. See above.
load_const(R0, (address)method_data + MethodData::rtm_state_offset_in_bytes(), tmpReg); load_const(R0, (address)method_data + MethodData::rtm_state_offset_in_bytes(), tmpReg);
@ -2386,14 +2386,14 @@ void MacroAssembler::rtm_profiling(Register abort_status_Reg, Register temp_Reg,
Metadata* method_data, Metadata* method_data,
bool profile_rtm) { bool profile_rtm) {
assert(rtm_counters != NULL, "should not be NULL when profiling RTM"); assert(rtm_counters != nullptr, "should not be null when profiling RTM");
// Update rtm counters based on state at abort. // Update rtm counters based on state at abort.
// Reads abort_status_Reg, updates flags. // Reads abort_status_Reg, updates flags.
assert_different_registers(abort_status_Reg, temp_Reg); assert_different_registers(abort_status_Reg, temp_Reg);
load_const_optimized(temp_Reg, (address)rtm_counters, R0); load_const_optimized(temp_Reg, (address)rtm_counters, R0);
rtm_counters_update(abort_status_Reg, temp_Reg); rtm_counters_update(abort_status_Reg, temp_Reg);
if (profile_rtm) { if (profile_rtm) {
assert(rtm_counters != NULL, "should not be NULL when profiling RTM"); assert(rtm_counters != nullptr, "should not be null when profiling RTM");
rtm_abort_ratio_calculation(temp_Reg, rtm_counters, method_data); rtm_abort_ratio_calculation(temp_Reg, rtm_counters, method_data);
} }
} }
@ -2497,7 +2497,7 @@ void MacroAssembler::rtm_stack_locking(ConditionRegister flag,
if (RTMTotalCountIncrRate > 1) { if (RTMTotalCountIncrRate > 1) {
branch_on_random_using_tb(tmp, RTMTotalCountIncrRate, L_noincrement); branch_on_random_using_tb(tmp, RTMTotalCountIncrRate, L_noincrement);
} }
assert(stack_rtm_counters != NULL, "should not be NULL when profiling RTM"); assert(stack_rtm_counters != nullptr, "should not be null when profiling RTM");
load_const_optimized(tmp, (address)stack_rtm_counters->total_count_addr(), R0); load_const_optimized(tmp, (address)stack_rtm_counters->total_count_addr(), R0);
//atomic_inc_ptr(tmp, /*temp, will be reloaded*/mark_word); We don't increment atomically //atomic_inc_ptr(tmp, /*temp, will be reloaded*/mark_word); We don't increment atomically
ldx(mark_word, tmp); ldx(mark_word, tmp);
@ -2564,7 +2564,7 @@ void MacroAssembler::rtm_inflated_locking(ConditionRegister flag,
if (RTMTotalCountIncrRate > 1) { if (RTMTotalCountIncrRate > 1) {
branch_on_random_using_tb(R0, RTMTotalCountIncrRate, L_noincrement); branch_on_random_using_tb(R0, RTMTotalCountIncrRate, L_noincrement);
} }
assert(rtm_counters != NULL, "should not be NULL when profiling RTM"); assert(rtm_counters != nullptr, "should not be null when profiling RTM");
load_const(R0, (address)rtm_counters->total_count_addr(), tmpReg); load_const(R0, (address)rtm_counters->total_count_addr(), tmpReg);
//atomic_inc_ptr(R0, tmpReg); We don't increment atomically //atomic_inc_ptr(R0, tmpReg); We don't increment atomically
ldx(tmpReg, R0); ldx(tmpReg, R0);
@ -2707,7 +2707,7 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
// Handle existing monitor. // Handle existing monitor.
bind(object_has_monitor); bind(object_has_monitor);
// The object's monitor m is unlocked iff m->owner == NULL, // The object's monitor m is unlocked iff m->owner is null,
// otherwise m->owner may contain a thread or a stack address. // otherwise m->owner may contain a thread or a stack address.
#if INCLUDE_RTM_OPT #if INCLUDE_RTM_OPT
@ -2718,7 +2718,7 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
} else { } else {
#endif // INCLUDE_RTM_OPT #endif // INCLUDE_RTM_OPT
// Try to CAS m->owner from NULL to current thread. // Try to CAS m->owner from null to current thread.
addi(temp, displaced_header, ObjectMonitor::owner_offset_in_bytes()-markWord::monitor_value); addi(temp, displaced_header, ObjectMonitor::owner_offset_in_bytes()-markWord::monitor_value);
cmpxchgd(/*flag=*/flag, cmpxchgd(/*flag=*/flag,
/*current_value=*/current_header, /*current_value=*/current_header,
@ -2911,7 +2911,7 @@ void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Ja
"last_Java_pc not zeroed before leaving Java"); "last_Java_pc not zeroed before leaving Java");
// When returning from calling out from Java mode the frame anchor's // When returning from calling out from Java mode the frame anchor's
// last_Java_pc will always be set to NULL. It is set here so that // last_Java_pc will always be set to null. It is set here so that
// if we are doing a call to native (not VM) that we capture the // if we are doing a call to native (not VM) that we capture the
// known pc and don't have to rely on the native call having a // known pc and don't have to rely on the native call having a
// standard frame linkage where we can find the pc. // standard frame linkage where we can find the pc.
@ -4300,7 +4300,7 @@ void MacroAssembler::verify_oop_addr(RegisterOrConstant offs, Register base, con
// Call a C-function that prints output. // Call a C-function that prints output.
void MacroAssembler::stop(int type, const char* msg) { void MacroAssembler::stop(int type, const char* msg) {
bool msg_present = (msg != NULL); bool msg_present = (msg != nullptr);
#ifndef PRODUCT #ifndef PRODUCT
block_comment(err_msg("stop(type %d): %s {", type, msg_present ? msg : "null")); block_comment(err_msg("stop(type %d): %s {", type, msg_present ? msg : "null"));

@@ -427,12 +427,12 @@ class MacroAssembler: public Assembler {
inline void load_from_polling_page(Register polling_page_address, int offset = 0);
// Check whether instruction is a read access to the polling page
// which was emitted by load_from_polling_page(..).
-static bool is_load_from_polling_page(int instruction, void* ucontext/*may be NULL*/,
-address* polling_address_ptr = NULL);
+static bool is_load_from_polling_page(int instruction, void* ucontext/*may be nullptr*/,
+address* polling_address_ptr = nullptr);
-// Support for NULL-checks
+// Support for null-checks
//
-// Generates code that causes a NULL OS exception if the content of reg is NULL.
+// Generates code that causes a null OS exception if the content of reg is null.
// If the accessed location is M[reg + offset] and the offset is known, provide the
// offset. No explicit code generation is needed if the offset is within a certain
// range (0 <= offset <= page_size).
@@ -542,7 +542,7 @@ class MacroAssembler: public Assembler {
void cmpxchgd(ConditionRegister flag,
Register dest_current_value, RegisterOrConstant compare_value, Register exchange_value,
Register addr_base, int semantics, bool cmpxchgx_hint = false,
-Register int_flag_success = noreg, Label* failed = NULL, bool contention_hint = false, bool weak = false);
+Register int_flag_success = noreg, Label* failed = nullptr, bool contention_hint = false, bool weak = false);
// interface method calling
void lookup_interface_method(Register recv_klass,
@@ -561,7 +561,7 @@ class MacroAssembler: public Assembler {
// Test sub_klass against super_klass, with fast and slow paths.
// The fast path produces a tri-state answer: yes / no / maybe-slow.
-// One of the three labels can be NULL, meaning take the fall-through.
+// One of the three labels can be null, meaning take the fall-through.
// If super_check_offset is -1, the value is loaded up from super_klass.
// No registers are killed, except temp_reg and temp2_reg.
// If super_check_offset is not -1, temp2_reg is not used and can be noreg.
@@ -571,7 +571,7 @@ class MacroAssembler: public Assembler {
Register temp2_reg,
Label* L_success,
Label* L_failure,
-Label* L_slow_path = NULL, // default fall through
+Label* L_slow_path = nullptr, // default fall through
RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
// The rest of the type check; must be wired to a corresponding fast path.
@@ -583,7 +583,7 @@ class MacroAssembler: public Assembler {
Register super_klass,
Register temp1_reg,
Register temp2_reg,
-Label* L_success = NULL,
+Label* L_success = nullptr,
Register result_reg = noreg);
// Simplified, combined version, good for typical uses.
@@ -596,8 +596,8 @@ class MacroAssembler: public Assembler {
void clinit_barrier(Register klass,
Register thread,
-Label* L_fast_path = NULL,
-Label* L_slow_path = NULL);
+Label* L_fast_path = nullptr,
+Label* L_slow_path = nullptr);
// Method handle support (JSR 292).
RegisterOrConstant argument_offset(RegisterOrConstant arg_slot, Register temp_reg, int extra_slot_offset = 0);
@@ -631,7 +631,7 @@ class MacroAssembler: public Assembler {
void rtm_profiling(Register abort_status_Reg, Register temp_Reg,
RTMLockingCounters* rtm_counters, Metadata* method_data, bool profile_rtm);
void rtm_retry_lock_on_abort(Register retry_count, Register abort_status,
-Label& retryLabel, Label* checkRetry = NULL);
+Label& retryLabel, Label* checkRetry = nullptr);
void rtm_retry_lock_on_busy(Register retry_count, Register owner_addr, Label& retryLabel);
void rtm_stack_locking(ConditionRegister flag, Register obj, Register mark_word, Register tmp,
Register retry_on_abort_count,
@@ -647,9 +647,9 @@ class MacroAssembler: public Assembler {
void compiler_fast_lock_object(ConditionRegister flag, Register oop, Register box,
Register tmp1, Register tmp2, Register tmp3,
-RTMLockingCounters* rtm_counters = NULL,
-RTMLockingCounters* stack_rtm_counters = NULL,
-Metadata* method_data = NULL,
+RTMLockingCounters* rtm_counters = nullptr,
+RTMLockingCounters* stack_rtm_counters = nullptr,
+Metadata* method_data = nullptr,
bool use_rtm = false, bool profile_rtm = false);
void compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box,
@@ -707,7 +707,7 @@ class MacroAssembler: public Assembler {
inline void access_load_at(BasicType type, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs, Register dst,
Register tmp1, Register tmp2,
-MacroAssembler::PreservationLevel preservation_level, Label *L_handle_null = NULL);
+MacroAssembler::PreservationLevel preservation_level, Label *L_handle_null = nullptr);
public:
// Specify tmp1 for better code in certain compressed oops cases. Specify Label to bail out on null oop.
@@ -715,7 +715,7 @@ class MacroAssembler: public Assembler {
inline void load_heap_oop(Register d, RegisterOrConstant offs, Register s1,
Register tmp1, Register tmp2,
MacroAssembler::PreservationLevel preservation_level,
-DecoratorSet decorators = 0, Label *L_handle_null = NULL);
+DecoratorSet decorators = 0, Label *L_handle_null = nullptr);
inline void store_heap_oop(Register d, RegisterOrConstant offs, Register s1,
Register tmp1, Register tmp2, Register tmp3,
@@ -927,10 +927,10 @@ class MacroAssembler: public Assembler {
};
// Prints msg, dumps registers and stops execution.
-void stop (const char* msg = NULL) { stop(stop_stop, msg); }
-void untested (const char* msg = NULL) { stop(stop_untested, msg); }
-void unimplemented (const char* msg = NULL) { stop(stop_unimplemented, msg); }
-void should_not_reach_here(const char* msg = NULL) { stop(stop_shouldnotreachhere, msg); }
+void stop (const char* msg = nullptr) { stop(stop_stop, msg); }
+void untested (const char* msg = nullptr) { stop(stop_untested, msg); }
+void unimplemented (const char* msg = nullptr) { stop(stop_unimplemented, msg); }
+void should_not_reach_here(const char* msg = nullptr) { stop(stop_shouldnotreachhere, msg); }
void zap_from_to(Register low, int before, Register high, int after, Register val, Register addr) PRODUCT_RETURN;
};

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2021 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -419,7 +419,7 @@ inline Register MacroAssembler::encode_heap_oop_not_null(Register d, Register sr
}
inline Register MacroAssembler::encode_heap_oop(Register d, Register src) {
-if (CompressedOops::base() != NULL) {
+if (CompressedOops::base() != nullptr) {
if (VM_Version::has_isel()) {
cmpdi(CCR0, src, 0);
Register co = encode_heap_oop_not_null(d, src);
@@ -451,7 +451,7 @@ inline Register MacroAssembler::decode_heap_oop_not_null(Register d, Register sr
sldi(d, current, CompressedOops::shift());
current = d;
}
-if (CompressedOops::base() != NULL) {
+if (CompressedOops::base() != nullptr) {
add_const_optimized(d, current, CompressedOops::base(), R0);
current = d;
}
@@ -461,7 +461,7 @@ inline Register MacroAssembler::decode_heap_oop_not_null(Register d, Register sr
inline void MacroAssembler::decode_heap_oop(Register d) {
Label isNull;
bool use_isel = false;
-if (CompressedOops::base() != NULL) {
+if (CompressedOops::base() != nullptr) {
cmpwi(CCR0, d, 0);
if (VM_Version::has_isel()) {
use_isel = true;

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -93,12 +93,12 @@
static bool const_oop_prefer_decode() {
// Prefer ConN+DecodeN over ConP in simple compressed oops mode.
-return CompressedOops::base() == NULL;
+return CompressedOops::base() == nullptr;
}
static bool const_klass_prefer_decode() {
// Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
-return CompressedKlassPointers::base() == NULL;
+return CompressedKlassPointers::base() == nullptr;
}
// Is it better to copy float constants, or load them directly from memory?

@@ -158,7 +158,7 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth
__ bctr();
__ bind(L_no_such_method);
-assert(StubRoutines::throw_AbstractMethodError_entry() != NULL, "not yet generated!");
+assert(StubRoutines::throw_AbstractMethodError_entry() != nullptr, "not yet generated!");
__ load_const_optimized(target, StubRoutines::throw_AbstractMethodError_entry());
__ mtctr(target);
__ bctr();
@@ -224,14 +224,14 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
// They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
// They all allow an appendix argument.
__ stop("Should not reach here"); // empty stubs make SG sick
-return NULL;
+return nullptr;
}
// No need in interpreter entry for linkToNative for now.
// Interpreter calls compiled entry through i2c.
if (iid == vmIntrinsics::_linkToNative) {
__ stop("Should not reach here"); // empty stubs make SG sick
-return NULL;
+return nullptr;
}
Register R15_argbase = R15_esp; // parameter (preserved)
@@ -495,8 +495,8 @@ void trace_method_handle_stub(const char* adaptername,
intptr_t* entry_sp,
intptr_t* saved_regs) {
-bool has_mh = (strstr(adaptername, "/static") == NULL &&
-strstr(adaptername, "linkTo") == NULL); // static linkers don't have MH
+bool has_mh = (strstr(adaptername, "/static") == nullptr &&
+strstr(adaptername, "linkTo") == nullptr); // static linkers don't have MH
const char* mh_reg_name = has_mh ? "R23_method_handle" : "G23";
log_info(methodhandles)("MH %s %s=" INTPTR_FORMAT " sp=" INTPTR_FORMAT,
adaptername, mh_reg_name, p2i(mh), p2i(entry_sp));

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -45,7 +45,7 @@
bool NativeInstruction::is_sigill_not_entrant_at(address addr) {
if (!Assembler::is_illtrap(addr)) return false;
CodeBlob* cb = CodeCache::find_blob(addr);
-if (cb == NULL || !cb->is_nmethod()) return false;
+if (cb == nullptr || !cb->is_nmethod()) return false;
nmethod *nm = (nmethod *)cb;
// This method is not_entrant iff the illtrap instruction is
// located at the verified entry point.
@@ -133,12 +133,12 @@ address NativeCall::get_trampoline() {
address call_addr = addr_at(0);
CodeBlob *code = CodeCache::find_blob(call_addr);
-assert(code != NULL, "Could not find the containing code blob");
+assert(code != nullptr, "Could not find the containing code blob");
// There are no relocations available when the code gets relocated
// because of CodeBuffer expansion.
if (code->relocation_size() == 0)
-return NULL;
+return nullptr;
address bl_destination = Assembler::bxx_destination(call_addr);
if (code->contains(bl_destination) &&
@@ -178,7 +178,7 @@ void NativeFarCall::verify() {
address NativeMovConstReg::next_instruction_address() const {
#ifdef ASSERT
CodeBlob* nm = CodeCache::find_blob(instruction_address());
-assert(nm != NULL, "Could not find code blob");
+assert(nm != nullptr, "Could not find code blob");
assert(!MacroAssembler::is_set_narrow_oop(addr_at(0), nm->content_begin()), "Should not patch narrow oop here");
#endif
@@ -197,7 +197,7 @@ intptr_t NativeMovConstReg::data() const {
}
CodeBlob* cb = CodeCache::find_blob(addr);
-assert(cb != NULL, "Could not find code blob");
+assert(cb != nullptr, "Could not find code blob");
if (MacroAssembler::is_set_narrow_oop(addr, cb->content_begin())) {
narrowOop no = MacroAssembler::get_narrow_oop(addr, cb->content_begin());
// We can reach here during GC with 'no' pointing to new object location
@@ -216,17 +216,17 @@ intptr_t NativeMovConstReg::data() const {
address NativeMovConstReg::set_data_plain(intptr_t data, CodeBlob *cb) {
address addr = instruction_address();
-address next_address = NULL;
+address next_address = nullptr;
if (!cb) cb = CodeCache::find_blob(addr);
-if (cb != NULL && MacroAssembler::is_load_const_from_method_toc_at(addr)) {
+if (cb != nullptr && MacroAssembler::is_load_const_from_method_toc_at(addr)) {
// A load from the method's TOC (ctable).
assert(cb->is_nmethod(), "must be nmethod");
const address ctable = cb->content_begin();
const int toc_offset = MacroAssembler::get_offset_of_load_const_from_method_toc_at(addr);
*(intptr_t *)(ctable + toc_offset) = data;
next_address = addr + BytesPerInstWord;
-} else if (cb != NULL &&
+} else if (cb != nullptr &&
MacroAssembler::is_calculate_address_from_global_toc_at(addr, cb->content_begin())) {
// A calculation relative to the global TOC.
if (MacroAssembler::get_address_of_calculate_address_from_global_toc_at(addr, cb->content_begin()) !=
@@ -235,7 +235,7 @@ address NativeMovConstReg::set_data_plain(intptr_t data, CodeBlob *cb) {
const address inst1_addr =
MacroAssembler::patch_calculate_address_from_global_toc_at(inst2_addr, cb->content_begin(),
(address)data);
-assert(inst1_addr != NULL && inst1_addr < inst2_addr, "first instruction must be found");
+assert(inst1_addr != nullptr && inst1_addr < inst2_addr, "first instruction must be found");
const int range = inst2_addr - inst1_addr + BytesPerInstWord;
ICache::ppc64_flush_icache_bytes(inst1_addr, range);
}
@@ -272,12 +272,12 @@ void NativeMovConstReg::set_data(intptr_t data) {
// Also store the value into an oop_Relocation cell, if any.
if (cb && cb->is_nmethod()) {
RelocIterator iter((nmethod *) cb, instruction_address(), next_address);
-oop* oop_addr = NULL;
-Metadata** metadata_addr = NULL;
+oop* oop_addr = nullptr;
+Metadata** metadata_addr = nullptr;
while (iter.next()) {
if (iter.type() == relocInfo::oop_type) {
oop_Relocation *r = iter.oop_reloc();
-if (oop_addr == NULL) {
+if (oop_addr == nullptr) {
oop_addr = r->oop_addr();
*oop_addr = cast_to_oop(data);
} else {
@@ -286,7 +286,7 @@ void NativeMovConstReg::set_data(intptr_t data) {
}
if (iter.type() == relocInfo::metadata_type) {
metadata_Relocation *r = iter.metadata_reloc();
-if (metadata_addr == NULL) {
+if (metadata_addr == nullptr) {
metadata_addr = r->metadata_addr();
*metadata_addr = (Metadata*)data;
} else {
@@ -297,16 +297,16 @@ void NativeMovConstReg::set_data(intptr_t data) {
}
}
-void NativeMovConstReg::set_narrow_oop(narrowOop data, CodeBlob *code /* = NULL */) {
+void NativeMovConstReg::set_narrow_oop(narrowOop data, CodeBlob *code /* = nullptr */) {
address inst2_addr = addr_at(0);
CodeBlob* cb = (code) ? code : CodeCache::find_blob(instruction_address());
-assert(cb != NULL, "Could not find code blob");
+assert(cb != nullptr, "Could not find code blob");
if (MacroAssembler::get_narrow_oop(inst2_addr, cb->content_begin()) == data) {
return;
}
const address inst1_addr =
MacroAssembler::patch_set_narrow_oop(inst2_addr, cb->content_begin(), data);
-assert(inst1_addr != NULL && inst1_addr < inst2_addr, "first instruction must be found");
+assert(inst1_addr != nullptr && inst1_addr < inst2_addr, "first instruction must be found");
const int range = inst2_addr - inst1_addr + BytesPerInstWord;
ICache::ppc64_flush_icache_bytes(inst1_addr, range);
}
@@ -319,8 +319,8 @@ void NativeMovConstReg::verify() {
if (! MacroAssembler::is_load_const_at(addr) &&
! MacroAssembler::is_load_const_from_method_toc_at(addr)) {
CodeBlob* cb = CodeCache::find_blob(addr);
-if (! (cb != NULL && MacroAssembler::is_calculate_address_from_global_toc_at(addr, cb->content_begin())) &&
-! (cb != NULL && MacroAssembler::is_set_narrow_oop(addr, cb->content_begin())) &&
+if (! (cb != nullptr && MacroAssembler::is_calculate_address_from_global_toc_at(addr, cb->content_begin())) &&
+! (cb != nullptr && MacroAssembler::is_set_narrow_oop(addr, cb->content_begin())) &&
! MacroAssembler::is_bl(*((int*) addr))) {
tty->print_cr("not a NativeMovConstReg at " PTR_FORMAT, p2i(addr));
// TODO: PPC port: Disassembler::decode(addr, 20, 20, tty);
@@ -407,7 +407,7 @@ address NativeCallTrampolineStub::encoded_destination_addr() const {
address NativeCallTrampolineStub::destination(nmethod *nm) const {
CodeBlob* cb = nm ? nm : CodeCache::find_blob(addr_at(0));
-assert(cb != NULL, "Could not find code blob");
+assert(cb != nullptr, "Could not find code blob");
address ctable = cb->content_begin();
return *(address*)(ctable + destination_toc_offset());
@@ -419,7 +419,7 @@ int NativeCallTrampolineStub::destination_toc_offset() const {
void NativeCallTrampolineStub::set_destination(address new_destination) {
CodeBlob* cb = CodeCache::find_blob(addr_at(0));
-assert(cb != NULL, "Could not find code blob");
+assert(cb != nullptr, "Could not find code blob");
address ctable = cb->content_begin();
*(address*)(ctable + destination_toc_offset()) = new_destination;
@@ -439,7 +439,7 @@ void NativeDeoptInstruction::verify() {
bool NativeDeoptInstruction::is_deopt_at(address code_pos) {
if (!Assembler::is_illtrap(code_pos)) return false;
CodeBlob* cb = CodeCache::find_blob(code_pos);
-if (cb == NULL || !cb->is_compiled()) return false;
+if (cb == nullptr || !cb->is_compiled()) return false;
nmethod *nm = (nmethod *)cb;
// see NativeInstruction::is_sigill_not_entrant_at()
return nm->verified_entry_point() != code_pos;

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2021 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -91,7 +91,7 @@ class NativeInstruction {
return MacroAssembler::is_tdi(long_at(0), Assembler::traptoGreaterThanUnsigned | Assembler::traptoEqual,
-1, encoding);
}
-return MacroAssembler::is_load_from_polling_page(long_at(0), NULL);
+return MacroAssembler::is_load_from_polling_page(long_at(0), nullptr);
}
bool is_safepoint_poll_return() {
@@ -177,7 +177,7 @@ inline NativeCall* nativeCall_at(address instr) {
}
inline NativeCall* nativeCall_before(address return_address) {
-NativeCall* call = NULL;
+NativeCall* call = nullptr;
if (MacroAssembler::is_bl(*(int*)(return_address - 4)))
call = (NativeCall*)(return_address - 4);
call->verify();
@@ -260,7 +260,7 @@ class NativeMovConstReg: public NativeInstruction {
void set_data(intptr_t x);
// Patch narrow oop constants.
-void set_narrow_oop(narrowOop data, CodeBlob *code = NULL);
+void set_narrow_oop(narrowOop data, CodeBlob *code = nullptr);
void verify() NOT_DEBUG_RETURN;
};
@@ -307,7 +307,7 @@ class NativeJump: public NativeInstruction {
return (address)((NativeMovConstReg *)this)->data();
} else {
ShouldNotReachHere();
-return NULL;
+return nullptr;
}
}
@@ -377,7 +377,7 @@ class NativeCallTrampolineStub : public NativeInstruction {
public:
-address destination(nmethod *nm = NULL) const;
+address destination(nmethod *nm = nullptr) const;
int destination_toc_offset() const;
void set_destination(address new_destination);
@@ -518,7 +518,7 @@ inline NativePostCallNop* nativePostCallNop_at(address address) {
if (nop->check()) {
return nop;
}
-return NULL;
+return nullptr;
}
class NativeDeoptInstruction: public NativeInstruction {

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -32,8 +32,8 @@
private:
// This is the hook for finding a register in an "well-known" location,
// such as a register block of a predetermined format.
-// Since there is none, we just return NULL.
-address pd_location(VMReg reg) const { return NULL; }
+// Since there is none, we just return null.
+address pd_location(VMReg reg) const { return nullptr; }
address pd_location(VMReg base_reg, int slot_idx) const {
return location(base_reg->next(slot_idx), nullptr);

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -53,7 +53,7 @@ address Relocation::pd_call_destination(address orig_addr) {
intptr_t adj = 0;
address inst_loc = addr();
-if (orig_addr != NULL) {
+if (orig_addr != nullptr) {
// We just moved this call instruction from orig_addr to addr().
// This means its target will appear to have grown by addr() - orig_addr.
adj = -(inst_loc - orig_addr);
@@ -69,7 +69,7 @@ address Relocation::pd_call_destination(address orig_addr) {
return branch->branch_destination();
} else {
orig_addr = nativeCall_at(inst_loc)->get_trampoline();
-if (orig_addr == NULL) {
+if (orig_addr == nullptr) {
return (address) -1;
} else {
return ((NativeCallTrampolineStub*)orig_addr)->destination();

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -108,7 +108,7 @@ void OptoRuntime::generate_exception_blob() {
address calls_return_pc = __ last_calls_return_pc();
# ifdef ASSERT
__ cmpdi(CCR0, R3_RET, 0);
-__ asm_assert_ne("handle_exception_C must not return NULL");
+__ asm_assert_ne("handle_exception_C must not return null");
# endif
// Set an oopmap for the call site. This oopmap will only be used if we

@@ -97,11 +97,11 @@ class RegisterSaver {
Register r_temp,
int frame_size,
int total_args,
-const VMRegPair *regs, const VMRegPair *regs2 = NULL);
+const VMRegPair *regs, const VMRegPair *regs2 = nullptr);
static void restore_argument_registers_and_pop_frame(MacroAssembler*masm,
int frame_size,
int total_args,
-const VMRegPair *regs, const VMRegPair *regs2 = NULL);
+const VMRegPair *regs, const VMRegPair *regs2 = nullptr);
// During deoptimization only the result registers need to be restored
// all the other values have already been extracted.
@@ -279,7 +279,7 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble
const int register_save_offset = frame_size_in_bytes - register_save_size;
// OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words.
-OopMap* map = generate_oop_map ? new OopMap(frame_size_in_slots, 0) : NULL;
+OopMap* map = generate_oop_map ? new OopMap(frame_size_in_slots, 0) : nullptr;
BLOCK_COMMENT("push_frame_reg_args_and_save_live_registers {");
@@ -472,7 +472,7 @@ void RegisterSaver::push_frame_and_save_argument_registers(MacroAssembler* masm,
st_off -= wordSize;
}
}
-if (regs2 != NULL) {
+if (regs2 != nullptr) {
for (int i = 0; i < total_args; i++) {
VMReg r_1 = regs2[i].first();
VMReg r_2 = regs2[i].second();
@@ -510,7 +510,7 @@ void RegisterSaver::restore_argument_registers_and_pop_frame(MacroAssembler*masm
st_off -= wordSize;
}
}
-if (regs2 != NULL)
+if (regs2 != nullptr)
for (int i = 0; i < total_args; i++) {
VMReg r_1 = regs2[i].first();
VMReg r_2 = regs2[i].second();
@@ -803,11 +803,11 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
"passing C arguments in wrong stack slots");
#endif
// We fill-out regs AND regs2 if an argument must be passed in a
-// register AND in a stack slot. If regs2 is NULL in such a
+// register AND in a stack slot. If regs2 is null in such a
// situation, we bail-out with a fatal error.
for (int i = 0; i < total_args_passed; ++i, ++arg) {
// Initialize regs2 to BAD.
-if (regs2 != NULL) regs2[i].set_bad();
+if (regs2 != nullptr) regs2[i].set_bad();
switch(sig_bt[i]) {
@@ -871,7 +871,7 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
// convention, too.
if (arg >= Argument::n_regs_not_on_stack_c) {
// ... and on the stack.
-guarantee(regs2 != NULL, "must pass float in register and stack slot");
+guarantee(regs2 != nullptr, "must pass float in register and stack slot");
VMReg reg2 = VMRegImpl::stack2reg(stk + FLOAT_WORD_OFFSET_IN_SLOT);
regs2[i].set1(reg2);
stk += inc_stk_for_intfloat;
@@ -898,7 +898,7 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
// convention, too.
if (arg >= Argument::n_regs_not_on_stack_c) {
// ... and on the stack.
-guarantee(regs2 != NULL, "must pass float in register and stack slot");
+guarantee(regs2 != nullptr, "must pass float in register and stack slot");
VMReg reg2 = VMRegImpl::stack2reg(stk);
regs2[i].set2(reg2);
stk += inc_stk_for_longdouble;
@@ -968,7 +968,7 @@ static address gen_c2i_adapter(MacroAssembler *masm,
__ beq(CCR0, call_interpreter);
-// Patch caller's callsite, method_(code) was not NULL which means that
+// Patch caller's callsite, method_(code) was not null which means that
// compiled code exists.
__ mflr(return_pc);
__ std(return_pc, _abi0(lr), R1_SP);
@@ -1236,7 +1236,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()),
"klass offset should reach into any page");
-// Check for NULL argument if we don't have implicit null checks.
+// Check for null argument if we don't have implicit null checks.
if (!ImplicitNullChecks || !os::zero_page_read_protected()) {
if (TrapBasedNullChecks) {
__ trap_null_check(R3_ARG1);
@@ -1250,7 +1250,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
__ BIND(valid);
}
}
-// Assume argument is not NULL, load klass from receiver.
+// Assume argument is not null, load klass from receiver.
__ load_klass(receiver_klass, R3_ARG1);
__ ld(ic_klass, CompiledICHolder::holder_klass_offset(), ic);
@@ -1286,7 +1286,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
c2i_entry = __ pc();
// Class initialization barrier for static methods
-address c2i_no_clinit_check_entry = NULL;
+address c2i_no_clinit_check_entry = nullptr;
if (VM_Version::supports_fast_class_init_checks()) {
Label L_skip_barrier;
@@ -1343,7 +1343,7 @@ static void object_move(MacroAssembler* masm,
__ ld( r_temp_2, reg2offset(src.first()), r_caller_sp);
__ cmpdi(CCR0, r_temp_2, 0);
__ bne(CCR0, skip);
-// Use a NULL handle if oop is NULL.
+// Use a null handle if oop is null.
__ li(r_handle, 0);
__ bind(skip);
@@ -1373,7 +1373,7 @@ static void object_move(MacroAssembler* masm,
__ cmpdi(CCR0, r_oop, 0);
__ bne(CCR0, skip);
-// Use a NULL handle if oop is NULL.
+// Use a null handle if oop is null.
__ li(r_handle, 0);
__ bind(skip);
@@ -2114,11 +2114,11 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
stack_slots / VMRegImpl::slots_per_word,
in_ByteSize(-1),
in_ByteSize(-1),
-(OopMapSet*)NULL);
+(OopMapSet*)nullptr);
}
address native_func = method->native_function();
-assert(native_func != NULL, "must have function");
+assert(native_func != nullptr, "must have function");
// First, create signature for outgoing C call
// --------------------------------------------------------------------------
@@ -2141,7 +2141,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
BasicType *out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
VMRegPair *out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
VMRegPair *out_regs2 = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
-BasicType* in_elem_bt = NULL;
+BasicType* in_elem_bt = nullptr;
// Create the signature for the C call:
// 1) add the JNIEnv*
@@ -2946,7 +2946,7 @@ void SharedRuntime::generate_deopt_blob() {
InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
Label exec_mode_initialized;
int frame_size_in_words;
-OopMap* map = NULL;
+OopMap* map = nullptr;
OopMapSet *oop_maps = new OopMapSet();
// size of ABI112 plus spill slots for R3_RET and F1_RET.
@@ -2988,7 +2988,7 @@ void SharedRuntime::generate_deopt_blob() {
/*generate_oop_map=*/ true,
return_pc_adjustment_no_exception,
RegisterSaver::return_pc_is_lr);
-assert(map != NULL, "OopMap must have been created");
+assert(map != nullptr, "OopMap must have been created");
__ li(exec_mode_reg, Deoptimization::Unpack_deopt);
// Save exec mode for unpack_frames.
@@ -3298,7 +3298,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// Generate a special Compile2Runtime blob that saves all registers, and setup oopmap.
SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
-assert(StubRoutines::forward_exception_entry() != NULL,
+assert(StubRoutines::forward_exception_entry() != nullptr,
"must be generated before");
ResourceMark rm;
@@ -3428,7 +3428,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
int frame_size_in_bytes;
OopMapSet *oop_maps = new OopMapSet();
-OopMap* map = NULL;
+OopMap* map = nullptr;
address start = __ pc();

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -83,7 +83,7 @@ public:
bool should_skip_missing() const { return false; }
VMReg find_register_spilled_here(void* p, intptr_t* sp) {
Unimplemented();
-return NULL;
+return nullptr;
}
void print() const { print_on(tty); }
void print_on(outputStream* st) const { st->print_cr("Small register map"); }

@@ -412,7 +412,7 @@ class StubGenerator: public StubCodeGenerator {
__ stw(exception_line, in_bytes(JavaThread::exception_line_offset()), R16_thread);
// complete return to VM
-assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");
+assert(StubRoutines::_call_stub_return_address != nullptr, "must have been generated before");
__ mtlr(R4_ARG2);
// continue in call stub
@@ -2134,7 +2134,7 @@ class StubGenerator: public StubCodeGenerator {
Label L_miss;
-__ check_klass_subtype_fast_path(sub_klass, super_klass, temp, R0, &L_success, &L_miss, NULL,
+__ check_klass_subtype_fast_path(sub_klass, super_klass, temp, R0, &L_success, &L_miss, nullptr,
super_check_offset);
__ check_klass_subtype_slow_path(sub_klass, super_klass, temp, R0, &L_success);
@@ -2412,15 +2412,15 @@ class StubGenerator: public StubCodeGenerator {
// (2) src_pos must not be negative.
// (3) dst_pos must not be negative.
// (4) length must not be negative.
-// (5) src klass and dst klass should be the same and not NULL.
+// (5) src klass and dst klass should be the same and not null.
// (6) src and dst should be arrays.
// (7) src_pos + length must not exceed length of src.
// (8) dst_pos + length must not exceed length of dst.
BLOCK_COMMENT("arraycopy initial argument checks");
-__ cmpdi(CCR1, src, 0); // if (src == NULL) return -1;
+__ cmpdi(CCR1, src, 0); // if (src == nullptr) return -1;
__ extsw_(src_pos, src_pos); // if (src_pos < 0) return -1;
-__ cmpdi(CCR5, dst, 0); // if (dst == NULL) return -1;
+__ cmpdi(CCR5, dst, 0); // if (dst == nullptr) return -1;
__ cror(CCR1, Assembler::equal, CCR0, Assembler::less);
__ extsw_(dst_pos, dst_pos); // if (src_pos < 0) return -1;
__ cror(CCR5, Assembler::equal, CCR0, Assembler::less);
@@ -4713,7 +4713,7 @@ class StubGenerator: public StubCodeGenerator {
// nmethod entry barriers for concurrent class unloading
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
-if (bs_nm != NULL) {
+if (bs_nm != nullptr) {
StubRoutines::ppc::_nmethod_entry_barrier = generate_nmethod_entry_barrier();
}

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -86,7 +86,7 @@ address StubRoutines::ppc::generate_crc_constants(juint reverse_poly) {
const int size = use_vector ? CRC32_TABLE_SIZE + vector_size : (4 BIG_ENDIAN_ONLY(+1)) * CRC32_TABLE_SIZE;
const address consts = (address)os::malloc(size, mtInternal);
-if (consts == NULL) {
+if (consts == nullptr) {
vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CRC constants: no enough space");
}
juint* ptr = (juint*)consts;

@ -523,7 +523,7 @@ address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
// If the receiver is null then it is OK to jump to the slow path. // If the receiver is null then it is OK to jump to the slow path.
__ ld(R3_RET, Interpreter::stackElementSize, R15_esp); // get receiver __ ld(R3_RET, Interpreter::stackElementSize, R15_esp); // get receiver
// Check if receiver == NULL and go the slow path. // Check if receiver == nullptr and go the slow path.
__ cmpdi(CCR0, R3_RET, 0); __ cmpdi(CCR0, R3_RET, 0);
__ beq(CCR0, slow_path); __ beq(CCR0, slow_path);
@ -693,7 +693,7 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i
__ check_and_forward_exception(R11_scratch1, R12_scratch2); __ check_and_forward_exception(R11_scratch1, R12_scratch2);
// Start executing bytecodes. // Start executing bytecodes.
if (continuation == NULL) { if (continuation == nullptr) {
__ dispatch_next(state, step); __ dispatch_next(state, step);
} else { } else {
__ jump_to_entry(continuation, R11_scratch1); __ jump_to_entry(continuation, R11_scratch1);
@ -766,9 +766,9 @@ void TemplateInterpreterGenerator::generate_counter_overflow(Label& continue_ent
// Generate code to initiate compilation on the counter overflow. // Generate code to initiate compilation on the counter overflow.
// InterpreterRuntime::frequency_counter_overflow takes one argument, // InterpreterRuntime::frequency_counter_overflow takes one argument,
// which indicates if the counter overflow occurs at a backwards branch (NULL bcp) // which indicates if the counter overflow occurs at a backwards branch (null bcp)
// We pass zero in. // We pass zero in.
// The call returns the address of the verified entry point for the method or NULL // The call returns the address of the verified entry point for the method or null
// if the compilation did not complete (either went background or bailed out). // if the compilation did not complete (either went background or bailed out).
// //
// Unlike the C++ interpreter above: Check exceptions! // Unlike the C++ interpreter above: Check exceptions!
@ -778,7 +778,7 @@ void TemplateInterpreterGenerator::generate_counter_overflow(Label& continue_ent
__ li(R4_ARG2, 0); __ li(R4_ARG2, 0);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);
// Returns verified_entry_point or NULL. // Returns verified_entry_point or null.
// We ignore it in any case. // We ignore it in any case.
__ b(continue_entry); __ b(continue_entry);
} }
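
For illustration, the contract the comments describe could be consumed as below; the helper is hypothetical, and the PPC stub above deliberately ignores the result and branches back to continue_entry:

using address = unsigned char*;

// frequency_counter_overflow returns the verified entry point, or nullptr
// if the compilation did not complete (went background or bailed out).
address dispatch_after_overflow(address verified_entry, address continue_entry) {
  return (verified_entry != nullptr) ? verified_entry : continue_entry;
}
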
@ -801,7 +801,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rmem_f
__ bgt(CCR0/*is_stack_overflow*/, done); __ bgt(CCR0/*is_stack_overflow*/, done);
// The stack overflows. Load target address of the runtime stub and call it. // The stack overflows. Load target address of the runtime stub and call it.
assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "generated in wrong order"); assert(StubRoutines::throw_StackOverflowError_entry() != nullptr, "generated in wrong order");
__ load_const_optimized(Rscratch1, (StubRoutines::throw_StackOverflowError_entry()), R0); __ load_const_optimized(Rscratch1, (StubRoutines::throw_StackOverflowError_entry()), R0);
__ mtctr(Rscratch1); __ mtctr(Rscratch1);
// Restore caller_sp (c2i adapter may exist, but no shrinking of interpreted caller frame). // Restore caller_sp (c2i adapter may exist, but no shrinking of interpreted caller frame).
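
In scalar terms, the comparison feeding the bgt above is a frame-fit test; a minimal sketch under assumed names (sp, stack_limit are illustrative):

#include <cstddef>
#include <cstdint>

// The frame is built only when it fits below the limit; otherwise control
// transfers to the pre-generated throw_StackOverflowError stub.
bool frame_fits(uintptr_t sp, uintptr_t stack_limit, size_t mem_frame_size) {
  return sp - mem_frame_size > stack_limit;
}
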
@ -1074,7 +1074,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
// Decide what to do: Use same platform specific instructions and runtime calls as compilers. // Decide what to do: Use same platform specific instructions and runtime calls as compilers.
bool use_instruction = false; bool use_instruction = false;
address runtime_entry = NULL; address runtime_entry = nullptr;
int num_args = 1; int num_args = 1;
bool double_precision = true; bool double_precision = true;
@ -1103,7 +1103,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
} }
// Use normal entry if neither instruction nor runtime call is used. // Use normal entry if neither instruction nor runtime call is used.
if (!use_instruction && runtime_entry == NULL) return NULL; if (!use_instruction && runtime_entry == nullptr) return nullptr;
address entry = __ pc(); address entry = __ pc();
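
The early return above is the key decision: nullptr tells the caller to fall back to the normal (non-intrinsified) method entry. A hedged sketch of the selection, names illustrative:

using address = unsigned char*;

address select_math_entry(bool use_instruction, address runtime_entry,
                          address generated_entry) {
  if (!use_instruction && runtime_entry == nullptr) {
    return nullptr;  // no instruction and no runtime call: use normal entry
  }
  return generated_entry;
}
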
@ -2067,7 +2067,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ bne(CCR0, L_done); __ bne(CCR0, L_done);
// The member name argument must be restored if _invokestatic is re-executed after a PopFrame call. // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
// Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL. // Detect such a case in the InterpreterRuntime function and return the member name argument, or null.
__ ld(R4_ARG2, 0, R18_locals); __ ld(R4_ARG2, 0, R18_locals);
__ call_VM(R4_ARG2, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R4_ARG2, R19_method, R14_bcp); __ call_VM(R4_ARG2, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R4_ARG2, R19_method, R14_bcp);
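
A hedged sketch of how the returned value could be consumed (hypothetical helper; assuming local slot 0 holds the member name argument):

using oop = void*;

// member_name_arg_or_null reports the re-execution case by returning the
// member name; nullptr means nothing needs restoring.
void maybe_restore_member_name(oop* local0, oop member_name_or_null) {
  if (member_name_or_null != nullptr) {
    *local0 = member_name_or_null;
  }
}
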
@ -2197,7 +2197,7 @@ address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
//__ flush_bundle(); //__ flush_bundle();
address entry = __ pc(); address entry = __ pc();
const char *bname = NULL; const char *bname = nullptr;
uint tsize = 0; uint tsize = 0;
switch(state) { switch(state) {
case ftos: case ftos:
@ -2319,7 +2319,7 @@ void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
// The runtime saves the right registers, depending on // The runtime saves the right registers, depending on
// the tosca in-state for the given template. // the tosca in-state for the given template.
assert(Interpreter::trace_code(t->tos_in()) != NULL, assert(Interpreter::trace_code(t->tos_in()) != nullptr,
"entry must have been generated"); "entry must have been generated");
// Note: we destroy LR here. // Note: we destroy LR here.
View File
@ -313,7 +313,7 @@ void TemplateTable::fast_aldc(LdcType type) {
__ get_cache_index_at_bcp(R31, 1, index_size); // Load index. __ get_cache_index_at_bcp(R31, 1, index_size); // Load index.
__ load_resolved_reference_at_index(R17_tos, R31, R11_scratch1, R12_scratch2, &is_null); __ load_resolved_reference_at_index(R17_tos, R31, R11_scratch1, R12_scratch2, &is_null);
// Convert null sentinel to NULL // Convert null sentinel to null
int simm16_rest = __ load_const_optimized(R11_scratch1, Universe::the_null_sentinel_addr(), R0, true); int simm16_rest = __ load_const_optimized(R11_scratch1, Universe::the_null_sentinel_addr(), R0, true);
__ ld(R31, simm16_rest, R11_scratch1); __ ld(R31, simm16_rest, R11_scratch1);
__ resolve_oop_handle(R31, R11_scratch1, R12_scratch2, MacroAssembler::PRESERVATION_NONE); __ resolve_oop_handle(R31, R11_scratch1, R12_scratch2, MacroAssembler::PRESERVATION_NONE);
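
The sentinel conversion above, reduced to scalar form (hypothetical helper and stand-in type):

using oop = void*;

oop convert_null_sentinel(oop resolved, oop the_null_sentinel) {
  // A resolved constant equal to the VM's null sentinel stands for null.
  return (resolved == the_null_sentinel) ? nullptr : resolved;
}
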
@ -995,7 +995,7 @@ void TemplateTable::aastore() {
// Rindex is dead! // Rindex is dead!
Register Rscratch3 = Rindex; Register Rscratch3 = Rindex;
// Do array store check - check for NULL value first. // Do array store check - check for null value first.
__ cmpdi(CCR0, R17_tos, 0); __ cmpdi(CCR0, R17_tos, 0);
__ beq(CCR0, Lis_null); __ beq(CCR0, Lis_null);
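
The branch above implements the usual aastore shape: null values bypass the subtype check. A minimal sketch, with store_check standing in for the real check and the exception path omitted:

using oop = void*;

void aastore_sketch(oop* slot, oop value, bool (*store_check)(oop)) {
  if (value == nullptr) {       // cmpdi/beq above: no check needed for null
    *slot = nullptr;
    return;
  }
  if (store_check(value)) {     // subtype check against the element klass
    *slot = value;
  }
  // else: throw ArrayStoreException (omitted)
}
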
@ -2240,7 +2240,7 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist
__ load_resolved_method_at_index(byte_no, Rcache, method); __ load_resolved_method_at_index(byte_no, Rcache, method);
__ load_method_holder(klass, method); __ load_method_holder(klass, method);
__ clinit_barrier(klass, R16_thread, NULL /*L_fast_path*/, &L_clinit_barrier_slow); __ clinit_barrier(klass, R16_thread, nullptr /*L_fast_path*/, &L_clinit_barrier_slow);
} }
__ bind(Ldone); __ bind(Ldone);
@ -2457,7 +2457,7 @@ void TemplateTable::jvmti_post_field_access(Register Rcache, Register Rscratch,
} }
__ verify_oop(R17_tos); __ verify_oop(R17_tos);
} }
// tos: object pointer or NULL if static // tos: object pointer or null if static
// cache: cache entry pointer // cache: cache entry pointer
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R17_tos, Rcache); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R17_tos, Rcache);
if (!is_static && has_tos) { if (!is_static && has_tos) {
@ -3546,7 +3546,7 @@ void TemplateTable::invokevfinal_helper(Register Rmethod, Register Rflags, Regis
__ sldi(Rret_type, Rret_type, LogBytesPerWord); __ sldi(Rret_type, Rret_type, LogBytesPerWord);
__ ldx(Rret_addr, Rret_type, Rtable_addr); __ ldx(Rret_addr, Rret_type, Rtable_addr);
// Load receiver and receiver NULL check. // Load receiver and receiver null check.
__ load_receiver(Rnum_params, Rrecv); __ load_receiver(Rnum_params, Rrecv);
__ null_check_throw(Rrecv, -1, Rscratch1); __ null_check_throw(Rrecv, -1, Rscratch1);
@ -3570,7 +3570,7 @@ void TemplateTable::invokespecial(int byte_no) {
prepare_invoke(byte_no, Rmethod, Rret_addr, noreg, Rreceiver, Rflags, R11_scratch1, R12_scratch2); prepare_invoke(byte_no, Rmethod, Rret_addr, noreg, Rreceiver, Rflags, R11_scratch1, R12_scratch2);
// Receiver NULL check. // Receiver null check.
__ null_check_throw(Rreceiver, -1, R11_scratch1); __ null_check_throw(Rreceiver, -1, R11_scratch1);
__ profile_call(R11_scratch1, R12_scratch2); __ profile_call(R11_scratch1, R12_scratch2);
@ -3705,7 +3705,7 @@ void TemplateTable::invokeinterface(int byte_no) {
__ profile_arguments_type(Rmethod2, Rscratch1, Rscratch2, true); __ profile_arguments_type(Rmethod2, Rscratch1, Rscratch2, true);
__ call_from_interpreter(Rmethod2, Rret_addr, Rscratch1, Rscratch2); __ call_from_interpreter(Rmethod2, Rret_addr, Rscratch1, Rscratch2);
// Vtable entry was NULL => Throw abstract method error. // Vtable entry was null => Throw abstract method error.
__ bind(Lthrow_ame); __ bind(Lthrow_ame);
// Pass arguments for generating a verbose error message. // Pass arguments for generating a verbose error message.
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose), call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose),
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020 SAP SE. All rights reserved. * Copyright (c) 2012, 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
@ -427,7 +427,7 @@ void VM_Version::check_virtualizations() {
#if defined(_AIX) #if defined(_AIX)
int rc = 0; int rc = 0;
perfstat_partition_total_t pinfo; perfstat_partition_total_t pinfo;
rc = perfstat_partition_total(NULL, &pinfo, sizeof(perfstat_partition_total_t), 1); rc = perfstat_partition_total(nullptr, &pinfo, sizeof(perfstat_partition_total_t), 1);
if (rc == 1) { if (rc == 1) {
Abstract_VM_Version::_detected_virtualization = PowerVM; Abstract_VM_Version::_detected_virtualization = PowerVM;
} }
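
The probe above is the whole detection on AIX; a self-contained sketch (AIX-only, needs <libperfstat.h>; the wrapper name is illustrative). Passing nullptr as the name with a count of 1 requests the single partition-wide record, and rc == 1 signals success:

#include <libperfstat.h>

bool detected_powervm() {
  perfstat_partition_total_t pinfo;
  int rc = perfstat_partition_total(nullptr, &pinfo,
                                    sizeof(perfstat_partition_total_t), 1);
  return rc == 1;  // success implies an LPAR, i.e. PowerVM
}
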
@ -437,14 +437,14 @@ void VM_Version::check_virtualizations() {
// e.g. system_type=IBM pSeries (emulated by qemu) // e.g. system_type=IBM pSeries (emulated by qemu)
char line[500]; char line[500];
FILE* fp = os::fopen(info_file, "r"); FILE* fp = os::fopen(info_file, "r");
if (fp == NULL) { if (fp == nullptr) {
return; return;
} }
const char* system_type="system_type="; // if this line contains qemu, it is KVM const char* system_type="system_type="; // if this line contains qemu, it is KVM
const char* num_lpars="NumLpars="; // on non-KVM systems: if this line is found, it is PowerVM const char* num_lpars="NumLpars="; // on non-KVM systems: if this line is found, it is PowerVM
bool num_lpars_found = false; bool num_lpars_found = false;
while (fgets(line, sizeof(line), fp) != NULL) { while (fgets(line, sizeof(line), fp) != nullptr) {
if (strncmp(line, system_type, strlen(system_type)) == 0) { if (strncmp(line, system_type, strlen(system_type)) == 0) {
if (strstr(line, "qemu") != 0) { if (strstr(line, "qemu") != 0) {
Abstract_VM_Version::_detected_virtualization = PowerKVM; Abstract_VM_Version::_detected_virtualization = PowerKVM;
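
Pulling the loop's classification rules together in one hedged sketch (enum and helper are illustrative, not the VM_Version API):

#include <cstdio>
#include <cstring>

enum Virt { NoVirt, KVMGuest, PowerVMGuest };

Virt classify_lparcfg(FILE* fp) {
  char line[500];
  bool num_lpars_found = false;
  while (fgets(line, sizeof(line), fp) != nullptr) {
    if (strncmp(line, "system_type=", 12) == 0 &&
        strstr(line, "qemu") != nullptr) {
      return KVMGuest;            // emulated by qemu => KVM
    }
    if (strncmp(line, "NumLpars=", 9) == 0) {
      num_lpars_found = true;     // present only under PowerVM
    }
  }
  return num_lpars_found ? PowerVMGuest : NoVirt;
}
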
@ -472,7 +472,7 @@ void VM_Version::print_platform_virtualization_info(outputStream* st) {
int rc = 0; int rc = 0;
perfstat_partition_total_t pinfo; perfstat_partition_total_t pinfo;
memset(&pinfo, 0, sizeof(perfstat_partition_total_t)); memset(&pinfo, 0, sizeof(perfstat_partition_total_t));
rc = perfstat_partition_total(NULL, &pinfo, sizeof(perfstat_partition_total_t), 1); rc = perfstat_partition_total(nullptr, &pinfo, sizeof(perfstat_partition_total_t), 1);
if (rc != 1) { if (rc != 1) {
return; return;
} else { } else {
@ -481,7 +481,7 @@ void VM_Version::print_platform_virtualization_info(outputStream* st) {
// CPU information // CPU information
perfstat_cpu_total_t cpuinfo; perfstat_cpu_total_t cpuinfo;
memset(&cpuinfo, 0, sizeof(perfstat_cpu_total_t)); memset(&cpuinfo, 0, sizeof(perfstat_cpu_total_t));
rc = perfstat_cpu_total(NULL, &cpuinfo, sizeof(perfstat_cpu_total_t), 1); rc = perfstat_cpu_total(nullptr, &cpuinfo, sizeof(perfstat_cpu_total_t), 1);
if (rc != 1) { if (rc != 1) {
return; return;
} }
@ -532,7 +532,7 @@ void VM_Version::print_platform_virtualization_info(outputStream* st) {
"pool=", // CPU-pool number "pool=", // CPU-pool number
"pool_capacity=", "pool_capacity=",
"NumLpars=", // on non-KVM machines, NumLpars is not found for full partition mode machines "NumLpars=", // on non-KVM machines, NumLpars is not found for full partition mode machines
NULL }; nullptr };
if (!print_matching_lines_from_file(info_file, st, kw)) { if (!print_matching_lines_from_file(info_file, st, kw)) {
st->print_cr(" <%s Not Available>", info_file); st->print_cr(" <%s Not Available>", info_file);
} }
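
The nullptr terminator added above is what lets the consumer walk the array without a separate length; a minimal sketch of that idiom (printing stands in for the real line matching):

#include <cstdio>

void walk_keywords(const char* const* kw) {
  for (const char* const* p = kw; *p != nullptr; ++p) {
    std::printf("keyword: %s\n", *p);
  }
}
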
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2021 SAP SE. All rights reserved. * Copyright (c) 2012, 2021 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
@ -49,9 +49,9 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
// Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing. // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
const int stub_code_length = code_size_limit(true); const int stub_code_length = code_size_limit(true);
VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index); VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
// Can be NULL if there is no free space in the code cache. // Can be null if there is no free space in the code cache.
if (s == NULL) { if (s == nullptr) {
return NULL; return nullptr;
} }
// Count unused bytes in instruction sequences of variable size. // Count unused bytes in instruction sequences of variable size.
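
The null check above exists because VtableStub overloads operator new to allocate from the code cache, and that allocator can fail. A hedged sketch of the pattern (std::malloc stands in for the code cache; names are illustrative):

#include <cstddef>
#include <cstdlib>

struct StubSketch {
  int index;
  explicit StubSketch(int i) : index(i) {}
  // Non-throwing allocation: a nullptr result skips the constructor and
  // makes the whole new-expression evaluate to nullptr.
  static void* operator new(std::size_t size, int /*code_length*/) noexcept {
    return std::malloc(size);
  }
};

StubSketch* create_stub(int index) {
  StubSketch* s = new (128) StubSketch(index);
  if (s == nullptr) {  // no free space in the (stand-in) code cache
    return nullptr;
  }
  return s;
}
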
@ -120,7 +120,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
// if the vtable entry is null, the method is abstract // if the vtable entry is null, the method is abstract
// NOTE: for vtable dispatches, the vtable entry will never be null. // NOTE: for vtable dispatches, the vtable entry will never be null.
__ null_check(R19_method, in_bytes(Method::from_compiled_offset()), /*implicit only*/NULL); __ null_check(R19_method, in_bytes(Method::from_compiled_offset()), /*implicit only*/nullptr);
__ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method); __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
__ mtctr(R12_scratch2); __ mtctr(R12_scratch2);
__ bctr(); __ bctr();
@ -135,9 +135,9 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
// Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing. // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
const int stub_code_length = code_size_limit(false); const int stub_code_length = code_size_limit(false);
VtableStub* s = new(stub_code_length) VtableStub(false, itable_index); VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
// Can be NULL if there is no free space in the code cache. // Can be null if there is no free space in the code cache.
if (s == NULL) { if (s == nullptr) {
return NULL; return nullptr;
} }
// Count unused bytes in instruction sequences of variable size. // Count unused bytes in instruction sequences of variable size.