8191278: MappedByteBuffer bulk access memory failures are not handled gracefully

Unsafe.copy*Memory access failures are handled gracefully.

Co-authored-by: Harold Seigel <harold.seigel@oracle.com>
Reviewed-by: kvn, dcubed, thartmann, coleenp, aph
Jamsheed Mohammed C M 2019-06-24 11:37:56 -07:00
parent 4a7e2b57ea
commit aedbb75803
26 changed files with 1827 additions and 1242 deletions
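
How the fix works, as the per-platform diffs below show: each raw-memory copy stub is bracketed by an UnsafeCopyMemoryMark scope, which records the stub's instruction range (and an error-exit pc) in the global UnsafeCopyMemory table; the platform signal handlers then consult UnsafeCopyMemory::contains_pc() when a fault arrives during an unsafe access and resume at page_error_continue_pc(pc), so SharedRuntime::handle_unsafe_access() posts an InternalError instead of the VM crashing. A minimal, self-contained sketch of the scenario being fixed (illustrative only, not the regression test added by this commit; the class name, file name, and sizes are made up):

import java.io.RandomAccessFile;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;

public class MappedTruncationSketch {
    public static void main(String[] args) throws Exception {
        try (RandomAccessFile f = new RandomAccessFile("mapped.tmp", "rw")) {
            f.setLength(4 * 4096);
            MappedByteBuffer buf =
                f.getChannel().map(FileChannel.MapMode.READ_WRITE, 0, 4 * 4096);
            f.setLength(0);               // truncate behind the live mapping
            byte[] dst = new byte[4 * 4096];
            try {
                // The bulk get is implemented with Unsafe.copyMemory; touching
                // a truncated page raises SIGBUS (an access violation on
                // Windows). With this fix the handler resumes at the recorded
                // error-exit pc and an InternalError reaches Java code.
                buf.get(dst);
            } catch (InternalError e) {
                System.out.println("bulk access failed gracefully: " + e);
            }
        }
    }
}

Before this change the handlers only recognized such faults in compiled methods with has_unsafe_access() set, or while the thread was _thread_in_vm; a fault inside these hand-written copy stubs, which run with the thread in native state, was fatal.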

View File

@ -1383,7 +1383,12 @@ class StubGenerator: public StubCodeGenerator {
// save regs before copy_memory
__ push(RegSet::of(d, count), sp);
}
{
// UnsafeCopyMemory page error: continue after ucm
bool add_entry = !is_oop && (!aligned || sizeof(jlong) == size);
UnsafeCopyMemoryMark ucmm(this, add_entry, true);
copy_memory(aligned, s, d, count, rscratch1, size);
}
if (is_oop) {
__ pop(RegSet::of(d, count), sp);
@ -1455,7 +1460,12 @@ class StubGenerator: public StubCodeGenerator {
// save regs before copy_memory
__ push(RegSet::of(d, count), sp);
}
{
// UnsafeCopyMemory page error: continue after ucm
bool add_entry = !is_oop && (!aligned || sizeof(jlong) == size);
UnsafeCopyMemoryMark ucmm(this, add_entry, true);
copy_memory(aligned, s, d, count, rscratch1, -size);
}
if (is_oop) {
__ pop(RegSet::of(d, count), sp);
if (VerifyOops)
@ -5816,6 +5826,10 @@ class StubGenerator: public StubCodeGenerator {
}
}; // end class declaration
#define UCM_TABLE_MAX_ENTRIES 8
void StubGenerator_generate(CodeBuffer* code, bool all) {
if (UnsafeCopyMemory::_table == NULL) {
UnsafeCopyMemory::create_table(UCM_TABLE_MAX_ENTRIES);
}
StubGenerator g(code, all);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -928,7 +928,7 @@ class StubGenerator: public StubCodeGenerator {
// Scratches 'count', R3.
// R4-R10 are preserved (saved/restored).
//
int generate_forward_aligned_copy_loop(Register from, Register to, Register count, int bytes_per_count) {
int generate_forward_aligned_copy_loop(Register from, Register to, Register count, int bytes_per_count, bool unsafe_copy = false) {
assert (from == R0 && to == R1 && count == R2, "adjust the implementation below");
const int bytes_per_loop = 8*wordSize; // 8 registers are read and written on every loop iteration
@ -954,6 +954,9 @@ class StubGenerator: public StubCodeGenerator {
Label L_skip_pld;
{
// UnsafeCopyMemory page error: continue after ucm
UnsafeCopyMemoryMark ucmm(this, unsafe_copy, true);
// predecrease to exit when there is less than count_per_loop
__ sub_32(count, count, count_per_loop);
@ -1056,6 +1059,7 @@ class StubGenerator: public StubCodeGenerator {
__ ldrb(R3, Address(from, 1, post_indexed), ne);
__ strb(R3, Address(to, 1, post_indexed), ne);
}
}
__ pop(RegisterSet(R4,R10));
@ -1083,7 +1087,7 @@ class StubGenerator: public StubCodeGenerator {
// Scratches 'count', R3.
// ARM R4-R10 are preserved (saved/restored).
//
int generate_backward_aligned_copy_loop(Register end_from, Register end_to, Register count, int bytes_per_count) {
int generate_backward_aligned_copy_loop(Register end_from, Register end_to, Register count, int bytes_per_count, bool unsafe_copy = false) {
assert (end_from == R0 && end_to == R1 && count == R2, "adjust the implementation below");
const int bytes_per_loop = 8*wordSize; // 8 registers are read and written on every loop iteration
@ -1099,6 +1103,9 @@ class StubGenerator: public StubCodeGenerator {
__ push(RegisterSet(R4,R10));
{
// UnsafeCopyMemory page error: continue after ucm
UnsafeCopyMemoryMark ucmm(this, unsafe_copy, true);
__ sub_32(count, count, count_per_loop);
const bool prefetch_before = pld_offset < 0;
@ -1194,7 +1201,7 @@ class StubGenerator: public StubCodeGenerator {
__ ldrb(R3, Address(end_from, -1, pre_indexed), ne);
__ strb(R3, Address(end_to, -1, pre_indexed), ne);
}
}
__ pop(RegisterSet(R4,R10));
return count_per_loop;
@ -1749,9 +1756,12 @@ class StubGenerator: public StubCodeGenerator {
//
// Notes:
// shifts 'from' and 'to'
void copy_small_array(Register from, Register to, Register count, Register tmp, Register tmp2, int bytes_per_count, bool forward, Label & entry) {
void copy_small_array(Register from, Register to, Register count, Register tmp, Register tmp2, int bytes_per_count, bool forward, Label & entry, bool unsafe_copy = false) {
assert_different_registers(from, to, count, tmp);
{
// UnsafeCopyMemory page error: continue after ucm
UnsafeCopyMemoryMark ucmm(this, unsafe_copy, true);
__ align(OptoLoopAlignment);
Label L_small_loop;
__ BIND(L_small_loop);
@ -1761,6 +1771,7 @@ class StubGenerator: public StubCodeGenerator {
load_one(tmp, from, bytes_per_count, forward, ge, tmp2);
__ b(L_small_loop, ge);
}
}
// Aligns 'to' by reading one word from 'from' and writing its part to 'to'.
//
@ -1876,7 +1887,7 @@ class StubGenerator: public StubCodeGenerator {
//
// Scratches 'from', 'count', R3 and R12.
// R4-R10 saved for use.
int align_dst_and_generate_shifted_copy_loop(Register from, Register to, Register count, int bytes_per_count, bool forward) {
int align_dst_and_generate_shifted_copy_loop(Register from, Register to, Register count, int bytes_per_count, bool forward, bool unsafe_copy = false) {
const Register Rval = forward ? R12 : R3; // as generate_{forward,backward}_shifted_copy_loop expect
@ -1886,6 +1897,10 @@ class StubGenerator: public StubCodeGenerator {
// then the remainder of 'to' divided by wordSize is one of elements of {seq}.
__ push(RegisterSet(R4,R10));
{
// UnsafeCopyMemory page error: continue after ucm
UnsafeCopyMemoryMark ucmm(this, unsafe_copy, true);
load_one(Rval, from, wordSize, forward);
switch (bytes_per_count) {
@ -1939,7 +1954,7 @@ class StubGenerator: public StubCodeGenerator {
ShouldNotReachHere();
break;
}
}
__ pop(RegisterSet(R4,R10));
return min_copy;
@ -1963,6 +1978,13 @@ class StubGenerator: public StubCodeGenerator {
}
#endif // !PRODUCT
address generate_unsafecopy_common_error_exit() {
address start_pc = __ pc();
__ mov(R0, 0);
__ ret();
return start_pc;
}
//
// Generate stub for primitive array copy. If "aligned" is true, the
// "from" and "to" addresses are assumed to be heapword aligned.
@ -2033,8 +2055,13 @@ class StubGenerator: public StubCodeGenerator {
from_is_aligned = true;
}
int count_required_to_align = from_is_aligned ? 0 : align_src(from, to, count, tmp1, bytes_per_count, forward);
int count_required_to_align = 0;
{
// UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
UnsafeCopyMemoryMark ucmm(this, !aligned, false);
count_required_to_align = from_is_aligned ? 0 : align_src(from, to, count, tmp1, bytes_per_count, forward);
assert (small_copy_limit >= count_required_to_align, "alignment could exhaust count");
}
// now 'from' is aligned
@ -2064,9 +2091,9 @@ class StubGenerator: public StubCodeGenerator {
int min_copy;
if (forward) {
min_copy = generate_forward_aligned_copy_loop (from, to, count, bytes_per_count);
min_copy = generate_forward_aligned_copy_loop(from, to, count, bytes_per_count, !aligned /*add UnsafeCopyMemory entry*/);
} else {
min_copy = generate_backward_aligned_copy_loop(from, to, count, bytes_per_count);
min_copy = generate_backward_aligned_copy_loop(from, to, count, bytes_per_count, !aligned /*add UnsafeCopyMemory entry*/);
}
assert(small_copy_limit >= count_required_to_align + min_copy, "first loop might exhaust count");
@ -2077,7 +2104,7 @@ class StubGenerator: public StubCodeGenerator {
__ ret();
{
copy_small_array(from, to, count, tmp1, tmp2, bytes_per_count, forward, L_small_array /* entry */);
copy_small_array(from, to, count, tmp1, tmp2, bytes_per_count, forward, L_small_array /* entry */, !aligned /*add UnsafeCopyMemory entry*/);
if (status) {
__ mov(R0, 0); // OK
@ -2088,7 +2115,7 @@ class StubGenerator: public StubCodeGenerator {
if (! to_is_aligned) {
__ BIND(L_unaligned_dst);
int min_copy_shifted = align_dst_and_generate_shifted_copy_loop(from, to, count, bytes_per_count, forward);
int min_copy_shifted = align_dst_and_generate_shifted_copy_loop(from, to, count, bytes_per_count, forward, !aligned /*add UnsafeCopyMemory entry*/);
assert (small_copy_limit >= count_required_to_align + min_copy_shifted, "first loop might exhaust count");
if (status) {
@ -2873,6 +2900,9 @@ class StubGenerator: public StubCodeGenerator {
status = true; // generate a status compatible with C1 calls
#endif
address ucm_common_error_exit = generate_unsafecopy_common_error_exit();
UnsafeCopyMemory::set_common_exit_stub_pc(ucm_common_error_exit);
// these need always status in case they are called from generic_arraycopy
StubRoutines::_jbyte_disjoint_arraycopy = generate_primitive_copy(false, "jbyte_disjoint_arraycopy", true, 1, true);
StubRoutines::_jshort_disjoint_arraycopy = generate_primitive_copy(false, "jshort_disjoint_arraycopy", true, 2, true);
@ -3055,6 +3085,10 @@ class StubGenerator: public StubCodeGenerator {
}
}; // end class declaration
#define UCM_TABLE_MAX_ENTRIES 32
void StubGenerator_generate(CodeBuffer* code, bool all) {
if (UnsafeCopyMemory::_table == NULL) {
UnsafeCopyMemory::create_table(UCM_TABLE_MAX_ENTRIES);
}
StubGenerator g(code, all);
}

View File

@ -952,6 +952,20 @@ class StubGenerator: public StubCodeGenerator {
// need to copy backwards
}
// This is the common error exit stub for UnsafeCopyMemory.
address generate_unsafecopy_common_error_exit() {
address start_pc = __ pc();
Register tmp1 = R6_ARG4;
// The copy stub has probably changed the DSCR value; reset it.
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp1, VM_Version::_dscr_val);
__ mtdscr(tmp1);
}
__ li(R3_RET, 0); // return 0
__ blr();
return start_pc;
}
// The guideline in the implementations of generate_disjoint_xxx_copy
// (xxx=byte,short,int,long,oop) is to copy as many elements as possible with
// single instructions, but to avoid alignment interrupts (see subsequent
@ -989,6 +1003,9 @@ class StubGenerator: public StubCodeGenerator {
VectorSRegister tmp_vsr2 = VSR2;
Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9, l_10;
{
// UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
UnsafeCopyMemoryMark ucmm(this, !aligned, false);
// Don't try anything fancy if arrays don't have many elements.
__ li(tmp3, 0);
@ -1134,6 +1151,7 @@ class StubGenerator: public StubCodeGenerator {
__ stbu(tmp2, 1, R4_ARG2);
__ bdnz(l_5);
}
}
__ bind(l_4);
__ li(R3_RET, 0); // return 0
@ -1167,7 +1185,9 @@ class StubGenerator: public StubCodeGenerator {
// Do reverse copy. We assume the case of actual overlap is rare enough
// that we don't have to optimize it.
Label l_1, l_2;
{
// UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
UnsafeCopyMemoryMark ucmm(this, !aligned, false);
__ b(l_2);
__ bind(l_1);
__ stbx(tmp1, R4_ARG2, R5_ARG3);
@ -1175,7 +1195,7 @@ class StubGenerator: public StubCodeGenerator {
__ addic_(R5_ARG3, R5_ARG3, -1);
__ lbzx(tmp1, R3_ARG1, R5_ARG3);
__ bge(CCR0, l_1);
}
__ li(R3_RET, 0); // return 0
__ blr();
@ -1252,7 +1272,9 @@ class StubGenerator: public StubCodeGenerator {
assert_positive_int(R5_ARG3);
Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9;
{
// UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
UnsafeCopyMemoryMark ucmm(this, !aligned, false);
// don't try anything fancy if arrays don't have many elements
__ li(tmp3, 0);
__ cmpwi(CCR0, R5_ARG3, 9);
@ -1401,6 +1423,8 @@ class StubGenerator: public StubCodeGenerator {
__ sthu(tmp2, 2, R4_ARG2);
__ bdnz(l_5);
}
}
__ bind(l_4);
__ li(R3_RET, 0); // return 0
__ blr();
@ -1432,6 +1456,9 @@ class StubGenerator: public StubCodeGenerator {
array_overlap_test(nooverlap_target, 1);
Label l_1, l_2;
{
// UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
UnsafeCopyMemoryMark ucmm(this, !aligned, false);
__ sldi(tmp1, R5_ARG3, 1);
__ b(l_2);
__ bind(l_1);
@ -1440,7 +1467,7 @@ class StubGenerator: public StubCodeGenerator {
__ addic_(tmp1, tmp1, -2);
__ lhzx(tmp2, R3_ARG1, tmp1);
__ bge(CCR0, l_1);
}
__ li(R3_RET, 0); // return 0
__ blr();
@ -1588,7 +1615,11 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", name);
address start = __ function_entry();
assert_positive_int(R5_ARG3);
{
// UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
UnsafeCopyMemoryMark ucmm(this, !aligned, false);
generate_disjoint_int_copy_core(aligned);
}
__ li(R3_RET, 0); // return 0
__ blr();
return start;
@ -1736,8 +1767,11 @@ class StubGenerator: public StubCodeGenerator {
STUB_ENTRY(jint_disjoint_arraycopy);
array_overlap_test(nooverlap_target, 2);
{
// UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
UnsafeCopyMemoryMark ucmm(this, !aligned, false);
generate_conjoint_int_copy_core(aligned);
}
__ li(R3_RET, 0); // return 0
__ blr();
@ -1859,7 +1893,11 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", name);
address start = __ function_entry();
assert_positive_int(R5_ARG3);
{
// UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
UnsafeCopyMemoryMark ucmm(this, !aligned, false);
generate_disjoint_long_copy_core(aligned);
}
__ li(R3_RET, 0); // return 0
__ blr();
@ -1986,8 +2024,11 @@ class StubGenerator: public StubCodeGenerator {
STUB_ENTRY(jlong_disjoint_arraycopy);
array_overlap_test(nooverlap_target, 3);
{
// UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
UnsafeCopyMemoryMark ucmm(this, !aligned, false);
generate_conjoint_long_copy_core(aligned);
}
__ li(R3_RET, 0); // return 0
__ blr();
@ -3008,6 +3049,9 @@ class StubGenerator: public StubCodeGenerator {
// Note: the disjoint stubs must be generated first, some of
// the conjoint stubs use them.
address ucm_common_error_exit = generate_unsafecopy_common_error_exit();
UnsafeCopyMemory::set_common_exit_stub_pc(ucm_common_error_exit);
// non-aligned disjoint versions
StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy");
StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
@ -3579,6 +3623,10 @@ class StubGenerator: public StubCodeGenerator {
}
};
#define UCM_TABLE_MAX_ENTRIES 8
void StubGenerator_generate(CodeBuffer* code, bool all) {
if (UnsafeCopyMemory::_table == NULL) {
UnsafeCopyMemory::create_table(UCM_TABLE_MAX_ENTRIES);
}
StubGenerator g(code, all);
}

View File

@ -1076,6 +1076,17 @@ class StubGenerator: public StubCodeGenerator {
__ delayed()->add(end_from, left_shift, end_from); // restore address
}
address generate_unsafecopy_common_error_exit() {
address start_pc = __ pc();
if (UseBlockCopy) {
__ wrasi(G0, Assembler::ASI_PRIMARY_NOFAULT);
__ membar(Assembler::StoreLoad);
}
__ retl();
__ delayed()->mov(G0, O0); // return 0
return start_pc;
}
//
// Generate stub for disjoint byte copy. If "aligned" is true, the
// "from" and "to" addresses are assumed to be heapword aligned.
@ -1107,6 +1118,10 @@ class StubGenerator: public StubCodeGenerator {
BLOCK_COMMENT("Entry:");
}
{
// UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
UnsafeCopyMemoryMark ucmm(this, !aligned, false);
// for short arrays, just do single element copy
__ cmp(count, 23); // 16 + 7
__ brx(Assembler::less, false, Assembler::pn, L_copy_byte);
@ -1162,6 +1177,7 @@ class StubGenerator: public StubCodeGenerator {
__ stb(O3, to, offset);
__ brx(Assembler::notZero, false, Assembler::pt, L_copy_byte_loop);
__ delayed()->inc(offset);
}
__ BIND(L_exit);
// O3, O4 are used as temp registers
@ -1207,6 +1223,10 @@ class StubGenerator: public StubCodeGenerator {
array_overlap_test(nooverlap_target, 0);
{
// UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
UnsafeCopyMemoryMark ucmm(this, !aligned, false);
__ add(to, count, end_to); // offset after last copied element
// for short arrays, just do single element copy
@ -1271,6 +1291,7 @@ class StubGenerator: public StubCodeGenerator {
__ deccc(count);
__ brx(Assembler::greater, false, Assembler::pt, L_copy_byte_loop);
__ delayed()->stb(O4, end_to, 0);
}
__ BIND(L_exit);
// O3, O4 are used as temp registers
@ -1311,6 +1332,9 @@ class StubGenerator: public StubCodeGenerator {
BLOCK_COMMENT("Entry:");
}
{
// UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
UnsafeCopyMemoryMark ucmm(this, !aligned, false);
// for short arrays, just do single element copy
__ cmp(count, 11); // 8 + 3 (22 bytes)
__ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
@ -1373,6 +1397,7 @@ class StubGenerator: public StubCodeGenerator {
__ sth(O3, to, offset);
__ brx(Assembler::notZero, false, Assembler::pt, L_copy_2_bytes_loop);
__ delayed()->inc(offset, 2);
}
__ BIND(L_exit);
// O3, O4 are used as temp registers
@ -1639,6 +1664,10 @@ class StubGenerator: public StubCodeGenerator {
array_overlap_test(nooverlap_target, 1);
{
// UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
UnsafeCopyMemoryMark ucmm(this, !aligned, false);
__ sllx(count, LogBytesPerShort, byte_count);
__ add(to, byte_count, end_to); // offset after last copied element
@ -1711,7 +1740,7 @@ class StubGenerator: public StubCodeGenerator {
__ deccc(count);
__ brx(Assembler::greater, false, Assembler::pt, L_copy_2_bytes_loop);
__ delayed()->sth(O4, end_to, 0);
}
__ BIND(L_exit);
// O3, O4 are used as temp registers
inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4);
@ -1870,9 +1899,11 @@ class StubGenerator: public StubCodeGenerator {
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:");
}
{
// UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
UnsafeCopyMemoryMark ucmm(this, !aligned, false);
generate_disjoint_int_copy_core(aligned);
}
// O3, O4 are used as temp registers
inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4);
__ retl();
@ -2005,9 +2036,11 @@ class StubGenerator: public StubCodeGenerator {
}
array_overlap_test(nooverlap_target, 2);
{
// UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
UnsafeCopyMemoryMark ucmm(this, !aligned, false);
generate_conjoint_int_copy_core(aligned);
}
// O3, O4 are used as temp registers
inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4);
__ retl();
@ -2156,8 +2189,11 @@ class StubGenerator: public StubCodeGenerator {
BLOCK_COMMENT("Entry:");
}
{
// UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
UnsafeCopyMemoryMark ucmm(this, true, false);
generate_disjoint_long_copy_core(aligned);
}
// O3, O4 are used as temp registers
inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4);
__ retl();
@ -2232,9 +2268,11 @@ class StubGenerator: public StubCodeGenerator {
}
array_overlap_test(nooverlap_target, 3);
{
// UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
UnsafeCopyMemoryMark ucmm(this, true, false);
generate_conjoint_long_copy_core(aligned);
}
// O3, O4 are used as temp registers
inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4);
__ retl();
@ -2929,6 +2967,9 @@ class StubGenerator: public StubCodeGenerator {
address entry_jlong_arraycopy;
address entry_checkcast_arraycopy;
address ucm_common_error_exit = generate_unsafecopy_common_error_exit();
UnsafeCopyMemory::set_common_exit_stub_pc(ucm_common_error_exit);
//*** jbyte
// Always need aligned and unaligned versions
StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry,
@ -5821,6 +5862,10 @@ class StubGenerator: public StubCodeGenerator {
}; // end class declaration
#define UCM_TABLE_MAX_ENTRIES 8
void StubGenerator_generate(CodeBuffer* code, bool all) {
if (UnsafeCopyMemory::_table == NULL) {
UnsafeCopyMemory::create_table(UCM_TABLE_MAX_ENTRIES);
}
StubGenerator g(code, all);
}

View File

@ -789,6 +789,8 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
case 0x59: // mulpd
case 0x6E: // movd
case 0x7E: // movd
case 0x6F: // movdq
case 0x7F: // movdq
case 0xAE: // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush
case 0xFE: // paddd
debug_only(has_disp32 = true);
@ -4274,6 +4276,7 @@ void Assembler::pshuflw(XMMRegister dst, Address src, int mode) {
emit_operand(dst, src);
emit_int8(mode & 0xFF);
}
void Assembler::evshufi64x2(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
assert(VM_Version::supports_evex(), "requires EVEX support");
assert(vector_len == Assembler::AVX_256bit || vector_len == Assembler::AVX_512bit, "");

View File

@ -889,7 +889,10 @@ class StubGenerator: public StubCodeGenerator {
BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->arraycopy_prologue(_masm, decorators, t, from, to, count);
{
bool add_entry = (t != T_OBJECT && (!aligned || t == T_INT));
// UnsafeCopyMemory page error: continue after ucm
UnsafeCopyMemoryMark ucmm(this, add_entry, true);
__ subptr(to, from); // to --> to_from
__ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
__ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp
@ -973,7 +976,11 @@ class StubGenerator: public StubCodeGenerator {
} else {
__ BIND(L_copy_2_bytes);
}
}
if (VM_Version::supports_mmx() && !UseXMMForArrayCopy) {
__ emms();
}
__ movl(count, Address(rsp, 12+12)); // reread 'count'
bs->arraycopy_epilogue(_masm, decorators, t, from, to, count);
@ -1079,6 +1086,10 @@ class StubGenerator: public StubCodeGenerator {
BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->arraycopy_prologue(_masm, decorators, t, from, to, count);
{
bool add_entry = (t != T_OBJECT && (!aligned || t == T_INT));
// UnsafeCopyMemory page error: continue after ucm
UnsafeCopyMemoryMark ucmm(this, add_entry, true);
// copy from high to low
__ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
__ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp
@ -1176,7 +1187,11 @@ class StubGenerator: public StubCodeGenerator {
} else {
__ BIND(L_copy_2_bytes);
}
}
if (VM_Version::supports_mmx() && !UseXMMForArrayCopy) {
__ emms();
}
__ movl2ptr(count, Address(rsp, 12+12)); // reread count
bs->arraycopy_epilogue(_masm, decorators, t, from, to, count);
@ -1212,6 +1227,9 @@ class StubGenerator: public StubCodeGenerator {
*entry = __ pc(); // Entry point from conjoint arraycopy stub.
BLOCK_COMMENT("Entry:");
{
// UnsafeCopyMemory page error: continue after ucm
UnsafeCopyMemoryMark ucmm(this, true, true);
__ subptr(to, from); // to --> to_from
if (VM_Version::supports_mmx()) {
if (UseXMMForArrayCopy) {
@ -1230,6 +1248,10 @@ class StubGenerator: public StubCodeGenerator {
__ decrement(count);
__ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
}
}
if (VM_Version::supports_mmx() && !UseXMMForArrayCopy) {
__ emms();
}
inc_copy_counter_np(T_LONG);
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ vzeroupper();
@ -1267,6 +1289,10 @@ class StubGenerator: public StubCodeGenerator {
__ movptr(from, Address(rsp, 8)); // from
__ jump_cc(Assembler::aboveEqual, nooverlap);
{
// UnsafeCopyMemory page error: continue after ucm
UnsafeCopyMemoryMark ucmm(this, true, true);
__ jmpb(L_copy_8_bytes);
__ align(OptoLoopAlignment);
@ -1287,6 +1313,7 @@ class StubGenerator: public StubCodeGenerator {
__ decrement(count);
__ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
}
if (VM_Version::supports_mmx() && !UseXMMForArrayCopy) {
__ emms();
}
@ -3945,7 +3972,10 @@ class StubGenerator: public StubCodeGenerator {
}
}; // end class declaration
#define UCM_TABLE_MAX_ENTRIES 8
void StubGenerator_generate(CodeBuffer* code, bool all) {
if (UnsafeCopyMemory::_table == NULL) {
UnsafeCopyMemory::create_table(UCM_TABLE_MAX_ENTRIES);
}
StubGenerator g(code, all);
}

View File

@ -1433,7 +1433,6 @@ class StubGenerator: public StubCodeGenerator {
__ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
}
// Arguments:
// aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
// ignored
@ -1482,6 +1481,9 @@ class StubGenerator: public StubCodeGenerator {
setup_arg_regs(); // from => rdi, to => rsi, count => rdx
// r9 and r10 may be used to save non-volatile registers
{
// UnsafeCopyMemory page error: continue after ucm
UnsafeCopyMemoryMark ucmm(this, !aligned, true);
// 'from', 'to' and 'count' are now valid
__ movptr(byte_count, count);
__ shrptr(count, 3); // count => qword_count
@ -1525,8 +1527,9 @@ class StubGenerator: public StubCodeGenerator {
__ jccb(Assembler::zero, L_exit);
__ movb(rax, Address(end_from, 8));
__ movb(Address(end_to, 8), rax);
}
__ BIND(L_exit);
address ucme_exit_pc = __ pc();
restore_arg_regs();
inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
@ -1534,10 +1537,12 @@ class StubGenerator: public StubCodeGenerator {
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
{
UnsafeCopyMemoryMark ucmm(this, !aligned, false, ucme_exit_pc);
// Copy in multi-byte chunks
copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
__ jmp(L_copy_4_bytes);
}
return start;
}
@ -1582,6 +1587,9 @@ class StubGenerator: public StubCodeGenerator {
setup_arg_regs(); // from => rdi, to => rsi, count => rdx
// r9 and r10 may be used to save non-volatile registers
{
// UnsafeCopyMemory page error: continue after ucm
UnsafeCopyMemoryMark ucmm(this, !aligned, true);
// 'from', 'to' and 'count' are now valid
__ movptr(byte_count, count);
__ shrptr(count, 3); // count => qword_count
@ -1616,7 +1624,7 @@ class StubGenerator: public StubCodeGenerator {
__ movq(Address(to, qword_count, Address::times_8, -8), rax);
__ decrement(qword_count);
__ jcc(Assembler::notZero, L_copy_8_bytes);
}
restore_arg_regs();
inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
@ -1624,9 +1632,12 @@ class StubGenerator: public StubCodeGenerator {
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
{
// UnsafeCopyMemory page error: continue after ucm
UnsafeCopyMemoryMark ucmm(this, !aligned, true);
// Copy in multi-byte chunks
copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
}
restore_arg_regs();
inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
@ -1684,6 +1695,9 @@ class StubGenerator: public StubCodeGenerator {
setup_arg_regs(); // from => rdi, to => rsi, count => rdx
// r9 and r10 may be used to save non-volatile registers
{
// UnsafeCopyMemory page error: continue after ucm
UnsafeCopyMemoryMark ucmm(this, !aligned, true);
// 'from', 'to' and 'count' are now valid
__ movptr(word_count, count);
__ shrptr(count, 2); // count => qword_count
@ -1720,8 +1734,9 @@ class StubGenerator: public StubCodeGenerator {
__ jccb(Assembler::zero, L_exit);
__ movw(rax, Address(end_from, 8));
__ movw(Address(end_to, 8), rax);
}
__ BIND(L_exit);
address ucme_exit_pc = __ pc();
restore_arg_regs();
inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
@ -1729,9 +1744,12 @@ class StubGenerator: public StubCodeGenerator {
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
{
UnsafeCopyMemoryMark ucmm(this, !aligned, false, ucme_exit_pc);
// Copy in multi-byte chunks
copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
__ jmp(L_copy_4_bytes);
}
return start;
}
@ -1798,6 +1816,9 @@ class StubGenerator: public StubCodeGenerator {
setup_arg_regs(); // from => rdi, to => rsi, count => rdx
// r9 and r10 may be used to save non-volatile registers
{
// UnsafeCopyMemory page error: continue after ucm
UnsafeCopyMemoryMark ucmm(this, !aligned, true);
// 'from', 'to' and 'count' are now valid
__ movptr(word_count, count);
__ shrptr(count, 2); // count => qword_count
@ -1824,7 +1845,7 @@ class StubGenerator: public StubCodeGenerator {
__ movq(Address(to, qword_count, Address::times_8, -8), rax);
__ decrement(qword_count);
__ jcc(Assembler::notZero, L_copy_8_bytes);
}
restore_arg_regs();
inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
@ -1832,9 +1853,12 @@ class StubGenerator: public StubCodeGenerator {
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
{
// UnsafeCopyMemory page error: continue after ucm
UnsafeCopyMemoryMark ucmm(this, !aligned, true);
// Copy in multi-byte chunks
copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
}
restore_arg_regs();
inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
@ -1905,6 +1929,9 @@ class StubGenerator: public StubCodeGenerator {
BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->arraycopy_prologue(_masm, decorators, type, from, to, count);
{
// UnsafeCopyMemory page error: continue after ucm
UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true);
// 'from', 'to' and 'count' are now valid
__ movptr(dword_count, count);
__ shrptr(count, 1); // count => qword_count
@ -1928,8 +1955,9 @@ class StubGenerator: public StubCodeGenerator {
__ jccb(Assembler::zero, L_exit);
__ movl(rax, Address(end_from, 8));
__ movl(Address(end_to, 8), rax);
}
__ BIND(L_exit);
address ucme_exit_pc = __ pc();
bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count);
restore_arg_regs_using_thread();
inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
@ -1938,9 +1966,12 @@ class StubGenerator: public StubCodeGenerator {
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
{
UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, false, ucme_exit_pc);
// Copy in multi-byte chunks
copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
__ jmp(L_copy_4_bytes);
}
return start;
}
@ -2001,6 +2032,9 @@ class StubGenerator: public StubCodeGenerator {
bs->arraycopy_prologue(_masm, decorators, type, from, to, count);
assert_clean_int(count, rax); // Make sure 'count' is clean int.
{
// UnsafeCopyMemory page error: continue after ucm
UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true);
// 'from', 'to' and 'count' are now valid
__ movptr(dword_count, count);
__ shrptr(count, 1); // count => qword_count
@ -2020,7 +2054,7 @@ class StubGenerator: public StubCodeGenerator {
__ movq(Address(to, qword_count, Address::times_8, -8), rax);
__ decrement(qword_count);
__ jcc(Assembler::notZero, L_copy_8_bytes);
}
if (is_oop) {
__ jmp(L_exit);
}
@ -2031,8 +2065,12 @@ class StubGenerator: public StubCodeGenerator {
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
{
// UnsafeCopyMemory page error: continue after ucm
UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true);
// Copy in multi-byte chunks
copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
}
__ BIND(L_exit);
bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count);
@ -2102,6 +2140,9 @@ class StubGenerator: public StubCodeGenerator {
BasicType type = is_oop ? T_OBJECT : T_LONG;
BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count);
{
// UnsafeCopyMemory page error: continue after ucm
UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true);
// Copy from low to high addresses. Use 'to' as scratch.
__ lea(end_from, Address(from, qword_count, Address::times_8, -8));
@ -2115,7 +2156,7 @@ class StubGenerator: public StubCodeGenerator {
__ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
__ increment(qword_count);
__ jcc(Assembler::notZero, L_copy_8_bytes);
}
if (is_oop) {
__ jmp(L_exit);
} else {
@ -2127,8 +2168,12 @@ class StubGenerator: public StubCodeGenerator {
__ ret(0);
}
{
// UnsafeCopyMemory page error: continue after ucm
UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true);
// Copy in multi-byte chunks
copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
}
__ BIND(L_exit);
bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count);
@ -2195,6 +2240,9 @@ class StubGenerator: public StubCodeGenerator {
BasicType type = is_oop ? T_OBJECT : T_LONG;
BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count);
{
// UnsafeCopyMemory page error: continue after ucm
UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true);
__ jmp(L_copy_bytes);
@ -2204,7 +2252,7 @@ class StubGenerator: public StubCodeGenerator {
__ movq(Address(to, qword_count, Address::times_8, -8), rax);
__ decrement(qword_count);
__ jcc(Assembler::notZero, L_copy_8_bytes);
}
if (is_oop) {
__ jmp(L_exit);
} else {
@ -2215,10 +2263,13 @@ class StubGenerator: public StubCodeGenerator {
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
}
{
// UnsafeCopyMemory page error: continue after ucm
UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true);
// Copy in multi-byte chunks
copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
}
__ BIND(L_exit);
bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count);
restore_arg_regs_using_thread();
@ -6036,6 +6087,10 @@ address generate_avx_ghash_processBlocks() {
}
}; // end class declaration
#define UCM_TABLE_MAX_ENTRIES 16
void StubGenerator_generate(CodeBuffer* code, bool all) {
if (UnsafeCopyMemory::_table == NULL) {
UnsafeCopyMemory::create_table(UCM_TABLE_MAX_ENTRIES);
}
StubGenerator g(code, all);
}

View File

@ -2581,10 +2581,18 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
}
if ((thread->thread_state() == _thread_in_vm &&
bool is_unsafe_arraycopy = (thread->thread_state() == _thread_in_native || in_java) && UnsafeCopyMemory::contains_pc(pc);
if (((thread->thread_state() == _thread_in_vm ||
thread->thread_state() == _thread_in_native ||
is_unsafe_arraycopy) &&
thread->doing_unsafe_access()) ||
(nm != NULL && nm->has_unsafe_access())) {
return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, (address)Assembler::locate_next_instruction(pc)));
address next_pc = Assembler::locate_next_instruction(pc);
if (is_unsafe_arraycopy) {
next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
}
return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, next_pc));
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -441,8 +441,12 @@ JVM_handle_aix_signal(int sig, siginfo_t* info, void* ucVoid, int abort_if_unrec
// underlying file has been truncated. Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
CompiledMethod* nm = cb->as_compiled_method_or_null();
if (nm != NULL && nm->has_unsafe_access()) {
bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc));
if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
address next_pc = pc + 4;
if (is_unsafe_arraycopy) {
next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
}
next_pc = SharedRuntime::handle_unsafe_access(thread, next_pc);
os::Aix::ucontext_set_pc(uc, next_pc);
return 1;
@ -461,9 +465,13 @@ JVM_handle_aix_signal(int sig, siginfo_t* info, void* ucVoid, int abort_if_unrec
stub = pc + 4; // continue with next instruction.
goto run_stub;
}
else if (thread->thread_state() == _thread_in_vm &&
else if ((thread->thread_state() == _thread_in_vm ||
thread->thread_state() == _thread_in_native) &&
sig == SIGBUS && thread->doing_unsafe_access()) {
address next_pc = pc + 4;
if (UnsafeCopyMemory::contains_pc(pc)) {
next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
}
next_pc = SharedRuntime::handle_unsafe_access(thread, next_pc);
os::Aix::ucontext_set_pc(uc, next_pc);
return 1;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -589,8 +589,12 @@ JVM_handle_bsd_signal(int sig,
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
if (nm != NULL && nm->has_unsafe_access()) {
bool is_unsafe_arraycopy = thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc);
if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
address next_pc = Assembler::locate_next_instruction(pc);
if (is_unsafe_arraycopy) {
next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
}
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}
}
@ -659,10 +663,14 @@ JVM_handle_bsd_signal(int sig,
// Determination of interpreter/vtable stub/compiled code null exception
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
}
} else if (thread->thread_state() == _thread_in_vm &&
} else if ((thread->thread_state() == _thread_in_vm ||
thread->thread_state() == _thread_in_native) &&
sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
thread->doing_unsafe_access()) {
address next_pc = Assembler::locate_next_instruction(pc);
if (UnsafeCopyMemory::contains_pc(pc)) {
next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
}
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -193,7 +193,8 @@ JVM_handle_bsd_signal(int sig,
/*if (thread->thread_state() == _thread_in_Java) {
ShouldNotCallThis();
}
else*/ if (thread->thread_state() == _thread_in_vm &&
else*/ if ((thread->thread_state() == _thread_in_vm ||
thread->thread_state() == _thread_in_native) &&
sig == SIGBUS && thread->doing_unsafe_access()) {
ShouldNotCallThis();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -419,8 +419,12 @@ JVM_handle_linux_signal(int sig,
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
if (nm != NULL && nm->has_unsafe_access()) {
bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc));
if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
address next_pc = pc + NativeCall::instruction_size;
if (is_unsafe_arraycopy) {
next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
}
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}
}
@ -439,10 +443,14 @@ JVM_handle_linux_signal(int sig,
// Determination of interpreter/vtable stub/compiled code null exception
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
}
} else if (thread->thread_state() == _thread_in_vm &&
} else if ((thread->thread_state() == _thread_in_vm ||
thread->thread_state() == _thread_in_native) &&
sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
thread->doing_unsafe_access()) {
address next_pc = pc + NativeCall::instruction_size;
if (UnsafeCopyMemory::contains_pc(pc)) {
next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
}
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -384,7 +384,7 @@ extern "C" int JVM_handle_linux_signal(int sig, siginfo_t* info,
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
if (nm != NULL && nm->has_unsafe_access()) {
if ((nm != NULL && nm->has_unsafe_access()) || (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc))) {
unsafe_access = true;
}
} else if (sig == SIGSEGV &&
@ -398,7 +398,8 @@ extern "C" int JVM_handle_linux_signal(int sig, siginfo_t* info,
// Zombie
stub = SharedRuntime::get_handle_wrong_method_stub();
}
} else if (thread->thread_state() == _thread_in_vm &&
} else if ((thread->thread_state() == _thread_in_vm ||
thread->thread_state() == _thread_in_native) &&
sig == SIGBUS && thread->doing_unsafe_access()) {
unsafe_access = true;
}
@ -418,6 +419,9 @@ extern "C" int JVM_handle_linux_signal(int sig, siginfo_t* info,
// any other suitable exception reason,
// so assume it is an unsafe access.
address next_pc = pc + Assembler::InstructionSize;
if (UnsafeCopyMemory::contains_pc(pc)) {
next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
}
#ifdef __thumb__
if (uc->uc_mcontext.arm_cpsr & PSR_T_BIT) {
next_pc = (address)((intptr_t)next_pc | 0x1);

View File

@ -469,8 +469,12 @@ JVM_handle_linux_signal(int sig,
// underlying file has been truncated. Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
if (nm != NULL && nm->has_unsafe_access()) {
bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc));
if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
address next_pc = pc + 4;
if (is_unsafe_arraycopy) {
next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
}
next_pc = SharedRuntime::handle_unsafe_access(thread, next_pc);
os::Linux::ucontext_set_pc(uc, next_pc);
return true;
@ -485,11 +489,15 @@ JVM_handle_linux_signal(int sig,
// flushing of icache is not necessary.
stub = pc + 4; // continue with next instruction.
}
else if (thread->thread_state() == _thread_in_vm &&
else if ((thread->thread_state() == _thread_in_vm ||
thread->thread_state() == _thread_in_native) &&
sig == SIGBUS && thread->doing_unsafe_access()) {
address next_pc = pc + 4;
if (UnsafeCopyMemory::contains_pc(pc)) {
next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
}
next_pc = SharedRuntime::handle_unsafe_access(thread, next_pc);
os::Linux::ucontext_set_pc(uc, pc + 4);
os::Linux::ucontext_set_pc(uc, next_pc);
return true;
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -467,7 +467,8 @@ JVM_handle_linux_signal(int sig,
// when the vector facility is installed, but operating system support is missing.
VM_Version::reset_has_VectorFacility();
stub = pc; // Continue with next instruction.
} else if (thread->thread_state() == _thread_in_vm &&
} else if ((thread->thread_state() == _thread_in_vm ||
thread->thread_state() == _thread_in_native) &&
sig == SIGBUS && thread->doing_unsafe_access()) {
// We don't really need a stub here! Just set the pending exception and
// continue at the next instruction after the faulting read. Returning

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -385,7 +385,11 @@ inline static bool checkByteBuffer(address pc, address npc, JavaThread * thread,
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
CompiledMethod* nm = cb->as_compiled_method_or_null();
if (nm != NULL && nm->has_unsafe_access()) {
bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc));
if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
if (is_unsafe_arraycopy) {
npc = UnsafeCopyMemory::page_error_continue_pc(pc);
}
*stub = SharedRuntime::handle_unsafe_access(thread, npc);
return true;
}
@ -550,8 +554,12 @@ JVM_handle_linux_signal(int sig,
}
if (sig == SIGBUS &&
thread->thread_state() == _thread_in_vm &&
(thread->thread_state() == _thread_in_vm ||
thread->thread_state() == _thread_in_native) &&
thread->doing_unsafe_access()) {
if (UnsafeCopyMemory::contains_pc(pc)) {
npc = UnsafeCopyMemory::page_error_continue_pc(pc);
}
stub = SharedRuntime::handle_unsafe_access(thread, npc);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -435,8 +435,12 @@ JVM_handle_linux_signal(int sig,
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
if (nm != NULL && nm->has_unsafe_access()) {
bool is_unsafe_arraycopy = thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc);
if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
address next_pc = Assembler::locate_next_instruction(pc);
if (is_unsafe_arraycopy) {
next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
}
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}
}
@ -483,10 +487,14 @@ JVM_handle_linux_signal(int sig,
// Determination of interpreter/vtable stub/compiled code null exception
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
}
} else if (thread->thread_state() == _thread_in_vm &&
sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
thread->doing_unsafe_access()) {
} else if ((thread->thread_state() == _thread_in_vm ||
thread->thread_state() == _thread_in_native) &&
(sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
thread->doing_unsafe_access())) {
address next_pc = Assembler::locate_next_instruction(pc);
if (UnsafeCopyMemory::contains_pc(pc)) {
next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
}
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -207,7 +207,8 @@ JVM_handle_linux_signal(int sig,
/*if (thread->thread_state() == _thread_in_Java) {
ShouldNotCallThis();
}
else*/ if (thread->thread_state() == _thread_in_vm &&
else*/ if ((thread->thread_state() == _thread_in_vm ||
thread->thread_state() == _thread_in_native) &&
sig == SIGBUS && thread->doing_unsafe_access()) {
ShouldNotCallThis();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -436,8 +436,12 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
}
if (thread->thread_state() == _thread_in_vm) {
if (thread->thread_state() == _thread_in_vm ||
thread->thread_state() == _thread_in_native) {
if (sig == SIGBUS && thread->doing_unsafe_access()) {
if (UnsafeCopyMemory::contains_pc(pc)) {
npc = UnsafeCopyMemory::page_error_continue_pc(pc);
}
stub = SharedRuntime::handle_unsafe_access(thread, npc);
}
}
@ -476,7 +480,11 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
CompiledMethod* nm = cb->as_compiled_method_or_null();
if (nm != NULL && nm->has_unsafe_access()) {
bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc));
if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
if (is_unsafe_arraycopy) {
npc = UnsafeCopyMemory::page_error_continue_pc(pc);
}
stub = SharedRuntime::handle_unsafe_access(thread, npc);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -517,9 +517,13 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
stub = VM_Version::cpuinfo_cont_addr();
}
if (thread->thread_state() == _thread_in_vm) {
if (thread->thread_state() == _thread_in_vm ||
thread->thread_state() == _thread_in_native) {
if (sig == SIGBUS && info->si_code == BUS_OBJERR && thread->doing_unsafe_access()) {
address next_pc = Assembler::locate_next_instruction(pc);
if (UnsafeCopyMemory::contains_pc(pc)) {
next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
}
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}
}
@ -536,8 +540,12 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
if (cb != NULL) {
CompiledMethod* nm = cb->as_compiled_method_or_null();
if (nm != NULL && nm->has_unsafe_access()) {
bool is_unsafe_arraycopy = thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc);
if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
address next_pc = Assembler::locate_next_instruction(pc);
if (is_unsafe_arraycopy) {
next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
}
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}
}

View File

@ -4240,6 +4240,14 @@ bool LibraryCallKit::inline_unsafe_copyMemory() {
// Do not let writes of the copy source or destination float below the copy.
insert_mem_bar(Op_MemBarCPUOrder);
Node* thread = _gvn.transform(new ThreadLocalNode());
Node* doing_unsafe_access_addr = basic_plus_adr(top(), thread, in_bytes(JavaThread::doing_unsafe_access_offset()));
BasicType doing_unsafe_access_bt = T_BYTE;
assert((sizeof(bool) * CHAR_BIT) == 8, "not implemented");
// update volatile field
store_to_memory(control(), doing_unsafe_access_addr, intcon(1), doing_unsafe_access_bt, Compile::AliasIdxRaw, MemNode::unordered);
// Call it. Note that the length argument is not scaled.
make_runtime_call(RC_LEAF|RC_NO_FP,
OptoRuntime::fast_arraycopy_Type(),
@ -4248,6 +4256,8 @@ bool LibraryCallKit::inline_unsafe_copyMemory() {
TypeRawPtr::BOTTOM,
src, dst, size XTOP);
store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, Compile::AliasIdxRaw, MemNode::unordered);
// Do not let reads of the copy destination float above the copy.
insert_mem_bar(Op_MemBarCPUOrder);

View File

@ -148,6 +148,25 @@ jlong Unsafe_field_offset_from_byte_offset(jlong byte_offset) {
///// Data read/writes on the Java heap and in native (off-heap) memory
/**
* Helper class to wrap memory accesses in JavaThread::doing_unsafe_access()
*/
class GuardUnsafeAccess {
JavaThread* _thread;
public:
GuardUnsafeAccess(JavaThread* thread) : _thread(thread) {
// native/off-heap access which may raise SIGBUS if accessing
// memory mapped file data in a region of the file which has
// been truncated and is now invalid.
_thread->set_doing_unsafe_access(true);
}
~GuardUnsafeAccess() {
_thread->set_doing_unsafe_access(false);
}
};
/**
* Helper class for accessing memory.
*
@ -189,25 +208,6 @@ class MemoryAccess : StackObj {
return x != 0;
}
/**
* Helper class to wrap memory accesses in JavaThread::doing_unsafe_access()
*/
class GuardUnsafeAccess {
JavaThread* _thread;
public:
GuardUnsafeAccess(JavaThread* thread) : _thread(thread) {
// native/off-heap access which may raise SIGBUS if accessing
// memory mapped file data in a region of the file which has
// been truncated and is now invalid
_thread->set_doing_unsafe_access(true);
}
~GuardUnsafeAccess() {
_thread->set_doing_unsafe_access(false);
}
};
public:
MemoryAccess(JavaThread* thread, jobject obj, jlong offset)
: _thread(thread), _obj(JNIHandles::resolve(obj)), _offset((ptrdiff_t)offset) {
@ -399,8 +399,14 @@ UNSAFE_ENTRY(void, Unsafe_CopyMemory0(JNIEnv *env, jobject unsafe, jobject srcOb
void* src = index_oop_from_field_offset_long(srcp, srcOffset);
void* dst = index_oop_from_field_offset_long(dstp, dstOffset);
{
GuardUnsafeAccess guard(thread);
if (StubRoutines::unsafe_arraycopy() != NULL) {
StubRoutines::UnsafeArrayCopy_stub()(src, dst, sz);
} else {
Copy::conjoint_memory_atomic(src, dst, sz);
}
}
} UNSAFE_END
// This function is a leaf since if the source and destination are both in native memory
@ -416,7 +422,11 @@ UNSAFE_LEAF(void, Unsafe_CopySwapMemory0(JNIEnv *env, jobject unsafe, jobject sr
address src = (address)srcOffset;
address dst = (address)dstOffset;
{
JavaThread* thread = JavaThread::thread_from_jni_environment(env);
GuardUnsafeAccess guard(thread);
Copy::conjoint_swap(src, dst, sz, esz);
}
} else {
// At least one of src/dst are on heap, transition to VM to access raw pointers
@ -427,7 +437,10 @@ UNSAFE_LEAF(void, Unsafe_CopySwapMemory0(JNIEnv *env, jobject unsafe, jobject sr
address src = (address)index_oop_from_field_offset_long(srcp, srcOffset);
address dst = (address)index_oop_from_field_offset_long(dstp, dstOffset);
{
GuardUnsafeAccess guard(thread);
Copy::conjoint_swap(src, dst, sz, esz);
}
} JVM_END
}
} UNSAFE_END

View File

@ -38,6 +38,10 @@
#include "opto/runtime.hpp"
#endif
UnsafeCopyMemory* UnsafeCopyMemory::_table = NULL;
int UnsafeCopyMemory::_table_length = 0;
int UnsafeCopyMemory::_table_max_length = 0;
address UnsafeCopyMemory::_common_exit_stub_pc = NULL;
// Implementation of StubRoutines - for a description
// of how to extend it, see the header file.
@ -113,7 +117,6 @@ address StubRoutines::_checkcast_arraycopy_uninit = NULL;
address StubRoutines::_unsafe_arraycopy = NULL;
address StubRoutines::_generic_arraycopy = NULL;
address StubRoutines::_jbyte_fill;
address StubRoutines::_jshort_fill;
address StubRoutines::_jint_fill;
@ -177,6 +180,31 @@ address StubRoutines::_safefetchN_continuation_pc = NULL;
extern void StubGenerator_generate(CodeBuffer* code, bool all); // only interface to generators
void UnsafeCopyMemory::create_table(int max_size) {
UnsafeCopyMemory::_table = new UnsafeCopyMemory[max_size];
UnsafeCopyMemory::_table_max_length = max_size;
}
bool UnsafeCopyMemory::contains_pc(address pc) {
for (int i = 0; i < UnsafeCopyMemory::_table_length; i++) {
UnsafeCopyMemory* entry = &UnsafeCopyMemory::_table[i];
if (pc >= entry->start_pc() && pc < entry->end_pc()) {
return true;
}
}
return false;
}
address UnsafeCopyMemory::page_error_continue_pc(address pc) {
for (int i = 0; i < UnsafeCopyMemory::_table_length; i++) {
UnsafeCopyMemory* entry = &UnsafeCopyMemory::_table[i];
if (pc >= entry->start_pc() && pc < entry->end_pc()) {
return entry->error_exit_pc();
}
}
return NULL;
}
void StubRoutines::initialize1() {
if (_code1 == NULL) {
ResourceMark rm;
@ -569,3 +597,25 @@ StubRoutines::select_arraycopy_function(BasicType t, bool aligned, bool disjoint
#undef RETURN_STUB
#undef RETURN_STUB_PARM
}
UnsafeCopyMemoryMark::UnsafeCopyMemoryMark(StubCodeGenerator* cgen, bool add_entry, bool continue_at_scope_end, address error_exit_pc) {
_cgen = cgen;
_ucm_entry = NULL;
if (add_entry) {
address err_exit_pc = NULL;
if (!continue_at_scope_end) {
err_exit_pc = error_exit_pc != NULL ? error_exit_pc : UnsafeCopyMemory::common_exit_stub_pc();
}
assert(err_exit_pc != NULL || continue_at_scope_end, "error exit not set");
_ucm_entry = UnsafeCopyMemory::add_to_table(_cgen->assembler()->pc(), NULL, err_exit_pc);
}
}
UnsafeCopyMemoryMark::~UnsafeCopyMemoryMark() {
if (_ucm_entry != NULL) {
_ucm_entry->set_end_pc(_cgen->assembler()->pc());
if (_ucm_entry->error_exit_pc() == NULL) {
_ucm_entry->set_error_exit_pc(_cgen->assembler()->pc());
}
}
}
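A mark is scoped around the copy instructions a stub generator emits. With continue_at_scope_end == true, the destructor's pc doubles as the error exit, so a faulting copy simply resumes after the copy sequence. A hypothetical use (emit_copy_loop is illustrative, not a real helper):
{
  UnsafeCopyMemoryMark ucmm(this, /*add_entry*/ true, /*continue_at_scope_end*/ true);
  emit_copy_loop(src, dst, count);  // faults in this pc range become recoverable
}  // ~UnsafeCopyMemoryMark records the end pc and, here, the error exit pc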

View File

@ -74,6 +74,51 @@
// 4. implement the corresponding generator function in the platform-dependent
// stubGenerator_<arch>.cpp file and call the function in generate_all() of that file
class UnsafeCopyMemory : public CHeapObj<mtCode> {
private:
address _start_pc;
address _end_pc;
address _error_exit_pc;
public:
static address _common_exit_stub_pc;
static UnsafeCopyMemory* _table;
static int _table_length;
static int _table_max_length;
UnsafeCopyMemory() : _start_pc(NULL), _end_pc(NULL), _error_exit_pc(NULL) {}
void set_start_pc(address pc) { _start_pc = pc; }
void set_end_pc(address pc) { _end_pc = pc; }
void set_error_exit_pc(address pc) { _error_exit_pc = pc; }
address start_pc() const { return _start_pc; }
address end_pc() const { return _end_pc; }
address error_exit_pc() const { return _error_exit_pc; }
static void set_common_exit_stub_pc(address pc) { _common_exit_stub_pc = pc; }
static address common_exit_stub_pc() { return _common_exit_stub_pc; }
static UnsafeCopyMemory* add_to_table(address start_pc, address end_pc, address error_exit_pc) {
guarantee(_table_length < _table_max_length, "Incorrect UnsafeCopyMemory::_table_max_length");
UnsafeCopyMemory* entry = &_table[_table_length];
entry->set_start_pc(start_pc);
entry->set_end_pc(end_pc);
entry->set_error_exit_pc(error_exit_pc);
_table_length++;
return entry;
}
static bool contains_pc(address pc);
static address page_error_continue_pc(address pc);
static void create_table(int max_size);
};
class UnsafeCopyMemoryMark : public StackObj {
private:
UnsafeCopyMemory* _ucm_entry;
StubCodeGenerator* _cgen;
public:
UnsafeCopyMemoryMark(StubCodeGenerator* cgen, bool add_entry, bool continue_at_scope_end, address error_exit_pc = NULL);
~UnsafeCopyMemoryMark();
};
class StubRoutines: AllStatic {
@ -310,11 +355,14 @@ class StubRoutines: AllStatic {
static address arrayof_oop_disjoint_arraycopy(bool dest_uninitialized = false) {
return dest_uninitialized ? _arrayof_oop_disjoint_arraycopy_uninit : _arrayof_oop_disjoint_arraycopy;
}
static address checkcast_arraycopy(bool dest_uninitialized = false) {
return dest_uninitialized ? _checkcast_arraycopy_uninit : _checkcast_arraycopy;
}
static address unsafe_arraycopy() { return _unsafe_arraycopy; }
typedef void (*UnsafeArrayCopyStub)(const void* src, void* dst, size_t count);
static UnsafeArrayCopyStub UnsafeArrayCopy_stub() { return CAST_TO_FN_PTR(UnsafeArrayCopyStub, _unsafe_arraycopy); }
static address generic_arraycopy() { return _generic_arraycopy; }
static address jbyte_fill() { return _jbyte_fill; }

View File

@ -1794,6 +1794,7 @@ class JavaThread: public Thread {
static ByteSize should_post_on_exceptions_flag_offset() {
return byte_offset_of(JavaThread, _should_post_on_exceptions_flag);
}
static ByteSize doing_unsafe_access_offset() { return byte_offset_of(JavaThread, _doing_unsafe_access); }
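Exposing the field's offset lets JIT-compiled code toggle the flag with a plain store through the thread register instead of a runtime call. A hedged, x86-flavored sketch (the exact assembler sequence is illustrative, not this commit's code):
__ movl(Address(r15_thread, in_bytes(JavaThread::doing_unsafe_access_offset())), 1);
// ... inlined unsafe copy ...
__ movl(Address(r15_thread, in_bytes(JavaThread::doing_unsafe_access_offset())), 0);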
// Returns the jni environment for this thread
JNIEnv* jni_environment() { return &_jni_environment; }

View File

@ -0,0 +1,155 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8191278
* @requires os.family != "windows"
* @summary Check that SIGBUS errors caused by memory accesses in Unsafe_CopyMemory0()
* and Unsafe_CopySwapMemory0() get converted to java.lang.InternalError exceptions.
* @modules java.base/jdk.internal.misc
*
* @library /test/lib
* @build sun.hotspot.WhiteBox
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
*
* @run main/othervm -XX:CompileCommand=exclude,*InternalErrorTest.main -XX:CompileCommand=inline,*.get -XX:CompileCommand=inline,*Unsafe.* -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI InternalErrorTest
*/
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import jdk.internal.misc.Unsafe;
import sun.hotspot.WhiteBox;
// Test that illegal memory accesses in Unsafe_CopyMemory0() and
// Unsafe_CopySwapMemory0() that raise SIGBUS result in
// java.lang.InternalError exceptions, not JVM crashes.
public class InternalErrorTest {
private static final Unsafe unsafe = Unsafe.getUnsafe();
private static final int pageSize = WhiteBox.getWhiteBox().getVMPageSize();
private static final String expectedErrorMsg = "fault occurred in a recent unsafe memory access";
private static final String failureMsg1 = "InternalError not thrown";
private static final String failureMsg2 = "Wrong InternalError: ";
public static void main(String[] args) throws Throwable {
String currentDir = System.getProperty("test.classes");
File file = new File(currentDir, "tmpFile.txt");
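// Write more than one page of data so the mapping spans at least two pages.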
StringBuilder s = new StringBuilder();
for (int i = 1; i < pageSize + 1000; i++) {
s.append("1");
}
Files.write(file.toPath(), s.toString().getBytes());
FileChannel fileChannel = new RandomAccessFile(file, "r").getChannel();
MappedByteBuffer buffer =
fileChannel.map(FileChannel.MapMode.READ_ONLY, 0, fileChannel.size());
// Get address of mapped memory.
Field af = java.nio.Buffer.class.getDeclaredField("address");
af.setAccessible(true);
long mapAddr = af.getLong(buffer);
long allocMem = unsafe.allocateMemory(4000);
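// While the file still backs the whole mapping, all three access patterns must succeed.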
for (int i = 0; i < 3; i++) {
test(buffer, unsafe, mapAddr, allocMem, i);
}
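// Truncate the file to one byte: the mapping's second page loses its backing
// store, so touching it raises SIGBUS. Advance the buffer past the first page.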
Files.write(file.toPath(), "2".getBytes());
buffer.position(buffer.position() + pageSize);
for (int i = 0; i < 3; i++) {
try {
test(buffer, unsafe, mapAddr, allocMem, i);
WhiteBox.getWhiteBox().forceSafepoint();
throw new RuntimeException(failureMsg1);
} catch (InternalError e) {
if (!e.getMessage().contains(expectedErrorMsg)) {
throw new RuntimeException(failureMsg2 + e.getMessage());
}
}
}
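// Repeat with the test method compiled at tier 3 (C1); the fault must still
// surface as an InternalError from compiled code.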
Method m = InternalErrorTest.class.getMethod("test", MappedByteBuffer.class, Unsafe.class, long.class, long.class, int.class);
WhiteBox.getWhiteBox().enqueueMethodForCompilation(m, 3);
for (int i = 0; i < 3; i++) {
try {
test(buffer, unsafe, mapAddr, allocMem, i);
WhiteBox.getWhiteBox().forceSafepoint();
throw new RuntimeException(failureMsg1);
} catch (InternalError e) {
if (!e.getMessage().contains(expectedErrorMsg)) {
throw new RuntimeException(failureMsg2 + e.getMessage());
}
}
}
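// Repeat at tier 4 (C2), where the unsafe copy may be intrinsified.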
WhiteBox.getWhiteBox().enqueueMethodForCompilation(m, 4);
for (int i = 0; i < 3; i++) {
try {
test(buffer, unsafe, mapAddr, allocMem, i);
WhiteBox.getWhiteBox().forceSafepoint();
throw new RuntimeException(failureMsg1);
} catch (InternalError e) {
if (!e.getMessage().contains(expectedErrorMsg)) {
throw new RuntimeException(failureMsg2 + e.getMessage());
}
}
}
System.out.println("Success");
}
public static void test(MappedByteBuffer buffer, Unsafe unsafe, long mapAddr, long allocMem, int type) {
switch (type) {
case 0:
// Testing Unsafe.copyMemory() via MappedByteBuffer.get(byte[]): reads a word from the unmapped page after truncation.
buffer.get(new byte[8]);
break;
case 1:
// Testing Unsafe.copySwapMemory() with a heap byte[] destination: reads the unmapped page after truncation (VM-entry path).
unsafe.copySwapMemory(null, mapAddr + pageSize, new byte[4000], 16, 2000, 2);
break;
case 2:
// Testing Unsafe.copySwapMemory() with both src and dst in native memory: reads the unmapped page after truncation (leaf path).
unsafe.copySwapMemory(null, mapAddr + pageSize, null, allocMem, 2000, 2);
break;
}
}
}