diff --git a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp
index 91f8fe16be8..85238a51009 100644
--- a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp
@@ -86,7 +86,6 @@ void C1_MacroAssembler::verified_entry(bool breakAtEntry) {
 void C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
   const int hdr_offset = oopDesc::mark_offset_in_bytes();
   assert_different_registers(hdr, obj, disp_hdr);
-  NearLabel done;
 
   verify_oop(obj, FILE_AND_LINE);
 
@@ -102,40 +101,47 @@ void C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hd
     z_btrue(slow_case);
   }
 
-  // and mark it as unlocked.
-  z_oill(hdr, markWord::unlocked_value);
-  // Save unlocked object header into the displaced header location on the stack.
-  z_stg(hdr, Address(disp_hdr, (intptr_t)0));
-  // Test if object header is still the same (i.e. unlocked), and if so, store the
-  // displaced header address in the object header. If it is not the same, get the
-  // object header instead.
-  z_csg(hdr, disp_hdr, hdr_offset, obj);
-  // If the object header was the same, we're done.
-  branch_optimized(Assembler::bcondEqual, done);
-  // If the object header was not the same, it is now in the hdr register.
-  // => Test if it is a stack pointer into the same stack (recursive locking), i.e.:
-  //
-  // 1) (hdr & markWord::lock_mask_in_place) == 0
-  // 2) rsp <= hdr
-  // 3) hdr <= rsp + page_size
-  //
-  // These 3 tests can be done by evaluating the following expression:
-  //
-  // (hdr - Z_SP) & (~(page_size-1) | markWord::lock_mask_in_place)
-  //
-  // assuming both the stack pointer and page_size have their least
-  // significant 2 bits cleared and page_size is a power of 2
-  z_sgr(hdr, Z_SP);
+  assert(LockingMode != LM_MONITOR, "LM_MONITOR is already handled, by emit_lock()");
 
-  load_const_optimized(Z_R0_scratch, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
-  z_ngr(hdr, Z_R0_scratch); // AND sets CC (result eq/ne 0).
-  // For recursive locking, the result is zero. => Save it in the displaced header
-  // location (null in the displaced hdr location indicates recursive locking).
-  z_stg(hdr, Address(disp_hdr, (intptr_t)0));
-  // Otherwise we don't care about the result and handle locking via runtime call.
-  branch_optimized(Assembler::bcondNotZero, slow_case);
-  // done
-  bind(done);
+  if (LockingMode == LM_LIGHTWEIGHT) {
+    Unimplemented();
+  } else if (LockingMode == LM_LEGACY) {
+    NearLabel done;
+    // and mark it as unlocked.
+    z_oill(hdr, markWord::unlocked_value);
+    // Save unlocked object header into the displaced header location on the stack.
+    z_stg(hdr, Address(disp_hdr, (intptr_t) 0));
+    // Test if object header is still the same (i.e. unlocked), and if so, store the
+    // displaced header address in the object header. If it is not the same, get the
+    // object header instead.
+    z_csg(hdr, disp_hdr, hdr_offset, obj);
+    // If the object header was the same, we're done.
+    branch_optimized(Assembler::bcondEqual, done);
+    // If the object header was not the same, it is now in the hdr register.
+    // => Test if it is a stack pointer into the same stack (recursive locking), i.e.:
+    //
+    // 1) (hdr & markWord::lock_mask_in_place) == 0
+    // 2) rsp <= hdr
+    // 3) hdr <= rsp + page_size
+    //
+    // These 3 tests can be done by evaluating the following expression:
+    //
+    // (hdr - Z_SP) & (~(page_size-1) | markWord::lock_mask_in_place)
+    //
+    // assuming both the stack pointer and page_size have their least
+    // significant 2 bits cleared and page_size is a power of 2
+    z_sgr(hdr, Z_SP);
+
+    load_const_optimized(Z_R0_scratch, (~(os::vm_page_size() - 1) | markWord::lock_mask_in_place));
+    z_ngr(hdr, Z_R0_scratch); // AND sets CC (result eq/ne 0).
+    // For recursive locking, the result is zero. => Save it in the displaced header
+    // location (null in the displaced hdr location indicates recursive locking).
+    z_stg(hdr, Address(disp_hdr, (intptr_t) 0));
+    // Otherwise we don't care about the result and handle locking via runtime call.
+    branch_optimized(Assembler::bcondNotZero, slow_case);
+    // done
+    bind(done);
+  }
 }
 
 void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
@@ -144,21 +150,29 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
   assert_different_registers(hdr, obj, disp_hdr);
   NearLabel done;
 
-  // Load displaced header.
-  z_ltg(hdr, Address(disp_hdr, (intptr_t)0));
-  // If the loaded hdr is null we had recursive locking, and we are done.
-  z_bre(done);
+  if (LockingMode != LM_LIGHTWEIGHT) {
+    // Load displaced header.
+    z_ltg(hdr, Address(disp_hdr, (intptr_t) 0));
+    // If the loaded hdr is null we had recursive locking, and we are done.
+    z_bre(done);
+  }
+
   // Load object.
   z_lg(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
   verify_oop(obj, FILE_AND_LINE);
-  // Test if object header is pointing to the displaced header, and if so, restore
-  // the displaced header in the object. If the object header is not pointing to
-  // the displaced header, get the object header instead.
-  z_csg(disp_hdr, hdr, hdr_offset, obj);
-  // If the object header was not pointing to the displaced header,
-  // we do unlocking via runtime call.
-  branch_optimized(Assembler::bcondNotEqual, slow_case);
-  // done
+
+  if (LockingMode == LM_LIGHTWEIGHT) {
+    Unimplemented();
+  } else {
+    // Test if object header is pointing to the displaced header, and if so, restore
+    // the displaced header in the object. If the object header is not pointing to
+    // the displaced header, get the object header instead.
+    z_csg(disp_hdr, hdr, hdr_offset, obj);
+    // If the object header was not pointing to the displaced header,
+    // we do unlocking via runtime call.
+    branch_optimized(Assembler::bcondNotEqual, slow_case);
+    // done
+  }
   bind(done);
 }
 
diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.cpp b/src/hotspot/cpu/s390/macroAssembler_s390.cpp
index ebd678fea08..8ca27efa71d 100644
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp
@@ -3171,22 +3171,28 @@ void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Regis
   z_nill(temp, markWord::monitor_value);
   z_brne(object_has_monitor);
 
-  // Set mark to markWord | markWord::unlocked_value.
-  z_oill(displacedHeader, markWord::unlocked_value);
+  if (LockingMode != LM_MONITOR) {
+    // Set mark to markWord | markWord::unlocked_value.
+    z_oill(displacedHeader, markWord::unlocked_value);
 
-  // Load Compare Value application register.
+    // Load Compare Value application register.
 
-  // Initialize the box (must happen before we update the object mark).
-  z_stg(displacedHeader, BasicLock::displaced_header_offset_in_bytes(), box);
+    // Initialize the box (must happen before we update the object mark).
+    z_stg(displacedHeader, BasicLock::displaced_header_offset_in_bytes(), box);
 
-  // Memory Fence (in cmpxchgd)
-  // Compare object markWord with mark and if equal exchange scratch1 with object markWord.
+    // Memory Fence (in cmpxchgd)
+    // Compare object markWord with mark and if equal exchange scratch1 with object markWord.
 
-  // If the compare-and-swap succeeded, then we found an unlocked object and we
-  // have now locked it.
-  z_csg(displacedHeader, box, 0, oop);
-  assert(currentHeader==displacedHeader, "must be same register"); // Identified two registers from z/Architecture.
-  z_bre(done);
+    // If the compare-and-swap succeeded, then we found an unlocked object and we
+    // have now locked it.
+    z_csg(displacedHeader, box, 0, oop);
+    assert(currentHeader == displacedHeader, "must be same register"); // Identified two registers from z/Architecture.
+    z_bre(done);
+  } else {
+    // Set NE to indicate 'failure' -> take slow-path
+    z_ltgr(oop, oop);
+    z_bru(done);
+  }
 
   // We did not see an unlocked object so try the fast recursive case.
 
@@ -3238,10 +3244,12 @@ void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Reg
 
   BLOCK_COMMENT("compiler_fast_unlock_object {");
 
-  // Find the lock address and load the displaced header from the stack.
-  // if the displaced header is zero, we have a recursive unlock.
-  load_and_test_long(displacedHeader, Address(box, BasicLock::displaced_header_offset_in_bytes()));
-  z_bre(done);
+  if (LockingMode != LM_MONITOR) {
+    // Find the lock address and load the displaced header from the stack.
+    // if the displaced header is zero, we have a recursive unlock.
+    load_and_test_long(displacedHeader, Address(box, BasicLock::displaced_header_offset_in_bytes()));
+    z_bre(done);
+  }
 
   // Handle existing monitor.
   // The object has an existing monitor iff (mark & monitor_value) != 0.
@@ -3250,12 +3258,18 @@ void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Reg
   z_nill(currentHeader, markWord::monitor_value);
   z_brne(object_has_monitor);
 
-  // Check if it is still a light weight lock, this is true if we see
-  // the stack address of the basicLock in the markWord of the object
-  // copy box to currentHeader such that csg does not kill it.
-  z_lgr(currentHeader, box);
-  z_csg(currentHeader, displacedHeader, 0, oop);
-  z_bru(done); // Csg sets CR as desired.
+  if (LockingMode != LM_MONITOR) {
+    // Check if it is still a light weight lock, this is true if we see
+    // the stack address of the basicLock in the markWord of the object
+    // copy box to currentHeader such that csg does not kill it.
+    z_lgr(currentHeader, box);
+    z_csg(currentHeader, displacedHeader, 0, oop);
+    z_bru(done); // Csg sets CR as desired.
+  } else {
+    // Set NE to indicate 'failure' -> take slow-path
+    z_ltgr(oop, oop);
+    z_bru(done);
+  }
 
   // Handle existing monitor.
   bind(object_has_monitor);
diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp
index f9b324bead3..d83a54b0023 100644
--- a/src/hotspot/share/runtime/arguments.cpp
+++ b/src/hotspot/share/runtime/arguments.cpp
@@ -1921,7 +1921,7 @@ bool Arguments::check_vm_args_consistency() {
     FLAG_SET_CMDLINE(LockingMode, LM_MONITOR);
   }
 
-#if !defined(X86) && !defined(AARCH64) && !defined(PPC64) && !defined(RISCV64)
+#if !defined(X86) && !defined(AARCH64) && !defined(PPC64) && !defined(RISCV64) && !defined(S390)
   if (LockingMode == LM_MONITOR) {
     jio_fprintf(defaultStream::error_stream(),
                 "LockingMode == 0 (LM_MONITOR) is not fully implemented on this architecture");
diff --git a/src/hotspot/share/runtime/synchronizer.cpp b/src/hotspot/share/runtime/synchronizer.cpp
index 3479bde358d..69a712bfb76 100644
--- a/src/hotspot/share/runtime/synchronizer.cpp
+++ b/src/hotspot/share/runtime/synchronizer.cpp
@@ -486,7 +486,7 @@ void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread
 }
 
 static bool useHeavyMonitors() {
-#if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64)
+#if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
   return LockingMode == LM_MONITOR;
 #else
   return false;
diff --git a/test/jdk/java/util/concurrent/ConcurrentHashMap/MapLoops.java b/test/jdk/java/util/concurrent/ConcurrentHashMap/MapLoops.java
index b4e96b9f794..eacfaeb0785 100644
--- a/test/jdk/java/util/concurrent/ConcurrentHashMap/MapLoops.java
+++ b/test/jdk/java/util/concurrent/ConcurrentHashMap/MapLoops.java
@@ -48,7 +48,7 @@
 /*
  * @test
  * @summary Exercise multithreaded maps, using only heavy monitors.
- * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch == "ppc64" | os.arch == "ppc64le" | os.arch == "riscv64"
+ * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch == "ppc64" | os.arch == "ppc64le" | os.arch == "riscv64" | os.arch == "s390x"
 * @requires vm.debug
 * @library /test/lib
 * @run main/othervm/timeout=1600 -XX:+UseHeavyMonitors -XX:+VerifyHeavyMonitors MapLoops
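
For reference, the recursive stack-lock test that the LM_LEGACY paths above keep (in lock_object() and compiler_fast_lock_object()) can be summed up in a short C++ sketch. This is an illustration only, not code from the patch; the helper name is_recursive_stack_lock and the concrete value 0x3 for markWord::lock_mask_in_place are assumptions made here for readability.

#include <cstdint>

// Sketch of the expression the s390 assembly computes with z_sgr/z_ngr after
// the CSG fast path fails:
//   (mark - SP) & (~(page_size - 1) | lock_mask_in_place)
// The result is zero exactly when 'mark' lies in [sp, sp + page_size) and its
// two low lock bits are clear, i.e. it is a BasicLock address on the current
// thread's stack and the lock is held recursively.
static inline bool is_recursive_stack_lock(std::uintptr_t mark,
                                           std::uintptr_t sp,
                                           std::uintptr_t page_size) {
  const std::uintptr_t lock_mask_in_place = 0x3;  // assumed value, for illustration
  return ((mark - sp) & (~(page_size - 1) | lock_mask_in_place)) == 0;
}

When this test yields zero, the generated code stores null into the displaced header slot to mark the recursive case; otherwise it branches to the runtime slow path.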