Vladimir Kozlov 2015-04-16 14:05:48 -07:00
commit b9c00b1904
256 changed files with 5873 additions and 4093 deletions


@@ -46,6 +46,11 @@ public abstract class ActionManager
return manager;
}
protected static void setInstance(ActionManager m)
{
manager = m;
}
protected abstract void addActions();
protected void addAction(String cmdname, Action action)
@@ -90,6 +95,6 @@ public abstract class ActionManager
private HashMap actions;
private static ActionUtilities utilities = new ActionUtilities();
protected static ActionManager manager;
private static ActionManager manager;
}
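The hunk above narrows the manager field from protected to private and adds a protected setInstance() hook, so subclasses now publish their instance through the base class instead of assigning the field directly. A minimal Java sketch of the resulting shape; the subclass here is hypothetical and stands in for concrete managers such as HSDBActionManager (reworked later in this commit):

public abstract class ActionManager {
    // Now private: subclasses can only publish themselves via setInstance().
    private static ActionManager manager;

    public static ActionManager getInstance() {
        return manager;
    }

    protected static void setInstance(ActionManager m) {
        manager = m;
    }

    protected abstract void addActions();
}

class MyActionManager extends ActionManager {   // hypothetical subclass
    public static ActionManager getInstance() {
        ActionManager m = ActionManager.getInstance();
        if (m == null) {
            m = new MyActionManager();
            ActionManager.setInstance(m);       // register with the base class
        }
        return m;
    }
    protected void addActions() { /* register Swing actions here */ }
}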


@@ -29,9 +29,9 @@ import java.util.Observable;
import java.util.Observer;
import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.gc_interface.CollectedHeap;
import sun.jvm.hotspot.gc_interface.CollectedHeapName;
import sun.jvm.hotspot.memory.MemRegion;
import sun.jvm.hotspot.memory.SharedHeap;
import sun.jvm.hotspot.memory.SpaceClosure;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.runtime.VMObjectFactory;
@@ -41,7 +41,7 @@ import sun.jvm.hotspot.types.TypeDataBase;
// Mirror class for G1CollectedHeap.
public class G1CollectedHeap extends SharedHeap {
public class G1CollectedHeap extends CollectedHeap {
// HeapRegionManager _hrm;
static private long hrmFieldOffset;
// MemRegion _g1_reserved;


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2005, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@ import sun.jvm.hotspot.memory.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
public class CollectedHeap extends VMObject {
public abstract class CollectedHeap extends VMObject {
private static long reservedFieldOffset;
static {
@@ -73,9 +73,7 @@ public class CollectedHeap extends VMObject {
return reservedRegion().contains(a);
}
public CollectedHeapName kind() {
return CollectedHeapName.ABSTRACT;
}
public abstract CollectedHeapName kind();
public void print() { printOn(System.out); }
public void printOn(PrintStream tty) {

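CollectedHeap is now abstract and kind() is a pure abstract method, so the CollectedHeapName.ABSTRACT fallback disappears and every concrete SA mirror must identify itself. A hedged sketch of what a subclass supplies, following the usual VMObject(Address) constructor pattern (the real G1CollectedHeap in this commit ends up with exactly this kind of one-liner):

import sun.jvm.hotspot.debugger.Address;

public class G1CollectedHeap extends CollectedHeap {
    public G1CollectedHeap(Address addr) {
        super(addr);
    }

    public CollectedHeapName kind() {
        return CollectedHeapName.G1_COLLECTED_HEAP;
    }
}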

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,8 +31,6 @@ public class CollectedHeapName {
private CollectedHeapName(String name) { this.name = name; }
public static final CollectedHeapName ABSTRACT = new CollectedHeapName("abstract");
public static final CollectedHeapName SHARED_HEAP = new CollectedHeapName("SharedHeap");
public static final CollectedHeapName GEN_COLLECTED_HEAP = new CollectedHeapName("GenCollectedHeap");
public static final CollectedHeapName G1_COLLECTED_HEAP = new CollectedHeapName("G1CollectedHeap");
public static final CollectedHeapName PARALLEL_SCAVENGE_HEAP = new CollectedHeapName("ParallelScavengeHeap");

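CollectedHeapName is the pre-Java-5 typesafe-enum idiom: a private constructor plus a fixed set of named constants, so each kind is a singleton and callers can compare by identity. A small hypothetical usage sketch (not from the commit):

static String describe(CollectedHeap heap) {
    // '==' is safe here because each CollectedHeapName is a unique constant
    if (heap.kind() == CollectedHeapName.G1_COLLECTED_HEAP) {
        return "G1 region-based heap";
    }
    return heap.kind().toString();
}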

@@ -33,8 +33,7 @@ import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
public class GenCollectedHeap extends SharedHeap {
private static CIntegerField nGensField;
public class GenCollectedHeap extends CollectedHeap {
private static AddressField youngGenField;
private static AddressField oldGenField;
@@ -54,7 +53,6 @@ public class GenCollectedHeap extends SharedHeap {
private static synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("GenCollectedHeap");
nGensField = type.getCIntegerField("_n_gens");
youngGenField = type.getAddressField("_young_gen");
oldGenField = type.getAddressField("_old_gen");
@@ -70,7 +68,7 @@ public class GenCollectedHeap extends SharedHeap {
}
public int nGens() {
return (int) nGensField.getValue(addr);
return 2; // Young + Old
}
public Generation getGen(int i) {

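With the _n_gens field gone from the VM, the mirror hard-codes the two-generation (young + old) layout, and existing loops over generations keep working unchanged. A minimal hypothetical helper showing the unchanged call shape:

static void printGenerationNames(GenCollectedHeap genHeap) {
    for (int i = 0; i < genHeap.nGens(); i++) {   // nGens() is now the constant 2
        System.out.println(i + ": " + genHeap.getGen(i).name());
    }
}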

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -112,11 +112,7 @@ public class Universe {
return "";
}
public CollectedHeap heap() {
try {
return (CollectedHeap) heapConstructor.instantiateWrapperFor(collectedHeapField.getValue());
} catch (WrongTypeException e) {
return new CollectedHeap(collectedHeapField.getValue());
}
return (CollectedHeap) heapConstructor.instantiateWrapperFor(collectedHeapField.getValue());
}
public static long getNarrowOopBase() {

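The WrongTypeException fallback can go away only because the factory no longer comes up empty: with CollectedHeap abstract, heapConstructor (a VirtualConstructor) must map every concrete VM heap type to its SA mirror class. A hedged sketch of the kind of wiring this relies on; the exact mappings in Universe's initializer may differ:

VirtualConstructor heapConstructor = new VirtualConstructor(db);
heapConstructor.addMapping("GenCollectedHeap", GenCollectedHeap.class);
heapConstructor.addMapping("G1CollectedHeap", G1CollectedHeap.class);
heapConstructor.addMapping("ParallelScavengeHeap", ParallelScavengeHeap.class);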

@@ -81,53 +81,48 @@ public class HeapSummary extends Tool {
System.out.println();
System.out.println("Heap Usage:");
if (heap instanceof SharedHeap) {
SharedHeap sharedHeap = (SharedHeap) heap;
if (sharedHeap instanceof GenCollectedHeap) {
GenCollectedHeap genHeap = (GenCollectedHeap) sharedHeap;
for (int n = 0; n < genHeap.nGens(); n++) {
Generation gen = genHeap.getGen(n);
if (gen instanceof sun.jvm.hotspot.memory.DefNewGeneration) {
System.out.println("New Generation (Eden + 1 Survivor Space):");
printGen(gen);
if (heap instanceof GenCollectedHeap) {
GenCollectedHeap genHeap = (GenCollectedHeap) heap;
for (int n = 0; n < genHeap.nGens(); n++) {
Generation gen = genHeap.getGen(n);
if (gen instanceof sun.jvm.hotspot.memory.DefNewGeneration) {
System.out.println("New Generation (Eden + 1 Survivor Space):");
printGen(gen);
ContiguousSpace eden = ((DefNewGeneration)gen).eden();
System.out.println("Eden Space:");
printSpace(eden);
ContiguousSpace eden = ((DefNewGeneration)gen).eden();
System.out.println("Eden Space:");
printSpace(eden);
ContiguousSpace from = ((DefNewGeneration)gen).from();
System.out.println("From Space:");
printSpace(from);
ContiguousSpace from = ((DefNewGeneration)gen).from();
System.out.println("From Space:");
printSpace(from);
ContiguousSpace to = ((DefNewGeneration)gen).to();
System.out.println("To Space:");
printSpace(to);
} else {
System.out.println(gen.name() + ":");
printGen(gen);
}
ContiguousSpace to = ((DefNewGeneration)gen).to();
System.out.println("To Space:");
printSpace(to);
} else {
System.out.println(gen.name() + ":");
printGen(gen);
}
} else if (sharedHeap instanceof G1CollectedHeap) {
G1CollectedHeap g1h = (G1CollectedHeap) sharedHeap;
G1MonitoringSupport g1mm = g1h.g1mm();
long edenRegionNum = g1mm.edenRegionNum();
long survivorRegionNum = g1mm.survivorRegionNum();
HeapRegionSetBase oldSet = g1h.oldSet();
HeapRegionSetBase humongousSet = g1h.humongousSet();
long oldRegionNum = oldSet.count().length()
+ humongousSet.count().capacity() / HeapRegion.grainBytes();
printG1Space("G1 Heap:", g1h.n_regions(),
g1h.used(), g1h.capacity());
System.out.println("G1 Young Generation:");
printG1Space("Eden Space:", edenRegionNum,
g1mm.edenUsed(), g1mm.edenCommitted());
printG1Space("Survivor Space:", survivorRegionNum,
g1mm.survivorUsed(), g1mm.survivorCommitted());
printG1Space("G1 Old Generation:", oldRegionNum,
g1mm.oldUsed(), g1mm.oldCommitted());
} else {
throw new RuntimeException("unknown SharedHeap type : " + heap.getClass());
}
} else if (heap instanceof G1CollectedHeap) {
G1CollectedHeap g1h = (G1CollectedHeap) heap;
G1MonitoringSupport g1mm = g1h.g1mm();
long edenRegionNum = g1mm.edenRegionNum();
long survivorRegionNum = g1mm.survivorRegionNum();
HeapRegionSetBase oldSet = g1h.oldSet();
HeapRegionSetBase humongousSet = g1h.humongousSet();
long oldRegionNum = oldSet.count().length()
+ humongousSet.count().capacity() / HeapRegion.grainBytes();
printG1Space("G1 Heap:", g1h.n_regions(),
g1h.used(), g1h.capacity());
System.out.println("G1 Young Generation:");
printG1Space("Eden Space:", edenRegionNum,
g1mm.edenUsed(), g1mm.edenCommitted());
printG1Space("Survivor Space:", survivorRegionNum,
g1mm.survivorUsed(), g1mm.survivorCommitted());
printG1Space("G1 Old Generation:", oldRegionNum,
g1mm.oldUsed(), g1mm.oldCommitted());
} else if (heap instanceof ParallelScavengeHeap) {
ParallelScavengeHeap psh = (ParallelScavengeHeap) heap;
PSYoungGen youngGen = psh.youngGen();

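With SharedHeap removed from the hierarchy, the summary dispatches on the concrete heap mirrors directly and the nested instanceof ladder flattens to a single level. A skeleton of the new control flow, report bodies elided:

static void reportHeap(CollectedHeap heap) {
    if (heap instanceof GenCollectedHeap) {
        // walk the young and old generations
    } else if (heap instanceof G1CollectedHeap) {
        // region-based eden/survivor/old accounting via G1MonitoringSupport
    } else if (heap instanceof ParallelScavengeHeap) {
        // PSYoungGen/PSOldGen accounting
    } else {
        throw new RuntimeException("unknown CollectedHeap type : " + heap.getClass());
    }
}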

@@ -32,10 +32,12 @@ import com.sun.java.swing.action.ActionManager;
public class HSDBActionManager extends ActionManager {
public static ActionManager getInstance() {
if (manager == null) {
manager = new HSDBActionManager();
ActionManager m = ActionManager.getInstance();
if (m == null) {
m = new HSDBActionManager();
ActionManager.setInstance(m);
}
return manager;
return m;
}
protected void addActions() {


@@ -98,7 +98,7 @@ COMMON_VM_DEBUG_TARGETS=debug debug1 docs export_debug
COMMON_VM_OPTIMIZED_TARGETS=optimized optimized1 docs export_optimized
# JDK directory list
JDK_DIRS=bin include jre lib demo
JDK_DIRS=bin include lib demo
all: all_product all_fastdebug
@@ -373,33 +373,33 @@ $(EXPORT_SERVER_DIR)/%.map: $(C2_BUILD_DIR)/%.map
$(install-file)
$(EXPORT_LIB_DIR)/%.lib: $(C2_BUILD_DIR)/%.lib
$(install-file)
$(EXPORT_JRE_BIN_DIR)/%.diz: $(C2_BUILD_DIR)/%.diz
$(EXPORT_BIN_DIR)/%.diz: $(C2_BUILD_DIR)/%.diz
$(install-file)
$(EXPORT_JRE_BIN_DIR)/%.dll: $(C2_BUILD_DIR)/%.dll
$(EXPORT_BIN_DIR)/%.dll: $(C2_BUILD_DIR)/%.dll
$(install-file)
$(EXPORT_JRE_BIN_DIR)/%.pdb: $(C2_BUILD_DIR)/%.pdb
$(EXPORT_BIN_DIR)/%.pdb: $(C2_BUILD_DIR)/%.pdb
$(install-file)
$(EXPORT_JRE_BIN_DIR)/%.map: $(C2_BUILD_DIR)/%.map
$(EXPORT_BIN_DIR)/%.map: $(C2_BUILD_DIR)/%.map
$(install-file)
# Unix
$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C2_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(EXPORT_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C2_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(C2_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
$(EXPORT_SERVER_DIR)/64/%.$(LIBRARY_SUFFIX): $(C2_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(C2_BUILD_DIR)/%.debuginfo
$(EXPORT_LIB_ARCH_DIR)/%.debuginfo: $(C2_BUILD_DIR)/%.debuginfo
$(install-file)
$(EXPORT_SERVER_DIR)/%.debuginfo: $(C2_BUILD_DIR)/%.debuginfo
$(install-file)
$(EXPORT_SERVER_DIR)/64/%.debuginfo: $(C2_BUILD_DIR)/%.debuginfo
$(install-file)
$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(C2_BUILD_DIR)/%.diz
$(EXPORT_LIB_ARCH_DIR)/%.diz: $(C2_BUILD_DIR)/%.diz
$(install-file)
$(EXPORT_SERVER_DIR)/64/%.diz: $(C2_BUILD_DIR)/%.diz
$(install-file)
# MacOS X
$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: $(C2_BUILD_DIR)/%.dSYM
$(EXPORT_LIB_ARCH_DIR)/%.dSYM: $(C2_BUILD_DIR)/%.dSYM
$(install-dir)
$(EXPORT_SERVER_DIR)/%.dSYM: $(C2_BUILD_DIR)/%.dSYM
$(install-dir)
@@ -423,33 +423,33 @@ $(EXPORT_CLIENT_DIR)/%.map: $(C1_BUILD_DIR)/%.map
$(install-file)
$(EXPORT_LIB_DIR)/%.lib: $(C1_BUILD_DIR)/%.lib
$(install-file)
$(EXPORT_JRE_BIN_DIR)/%.diz: $(C1_BUILD_DIR)/%.diz
$(EXPORT_BIN_DIR)/%.diz: $(C1_BUILD_DIR)/%.diz
$(install-file)
$(EXPORT_JRE_BIN_DIR)/%.dll: $(C1_BUILD_DIR)/%.dll
$(EXPORT_BIN_DIR)/%.dll: $(C1_BUILD_DIR)/%.dll
$(install-file)
$(EXPORT_JRE_BIN_DIR)/%.pdb: $(C1_BUILD_DIR)/%.pdb
$(EXPORT_BIN_DIR)/%.pdb: $(C1_BUILD_DIR)/%.pdb
$(install-file)
$(EXPORT_JRE_BIN_DIR)/%.map: $(C1_BUILD_DIR)/%.map
$(EXPORT_BIN_DIR)/%.map: $(C1_BUILD_DIR)/%.map
$(install-file)
# Unix
$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C1_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(EXPORT_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C1_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
$(EXPORT_CLIENT_DIR)/%.$(LIBRARY_SUFFIX): $(C1_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
$(EXPORT_CLIENT_DIR)/64/%.$(LIBRARY_SUFFIX): $(C1_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(C1_BUILD_DIR)/%.debuginfo
$(EXPORT_LIB_ARCH_DIR)/%.debuginfo: $(C1_BUILD_DIR)/%.debuginfo
$(install-file)
$(EXPORT_CLIENT_DIR)/%.debuginfo: $(C1_BUILD_DIR)/%.debuginfo
$(install-file)
$(EXPORT_CLIENT_DIR)/64/%.debuginfo: $(C1_BUILD_DIR)/%.debuginfo
$(install-file)
$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(C1_BUILD_DIR)/%.diz
$(EXPORT_LIB_ARCH_DIR)/%.diz: $(C1_BUILD_DIR)/%.diz
$(install-file)
$(EXPORT_CLIENT_DIR)/64/%.diz: $(C1_BUILD_DIR)/%.diz
$(install-file)
# MacOS X
$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: $(C1_BUILD_DIR)/%.dSYM
$(EXPORT_LIB_ARCH_DIR)/%.dSYM: $(C1_BUILD_DIR)/%.dSYM
$(install-dir)
$(EXPORT_CLIENT_DIR)/%.dSYM: $(C1_BUILD_DIR)/%.dSYM
$(install-dir)
@@ -473,28 +473,28 @@ $(EXPORT_MINIMAL_DIR)/%.map: $(MINIMAL1_BUILD_DIR)/%.map
$(install-file)
$(EXPORT_LIB_DIR)/%.lib: $(MINIMAL1_BUILD_DIR)/%.lib
$(install-file)
$(EXPORT_JRE_BIN_DIR)/%.diz: $(MINIMAL1_BUILD_DIR)/%.diz
$(EXPORT_BIN_DIR)/%.diz: $(MINIMAL1_BUILD_DIR)/%.diz
$(install-file)
$(EXPORT_JRE_BIN_DIR)/%.dll: $(MINIMAL1_BUILD_DIR)/%.dll
$(EXPORT_BIN_DIR)/%.dll: $(MINIMAL1_BUILD_DIR)/%.dll
$(install-file)
$(EXPORT_JRE_BIN_DIR)/%.pdb: $(MINIMAL1_BUILD_DIR)/%.pdb
$(EXPORT_BIN_DIR)/%.pdb: $(MINIMAL1_BUILD_DIR)/%.pdb
$(install-file)
$(EXPORT_JRE_BIN_DIR)/%.map: $(MINIMAL1_BUILD_DIR)/%.map
$(EXPORT_BIN_DIR)/%.map: $(MINIMAL1_BUILD_DIR)/%.map
$(install-file)
# Unix
$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(MINIMAL1_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(EXPORT_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(MINIMAL1_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
$(EXPORT_MINIMAL_DIR)/%.$(LIBRARY_SUFFIX): $(MINIMAL1_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
$(EXPORT_MINIMAL_DIR)/64/%.$(LIBRARY_SUFFIX): $(MINIMAL1_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(MINIMAL1_BUILD_DIR)/%.debuginfo
$(EXPORT_LIB_ARCH_DIR)/%.debuginfo: $(MINIMAL1_BUILD_DIR)/%.debuginfo
$(install-file)
$(EXPORT_MINIMAL_DIR)/%.debuginfo: $(MINIMAL1_BUILD_DIR)/%.debuginfo
$(install-file)
$(EXPORT_MINIMAL_DIR)/64/%.debuginfo: $(MINIMAL1_BUILD_DIR)/%.debuginfo
$(install-file)
$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(MINIMAL1_BUILD_DIR)/%.diz
$(EXPORT_LIB_ARCH_DIR)/%.diz: $(MINIMAL1_BUILD_DIR)/%.diz
$(install-file)
$(EXPORT_MINIMAL_DIR)/64/%.diz: $(MINIMAL1_BUILD_DIR)/%.diz
$(install-file)
@@ -509,11 +509,11 @@ $(EXPORT_LIB_DIR)/%.jar: $(ZERO_BUILD_DIR)/../generated/%.jar
$(EXPORT_INCLUDE_DIR)/%: $(ZERO_BUILD_DIR)/../generated/jvmtifiles/%
$(install-file)
# Unix
$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(EXPORT_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(ZERO_BUILD_DIR)/%.debuginfo
$(EXPORT_LIB_ARCH_DIR)/%.debuginfo: $(ZERO_BUILD_DIR)/%.debuginfo
$(install-file)
$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(ZERO_BUILD_DIR)/%.diz
$(EXPORT_LIB_ARCH_DIR)/%.diz: $(ZERO_BUILD_DIR)/%.diz
$(install-file)
$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
@@ -522,7 +522,7 @@ $(EXPORT_SERVER_DIR)/%.debuginfo: $(ZERO_BUILD_DIR)/%.debuginfo
$(EXPORT_SERVER_DIR)/%.diz: $(ZERO_BUILD_DIR)/%.diz
$(install-file)
# MacOS X
$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: $(ZERO_BUILD_DIR)/%.dSYM
$(EXPORT_LIB_ARCH_DIR)/%.dSYM: $(ZERO_BUILD_DIR)/%.dSYM
$(install-dir)
$(EXPORT_SERVER_DIR)/%.dSYM: $(ZERO_BUILD_DIR)/%.dSYM
$(install-dir)
@@ -536,11 +536,11 @@ $(EXPORT_LIB_DIR)/%.jar: $(CORE_BUILD_DIR)/../generated/%.jar
$(EXPORT_INCLUDE_DIR)/%: $(CORE_BUILD_DIR)/../generated/jvmtifiles/%
$(install-file)
# Unix
$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(CORE_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(EXPORT_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(CORE_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(CORE_BUILD_DIR)/%.debuginfo
$(EXPORT_LIB_ARCH_DIR)/%.debuginfo: $(CORE_BUILD_DIR)/%.debuginfo
$(install-file)
$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(CORE_BUILD_DIR)/%.diz
$(EXPORT_LIB_ARCH_DIR)/%.diz: $(CORE_BUILD_DIR)/%.diz
$(install-file)
$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(CORE_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
@@ -558,11 +558,11 @@ $(EXPORT_LIB_DIR)/%.jar: $(SHARK_BUILD_DIR)/../generated/%.jar
$(EXPORT_INCLUDE_DIR)/%: $(SHARK_BUILD_DIR)/../generated/jvmtifiles/%
$(install-file)
# Unix
$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(EXPORT_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo): $(SHARK_BUILD_DIR)/%.debuginfo
$(EXPORT_LIB_ARCH_DIR)/%.debuginfo): $(SHARK_BUILD_DIR)/%.debuginfo
$(install-file)
$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(SHARK_BUILD_DIR)/%.diz
$(EXPORT_LIB_ARCH_DIR)/%.diz: $(SHARK_BUILD_DIR)/%.diz
$(install-file)
$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
@@ -571,7 +571,7 @@ $(EXPORT_SERVER_DIR)/%.debuginfo: $(SHARK_BUILD_DIR)/%.debuginfo
$(EXPORT_SERVER_DIR)/%.diz: $(SHARK_BUILD_DIR)/%.diz
$(install-file)
# MacOS X
$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: $(SHARK_BUILD_DIR)/%.dSYM
$(EXPORT_LIB_ARCH_DIR)/%.dSYM: $(SHARK_BUILD_DIR)/%.dSYM
$(install-dir)
$(EXPORT_SERVER_DIR)/%.dSYM: $(SHARK_BUILD_DIR)/%.dSYM
$(install-dir)


@@ -184,17 +184,17 @@ LIBRARY_SUFFIX=so
EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html
# client and server subdirectories have symbolic links to ../libjsig.so
EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
#ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
# ifeq ($(ZIP_DEBUGINFO_FILES),1)
# EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.diz
# EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.diz
# else
# EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.debuginfo
# EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.debuginfo
# endif
#endif
EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
EXPORT_MINIMAL_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/minimal
EXPORT_SERVER_DIR = $(EXPORT_LIB_ARCH_DIR)/server
EXPORT_CLIENT_DIR = $(EXPORT_LIB_ARCH_DIR)/client
EXPORT_MINIMAL_DIR = $(EXPORT_LIB_ARCH_DIR)/minimal
ifeq ($(findstring true, $(JVM_VARIANT_SERVER) $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK) $(JVM_VARIANT_CORE)), true)
EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt


@@ -122,7 +122,7 @@ LIBS += -lm -ldl -lpthread
# By default, link the *.o into the library, not the executable.
LINK_INTO$(LINK_INTO) = LIBJVM
JDK_LIBDIR = $(JAVA_HOME)/jre/lib/$(LIBARCH)
JDK_LIBDIR = $(JAVA_HOME)/lib/$(LIBARCH)
#----------------------------------------------------------------------
# jvm_db & dtrace


@@ -265,23 +265,23 @@ endif
EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html
# client and server subdirectories have symbolic links to ../libjsig.so
EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
ifeq ($(ZIP_DEBUGINFO_FILES),1)
EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.diz
EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.diz
else
ifeq ($(OS_VENDOR), Darwin)
EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX).dSYM
EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX).dSYM
else
EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.debuginfo
EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.debuginfo
endif
endif
endif
EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
EXPORT_MINIMAL_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/minimal
EXPORT_SERVER_DIR = $(EXPORT_LIB_ARCH_DIR)/server
EXPORT_CLIENT_DIR = $(EXPORT_LIB_ARCH_DIR)/client
EXPORT_MINIMAL_DIR = $(EXPORT_LIB_ARCH_DIR)/minimal
ifeq ($(findstring true, $(JVM_VARIANT_SERVER) $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
@@ -324,34 +324,34 @@ endif
# Serviceability Binaries
# No SA Support for PPC, IA64, ARM or zero
ADD_SA_BINARIES/x86 = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
ADD_SA_BINARIES/x86 = $(EXPORT_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
$(EXPORT_LIB_DIR)/sa-jdi.jar
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
ifeq ($(ZIP_DEBUGINFO_FILES),1)
ADD_SA_BINARIES/x86 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz
ADD_SA_BINARIES/x86 += $(EXPORT_LIB_ARCH_DIR)/libsaproc.diz
else
ifeq ($(OS_VENDOR), Darwin)
ADD_SA_BINARIES/x86 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX).dSYM
ADD_SA_BINARIES/x86 += $(EXPORT_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX).dSYM
else
ADD_SA_BINARIES/x86 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo
ADD_SA_BINARIES/x86 += $(EXPORT_LIB_ARCH_DIR)/libsaproc.debuginfo
endif
endif
endif
ADD_SA_BINARIES/sparc = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
ADD_SA_BINARIES/sparc = $(EXPORT_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
$(EXPORT_LIB_DIR)/sa-jdi.jar
ADD_SA_BINARIES/universal = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
ADD_SA_BINARIES/universal = $(EXPORT_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
$(EXPORT_LIB_DIR)/sa-jdi.jar
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
ifeq ($(ZIP_DEBUGINFO_FILES),1)
ADD_SA_BINARIES/universal += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz
ADD_SA_BINARIES/universal += $(EXPORT_LIB_ARCH_DIR)/libsaproc.diz
else
ifeq ($(OS_VENDOR), Darwin)
ADD_SA_BINARIES/universal += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX).dSYM
ADD_SA_BINARIES/universal += $(EXPORT_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX).dSYM
else
ADD_SA_BINARIES/universal += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo
ADD_SA_BINARIES/universal += $(EXPORT_LIB_ARCH_DIR)/libsaproc.debuginfo
endif
endif
endif
@@ -388,25 +388,25 @@ ifeq ($(OS_VENDOR), Darwin)
endif
# Binaries to 'universalize' if built
UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/libjsig.$(LIBRARY_SUFFIX)
UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/libsaproc.$(LIBRARY_SUFFIX)
UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/server/libjvm.$(LIBRARY_SUFFIX)
UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/client/libjvm.$(LIBRARY_SUFFIX)
UNIVERSAL_LIPO_LIST += $(EXPORT_LIB_DIR)/libjsig.$(LIBRARY_SUFFIX)
UNIVERSAL_LIPO_LIST += $(EXPORT_LIB_DIR)/libsaproc.$(LIBRARY_SUFFIX)
UNIVERSAL_LIPO_LIST += $(EXPORT_LIB_DIR)/server/libjvm.$(LIBRARY_SUFFIX)
UNIVERSAL_LIPO_LIST += $(EXPORT_LIB_DIR)/client/libjvm.$(LIBRARY_SUFFIX)
# Files to simply copy in place
UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/server/Xusage.txt
UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/Xusage.txt
UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/server/Xusage.txt
UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/client/Xusage.txt
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
ifeq ($(ZIP_DEBUGINFO_FILES),1)
UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/server/libjvm.diz
UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/libjvm.diz
UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/libjsig.diz
UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/libsaproc.diz
UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/server/libjvm.diz
UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/client/libjvm.diz
UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/libjsig.diz
UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/libsaproc.diz
else
UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/server/libjvm.$(LIBRARY_SUFFIX).dSYM
UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/libjvm.$(LIBRARY_SUFFIX).dSYM
UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/libjsig.$(LIBRARY_SUFFIX).dSYM
UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/libsaproc.$(LIBRARY_SUFFIX).dSYM
UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/server/libjvm.$(LIBRARY_SUFFIX).dSYM
UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/client/libjvm.$(LIBRARY_SUFFIX).dSYM
UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/libjsig.$(LIBRARY_SUFFIX).dSYM
UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/libsaproc.$(LIBRARY_SUFFIX).dSYM
endif
endif


@@ -54,12 +54,12 @@ all_debug_universal:
# Consolidate architecture builds into a single Universal binary
universalize: $(UNIVERSAL_LIPO_LIST) $(UNIVERSAL_COPY_LIST)
$(RM) -r $(EXPORT_PATH)/jre/lib/{i386,amd64}
$(RM) -r $(EXPORT_PATH)/lib/{i386,amd64}
# Package built libraries in a universal binary
$(UNIVERSAL_LIPO_LIST):
BUILT_LIPO_FILES="`find $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) 2>/dev/null`" || test $$? = "1"; \
BUILT_LIPO_FILES="`find $(EXPORT_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_LIB_DIR)/,,$@) 2>/dev/null`" || test $$? = "1"; \
if [ -n "$${BUILT_LIPO_FILES}" ]; then \
$(MKDIR) -p $(shell dirname $@); \
lipo -create -output $@ $${BUILT_LIPO_FILES}; \
@@ -70,7 +70,7 @@ $(UNIVERSAL_LIPO_LIST):
# - copies directories; including empty dirs
# - copies files, symlinks, other non-directory files
$(UNIVERSAL_COPY_LIST):
BUILT_COPY_FILES="`find $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) -prune 2>/dev/null`" || test $$? = "1"; \
BUILT_COPY_FILES="`find $(EXPORT_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_LIB_DIR)/,,$@) -prune 2>/dev/null`" || test $$? = "1"; \
if [ -n "$${BUILT_COPY_FILES}" ]; then \
for i in $${BUILT_COPY_FILES}; do \
$(MKDIR) -p $(shell dirname $@); \
@@ -80,21 +80,21 @@ $(UNIVERSAL_COPY_LIST):
# Replace arch specific binaries with universal binaries
# Do not touch jre/lib/{client,server}/libjsig.$(LIBRARY_SUFFIX)
# Do not touch lib/{client,server}/libjsig.$(LIBRARY_SUFFIX)
# That symbolic link belongs to the 'jdk' build.
export_universal:
$(RM) -r $(EXPORT_PATH)/jre/lib/{i386,amd64}
$(RM) -r $(JDK_IMAGE_DIR)/jre/lib/{i386,amd64}
$(RM) -r $(EXPORT_PATH)/lib/{i386,amd64}
$(RM) -r $(JDK_IMAGE_DIR)/lib/{i386,amd64}
($(CD) $(EXPORT_PATH) && \
$(TAR) -cf - *) | \
($(CD) $(JDK_IMAGE_DIR) && $(TAR) -xpf -)
# Overlay universal binaries
# Do not touch jre/lib/{client,server}/libjsig.$(LIBRARY_SUFFIX)
# Do not touch lib/{client,server}/libjsig.$(LIBRARY_SUFFIX)
# That symbolic link belongs to the 'jdk' build.
copy_universal:
$(RM) -r $(JDK_IMAGE_DIR)$(COPY_SUBDIR)/jre/lib/{i386,amd64}
$(RM) -r $(JDK_IMAGE_DIR)$(COPY_SUBDIR)/lib/{i386,amd64}
($(CD) $(EXPORT_PATH)$(COPY_SUBDIR) && \
$(TAR) -cf - *) | \
($(CD) $(JDK_IMAGE_DIR)$(COPY_SUBDIR) && $(TAR) -xpf -)


@@ -350,15 +350,13 @@ MAKE_ARGS += BOOT_JDK_SOURCETARGET="$(BOOT_JDK_SOURCETARGET)"
EXPORT_INCLUDE_DIR = $(EXPORT_PATH)/include
EXPORT_DOCS_DIR = $(EXPORT_PATH)/docs
EXPORT_LIB_DIR = $(EXPORT_PATH)/lib
EXPORT_JRE_DIR = $(EXPORT_PATH)/jre
EXPORT_JRE_BIN_DIR = $(EXPORT_JRE_DIR)/bin
EXPORT_JRE_LIB_DIR = $(EXPORT_JRE_DIR)/lib
EXPORT_JRE_LIB_ARCH_DIR = $(EXPORT_JRE_LIB_DIR)/$(LIBARCH)
EXPORT_BIN_DIR = $(EXPORT_PATH)/bin
EXPORT_LIB_ARCH_DIR = $(EXPORT_LIB_DIR)/$(LIBARCH)
# non-universal macosx builds need to appear universal
ifeq ($(OS_VENDOR), Darwin)
ifneq ($(MACOSX_UNIVERSAL), true)
EXPORT_JRE_LIB_ARCH_DIR = $(EXPORT_JRE_LIB_DIR)
EXPORT_LIB_ARCH_DIR = $(EXPORT_LIB_DIR)
endif
endif
@@ -370,4 +368,3 @@ EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/$(JDK_INCLUDE_SUBDIR)/jni_md.h
EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jmm.h
.PHONY: $(HS_ALT_MAKE)/defs.make


@@ -244,17 +244,17 @@ LIBRARY_SUFFIX=so
EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html
# client and server subdirectories have symbolic links to ../libjsig.so
EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
ifeq ($(ZIP_DEBUGINFO_FILES),1)
EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.diz
EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.diz
else
EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.debuginfo
EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.debuginfo
endif
endif
EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
EXPORT_MINIMAL_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/minimal
EXPORT_SERVER_DIR = $(EXPORT_LIB_ARCH_DIR)/server
EXPORT_CLIENT_DIR = $(EXPORT_LIB_ARCH_DIR)/client
EXPORT_MINIMAL_DIR = $(EXPORT_LIB_ARCH_DIR)/minimal
ifeq ($(findstring true, $(JVM_VARIANT_SERVER) $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK) $(JVM_VARIANT_CORE)), true)
EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
@@ -295,14 +295,14 @@ endif
# Serviceability Binaries
ADD_SA_BINARIES/DEFAULT = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
ADD_SA_BINARIES/DEFAULT = $(EXPORT_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
$(EXPORT_LIB_DIR)/sa-jdi.jar
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
ifeq ($(ZIP_DEBUGINFO_FILES),1)
ADD_SA_BINARIES/DEFAULT += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz
ADD_SA_BINARIES/DEFAULT += $(EXPORT_LIB_ARCH_DIR)/libsaproc.diz
else
ADD_SA_BINARIES/DEFAULT += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo
ADD_SA_BINARIES/DEFAULT += $(EXPORT_LIB_ARCH_DIR)/libsaproc.debuginfo
endif
endif


@@ -127,7 +127,7 @@ LIBS += -lm -ldl -lpthread
# By default, link the *.o into the library, not the executable.
LINK_INTO$(LINK_INTO) = LIBJVM
JDK_LIBDIR = $(JAVA_HOME)/jre/lib/$(LIBARCH)
JDK_LIBDIR = $(JAVA_HOME)/lib/$(LIBARCH)
#----------------------------------------------------------------------
# jvm_db & dtrace


@@ -224,17 +224,17 @@ LIBRARY_SUFFIX=so
EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html
# client and server subdirectories have symbolic links to ../libjsig.$(LIBRARY_SUFFIX)
EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
ifeq ($(ZIP_DEBUGINFO_FILES),1)
EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.diz
EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.diz
else
EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.debuginfo
EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.debuginfo
endif
endif
EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
EXPORT_SERVER_DIR = $(EXPORT_LIB_ARCH_DIR)/server
EXPORT_CLIENT_DIR = $(EXPORT_LIB_ARCH_DIR)/client
ifeq ($(JVM_VARIANT_SERVER),true)
EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
@@ -295,12 +295,12 @@ ifeq ($(JVM_VARIANT_CLIENT),true)
endif
endif
EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX)
EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX)
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
ifeq ($(ZIP_DEBUGINFO_FILES),1)
EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz
EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libsaproc.diz
else
EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo
EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libsaproc.debuginfo
endif
endif
EXPORT_LIST += $(EXPORT_LIB_DIR)/sa-jdi.jar


@@ -130,8 +130,9 @@ endif
$(XLIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
@echo $(LOG_INFO) Making $@
$(QUIETLY) mkdir -p $(XLIBJVM_DIR) ; \
$(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. \
$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor
$(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. $(EXTRA_CFLAGS) \
$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c \
$(EXTRA_LDFLAGS) -lc -lthread -ldoor
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(XLIBJVM_DTRACE_DEBUGINFO)
# Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR) is not
@@ -216,8 +217,9 @@ endif
$(LIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(XLIBJVM_DTRACE) $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
@echo $(LOG_INFO) Making $@
$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. \
$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor
$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. $(EXTRA_CFLAGS) \
$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c \
$(EXTRA_LDFLAGS) -lc -lthread -ldoor
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DTRACE_DEBUGINFO)
$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $@


@@ -50,7 +50,9 @@ endif
$(LIBJSIG): $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE)
@echo $(LOG_INFO) Making signal interposition lib...
$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
$(LFLAGS_JSIG) -o $@ $(JSIGSRCDIR)/jsig.c -ldl
$(EXTRA_CFLAGS) \
$(LFLAGS_JSIG) $(EXTRA_LDFLAGS) \
-o $@ $(JSIGSRCDIR)/jsig.c -ldl
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJSIG_DEBUGINFO)
$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJSIG_DEBUGINFO) $@


@@ -37,6 +37,11 @@ ifndef USE_GCC
OPT_CFLAGS/ciEnv.o = $(OPT_CFLAGS) -xinline=no%__1cFciEnvbFpost_compiled_method_load_event6MpnHnmethod__v_
endif
# Need extra inlining to get oop_ps_push_contents functions to perform well enough.
ifndef USE_GCC
OPT_CFLAGS/psPromotionManager.o = $(OPT_CFLAGS) -W2,-Ainline:inc=1000
endif
# (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files)
ifeq ("${Platform_compiler}", "sparcWorks")


@@ -89,6 +89,17 @@ $(shell uname -r -v \
# when actually building on Nevada-B158 or earlier:
#SOLARIS_11_B159_OR_LATER=-DSOLARIS_11_B159_OR_LATER
$(SADISOBJ): $(SADISSRCFILES)
$(QUIETLY) $(CC) \
$(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
-I$(SASRCDIR) \
-I$(GENERATED) \
-I$(BOOT_JAVA_HOME)/include \
-I$(BOOT_JAVA_HOME)/include/$(Platform_os_family) \
$(SOLARIS_11_B159_OR_LATER) \
$(EXTRA_CFLAGS) \
$(SADISSRCFILES) \
-c -o $(SADISOBJ)
$(LIBSAPROC): $(SASRCFILES) $(SADISOBJ) $(SAMAPFILE)
$(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \
@@ -103,23 +114,13 @@ $(LIBSAPROC): $(SASRCFILES) $(SADISOBJ) $(SAMAPFILE)
-I$(BOOT_JAVA_HOME)/include \
-I$(BOOT_JAVA_HOME)/include/$(Platform_os_family) \
$(SOLARIS_11_B159_OR_LATER) \
$(SASRCFILES) \
$(EXTRA_CXXFLAGS) $(EXTRA_LDFLAGS) \
$(SADISOBJ) \
$(SASRCFILES) \
$(SA_LFLAGS) \
-o $@ \
-ldl -ldemangle -lthread -lc
$(SADISOBJ): $(SADISSRCFILES)
$(QUIETLY) $(CC) \
$(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
-I$(SASRCDIR) \
-I$(GENERATED) \
-I$(BOOT_JAVA_HOME)/include \
-I$(BOOT_JAVA_HOME)/include/$(Platform_os_family) \
$(SOLARIS_11_B159_OR_LATER) \
$(SADISSRCFILES) \
-c -o $(SADISOBJ)
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBSAPROC_DEBUGINFO)
$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBSAPROC_DEBUGINFO) $@


@@ -148,7 +148,7 @@ LIBS += -lkstat
# By default, link the *.o into the library, not the executable.
LINK_INTO$(LINK_INTO) = LIBJVM
JDK_LIBDIR = $(JAVA_HOME)/jre/lib/$(LIBARCH)
JDK_LIBDIR = $(JAVA_HOME)/lib/$(LIBARCH)
#----------------------------------------------------------------------
# jvm_db & dtrace
@@ -288,6 +288,8 @@ else
endif
endif
LFLAGS_VM += $(EXTRA_LDFLAGS)
ifdef USE_GCC
LINK_VM = $(LINK_LIB.CC)
else


@@ -249,8 +249,8 @@ ifeq ($(BUILD_WIN_SA), 1)
endif
endif
EXPORT_SERVER_DIR = $(EXPORT_JRE_BIN_DIR)/server
EXPORT_CLIENT_DIR = $(EXPORT_JRE_BIN_DIR)/client
EXPORT_SERVER_DIR = $(EXPORT_BIN_DIR)/server
EXPORT_CLIENT_DIR = $(EXPORT_BIN_DIR)/client
ifeq ($(JVM_VARIANT_SERVER),true)
EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
@@ -280,13 +280,13 @@ endif
EXPORT_LIST += $(EXPORT_LIB_DIR)/jvm.lib
ifeq ($(BUILD_WIN_SA), 1)
EXPORT_LIST += $(EXPORT_JRE_BIN_DIR)/sawindbg.$(LIBRARY_SUFFIX)
EXPORT_LIST += $(EXPORT_BIN_DIR)/sawindbg.$(LIBRARY_SUFFIX)
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
ifeq ($(ZIP_DEBUGINFO_FILES),1)
EXPORT_LIST += $(EXPORT_JRE_BIN_DIR)/sawindbg.diz
EXPORT_LIST += $(EXPORT_BIN_DIR)/sawindbg.diz
else
EXPORT_LIST += $(EXPORT_JRE_BIN_DIR)/sawindbg.pdb
EXPORT_LIST += $(EXPORT_JRE_BIN_DIR)/sawindbg.map
EXPORT_LIST += $(EXPORT_BIN_DIR)/sawindbg.pdb
EXPORT_LIST += $(EXPORT_BIN_DIR)/sawindbg.map
endif
endif
EXPORT_LIST += $(EXPORT_LIB_DIR)/sa-jdi.jar


@@ -91,6 +91,9 @@ $(GENERATED)/sa-jdi.jar: $(AGENT_FILES)
SAWINDBG=sawindbg.dll
# Resource file containing VERSIONINFO
SA_Res_Files=.\version.sares
checkAndBuildSA:: $(SAWINDBG)
# These do not need to be optimized (don't run a lot of code) and it
@@ -126,10 +129,13 @@ SA_CFLAGS = $(SA_CFLAGS) $(MP_FLAG)
# Note that we do not keep sawindbj.obj around as it would then
# get included in the dumpbin command in build_vm_def.sh
# Force resources to be rebuilt every time
$(SA_Res_Files): FORCE
# In VS2005 or VS2008 the link command creates a .manifest file that we want
# to insert into the linked artifact so we do not need to track it separately.
# Use ";#2" for .dll and ";#1" for .exe in the MT command below:
$(SAWINDBG): $(SASRCFILES)
$(SAWINDBG): $(SASRCFILES) $(SA_Res_Files)
set INCLUDE=$(SA_INCLUDE)$(INCLUDE)
$(CXX) @<<
-I"$(BootStrapDir)/include" -I"$(BootStrapDir)/include/win32"
@@ -138,7 +144,7 @@ $(SAWINDBG): $(SASRCFILES)
-out:$*.obj
<<
set LIB=$(SA_LIB)$(LIB)
$(LD) -out:$@ -DLL sawindbg.obj sadis.obj dbgeng.lib $(SA_LFLAGS)
$(LD) -out:$@ -DLL sawindbg.obj sadis.obj dbgeng.lib $(SA_LFLAGS) $(SA_Res_Files)
!if "$(MT)" != ""
$(MT) -manifest $(@F).manifest -outputresource:$(@F);#2
!endif
@@ -150,6 +156,9 @@ $(SAWINDBG): $(SASRCFILES)
!endif
-@rm -f $*.obj
{$(COMMONSRC)\os\windows\vm}.rc.sares:
@$(RC) $(RC_FLAGS) /D "HS_FNAME=$(SAWINDBG)" /fo"$@" $<
cleanall :
rm -rf $(GENERATED)/saclasses
rm -rf $(GENERATED)/sa-jdi.jar


@@ -2138,30 +2138,7 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
__ br(Assembler::EQ, resolved);
// resolve first time through
address entry;
switch (bytecode()) {
case Bytecodes::_getstatic:
case Bytecodes::_putstatic:
case Bytecodes::_getfield:
case Bytecodes::_putfield:
entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put);
break;
case Bytecodes::_invokevirtual:
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:
case Bytecodes::_invokeinterface:
entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);
break;
case Bytecodes::_invokehandle:
entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle);
break;
case Bytecodes::_invokedynamic:
entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
break;
default:
fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
break;
}
address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
__ mov(temp, (int) bytecode());
__ call_VM(noreg, entry, temp);


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -137,7 +137,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
void field_offset_at(int n, Register tmp, Register dest, Register base);
int field_offset_at(Register object, address bcp, int offset);
void fast_iaaccess(int n, address bcp);
void fast_iagetfield(address bcp);
void fast_iaputfield(address bcp, bool do_store_check);
void index_check(Register array, Register index, int index_shift, Register tmp, Register res);


@@ -437,6 +437,14 @@ void TemplateTable::locals_index(Register Rdst, int offset) {
}
void TemplateTable::iload() {
iload_internal();
}
void TemplateTable::nofast_iload() {
iload_internal(may_not_rewrite);
}
void TemplateTable::iload_internal(RewriteControl rc) {
transition(vtos, itos);
// Get the local value into tos
@@ -445,7 +453,7 @@ void TemplateTable::iload() {
// Rewrite iload,iload pair into fast_iload2
// iload,caload pair into fast_icaload
if (RewriteFrequentPairs) {
if (RewriteFrequentPairs && rc == may_rewrite) {
Label Lrewrite, Ldone;
Register Rnext_byte = R3_ARG1,
Rrewrite_to = R6_ARG4,
@@ -709,6 +717,14 @@ void TemplateTable::aload(int n) {
}
void TemplateTable::aload_0() {
aload_0_internal();
}
void TemplateTable::nofast_aload_0() {
aload_0_internal(may_not_rewrite);
}
void TemplateTable::aload_0_internal(RewriteControl rc) {
transition(vtos, atos);
// According to bytecode histograms, the pairs:
//
@@ -732,7 +748,7 @@ void TemplateTable::aload_0() {
// These bytecodes with a small amount of code are most profitable
// to rewrite.
if (RewriteFrequentPairs) {
if (RewriteFrequentPairs && rc == may_rewrite) {
Label Lrewrite, Ldont_rewrite;
Register Rnext_byte = R3_ARG1,
@@ -2144,6 +2160,12 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist
__ get_cache_and_index_at_bcp(Rcache, 1, index_size);
Label Lresolved, Ldone;
Bytecodes::Code code = bytecode();
switch (code) {
case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
}
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
// We are resolved if the indices offset contains the current bytecode.
#if defined(VM_LITTLE_ENDIAN)
@@ -2152,24 +2174,11 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist
__ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
#endif
// Acquire by cmp-br-isync (see below).
__ cmpdi(CCR0, Rscratch, (int)bytecode());
__ cmpdi(CCR0, Rscratch, (int)code);
__ beq(CCR0, Lresolved);
address entry = NULL;
switch (bytecode()) {
case Bytecodes::_getstatic : // fall through
case Bytecodes::_putstatic : // fall through
case Bytecodes::_getfield : // fall through
case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
case Bytecodes::_invokevirtual : // fall through
case Bytecodes::_invokespecial : // fall through
case Bytecodes::_invokestatic : // fall through
case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
default : ShouldNotReachHere(); break;
}
__ li(R4_ARG2, (int)bytecode());
address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
__ li(R4_ARG2, code);
__ call_VM(noreg, entry, R4_ARG2, true);
// Update registers with resolved info.
@@ -2350,7 +2359,7 @@ void TemplateTable::pop_and_check_object(Register Roop) {
}
// PPC64: implement volatile loads as fence-store-acquire.
void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
transition(vtos, vtos);
Label Lacquire, Lisync;
@@ -2366,7 +2375,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
static address field_branch_table[number_of_states],
static_branch_table[number_of_states];
address* branch_table = is_static ? static_branch_table : field_branch_table;
address* branch_table = (is_static || rc == may_not_rewrite) ? static_branch_table : field_branch_table;
// Get field offset.
resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
@@ -2417,7 +2426,14 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
#ifdef ASSERT
__ bind(LFlagInvalid);
__ stop("got invalid flag", 0x654);
#endif
if (!is_static && rc == may_not_rewrite) {
// We reuse the code from is_static. It's jumped to via the table above.
return;
}
#ifdef ASSERT
// __ bind(Lvtos);
address pc_before_fence = __ pc();
__ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
@@ -2434,7 +2450,9 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
branch_table[dtos] = __ pc(); // non-volatile_entry point
__ lfdx(F15_ftos, Rclass_or_obj, Roffset);
__ push(dtos);
if (!is_static) patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch);
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch);
}
{
Label acquire_double;
__ beq(CCR6, acquire_double); // Volatile?
@@ -2453,7 +2471,9 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
branch_table[ftos] = __ pc(); // non-volatile_entry point
__ lfsx(F15_ftos, Rclass_or_obj, Roffset);
__ push(ftos);
if (!is_static) { patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch); }
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch);
}
{
Label acquire_float;
__ beq(CCR6, acquire_float); // Volatile?
@@ -2472,7 +2492,9 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
branch_table[itos] = __ pc(); // non-volatile_entry point
__ lwax(R17_tos, Rclass_or_obj, Roffset);
__ push(itos);
if (!is_static) patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch);
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch);
}
__ beq(CCR6, Lacquire); // Volatile?
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
@@ -2483,7 +2505,9 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
branch_table[ltos] = __ pc(); // non-volatile_entry point
__ ldx(R17_tos, Rclass_or_obj, Roffset);
__ push(ltos);
if (!is_static) patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch);
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch);
}
__ beq(CCR6, Lacquire); // Volatile?
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
@@ -2495,7 +2519,9 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
__ lbzx(R17_tos, Rclass_or_obj, Roffset);
__ extsb(R17_tos, R17_tos);
__ push(btos);
if (!is_static) patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch);
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch);
}
__ beq(CCR6, Lacquire); // Volatile?
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
@@ -2506,7 +2532,9 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
branch_table[ctos] = __ pc(); // non-volatile_entry point
__ lhzx(R17_tos, Rclass_or_obj, Roffset);
__ push(ctos);
if (!is_static) patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch);
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch);
}
__ beq(CCR6, Lacquire); // Volatile?
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
@@ -2517,7 +2545,9 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
branch_table[stos] = __ pc(); // non-volatile_entry point
__ lhax(R17_tos, Rclass_or_obj, Roffset);
__ push(stos);
if (!is_static) patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch);
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch);
}
__ beq(CCR6, Lacquire); // Volatile?
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
@@ -2530,7 +2560,9 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
__ verify_oop(R17_tos);
__ push(atos);
//__ dcbt(R17_tos); // prefetch
if (!is_static) patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch);
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch);
}
__ beq(CCR6, Lacquire); // Volatile?
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
@@ -2553,6 +2585,10 @@ void TemplateTable::getfield(int byte_no) {
getfield_or_static(byte_no, false);
}
void TemplateTable::nofast_getfield(int byte_no) {
getfield_or_static(byte_no, false, may_not_rewrite);
}
void TemplateTable::getstatic(int byte_no) {
getfield_or_static(byte_no, true);
}
@@ -2643,7 +2679,7 @@ void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, boo
}
// PPC64: implement volatile stores as release-store (return bytecode contains an additional release).
void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
Label Lvolatile;
const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
@@ -2657,10 +2693,12 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
Rbc = Rscratch3;
const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).
static address field_branch_table[number_of_states],
static address field_rw_branch_table[number_of_states],
field_norw_branch_table[number_of_states],
static_branch_table[number_of_states];
address* branch_table = is_static ? static_branch_table : field_branch_table;
address* branch_table = is_static ? static_branch_table :
(rc == may_rewrite ? field_rw_branch_table : field_norw_branch_table);
// Stack (grows up):
// value
@@ -2688,7 +2726,9 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
// Load from branch table and dispatch (volatile case: one instruction ahead).
__ sldi(Rflags, Rflags, LogBytesPerWord);
if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpwi(CR_is_vol, Rscratch, 1); } // Volatile?
if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ cmpwi(CR_is_vol, Rscratch, 1); // Volatile?
}
__ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile? size of instruction 1 : 0.
__ ldx(Rbtable, Rbtable, Rflags);
@@ -2715,9 +2755,13 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
assert(branch_table[dtos] == 0, "can't compute twice");
branch_table[dtos] = __ pc(); // non-volatile_entry point
__ pop(dtos);
if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
if (!is_static) {
pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.
}
__ stfdx(F15_ftos, Rclass_or_obj, Roffset);
if (!is_static) { patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no); }
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no);
}
if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ beq(CR_is_vol, Lvolatile); // Volatile?
}
@@ -2731,7 +2775,9 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(ftos);
if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
__ stfsx(F15_ftos, Rclass_or_obj, Roffset);
if (!is_static) { patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no); }
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no);
}
if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ beq(CR_is_vol, Lvolatile); // Volatile?
}
@@ -2745,7 +2791,9 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(itos);
if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
__ stwx(R17_tos, Rclass_or_obj, Roffset);
if (!is_static) { patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no); }
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no);
}
if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ beq(CR_is_vol, Lvolatile); // Volatile?
}
@@ -2759,7 +2807,9 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(ltos);
if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
__ stdx(R17_tos, Rclass_or_obj, Roffset);
if (!is_static) { patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no); }
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no);
}
if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ beq(CR_is_vol, Lvolatile); // Volatile?
}
@@ -2773,7 +2823,9 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(btos);
if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
__ stbx(R17_tos, Rclass_or_obj, Roffset);
if (!is_static) { patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no); }
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no);
}
if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ beq(CR_is_vol, Lvolatile); // Volatile?
}
@@ -2787,7 +2839,9 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(ctos);
if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1..
__ sthx(R17_tos, Rclass_or_obj, Roffset);
if (!is_static) { patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no); }
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no);
}
if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ beq(CR_is_vol, Lvolatile); // Volatile?
}
@@ -2801,7 +2855,9 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(stos);
if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
__ sthx(R17_tos, Rclass_or_obj, Roffset);
if (!is_static) { patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no); }
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no);
}
if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ beq(CR_is_vol, Lvolatile); // Volatile?
}
@@ -2815,7 +2871,9 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(atos);
if (!is_static) { pop_and_check_object(Rclass_or_obj); } // kills R11_scratch1
do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */);
if (!is_static) { patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no); }
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no);
}
if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ beq(CR_is_vol, Lvolatile); // Volatile?
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
@@ -2839,6 +2897,10 @@ void TemplateTable::putfield(int byte_no) {
putfield_or_static(byte_no, false);
}
void TemplateTable::nofast_putfield(int byte_no) {
putfield_or_static(byte_no, false, may_not_rewrite);
}
void TemplateTable::putstatic(int byte_no) {
putfield_or_static(byte_no, true);
}
@@ -3259,7 +3321,9 @@ void TemplateTable::invokevirtual(int byte_no) {
__ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
__ bfalse(CCR0, LnotFinal);
patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);
if (RewriteBytecodes && !UseSharedSpaces) {
patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);
}
invokevfinal_helper(Rvtableindex_or_method, Rflags, R11_scratch1, R12_scratch2);
__ align(32, 12);


@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -203,7 +203,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
void field_offset_at(int n, Register tmp, Register dest, Register base);
int field_offset_at(Register object, address bcp, int offset);
void fast_iaaccess(int n, address bcp);
void fast_iagetfield(address bcp);
void fast_iaputfield(address bcp, bool do_store_check );
void index_check(Register array, Register index, int index_shift, Register tmp, Register res);


@@ -385,7 +385,6 @@ void TemplateTable::fast_aldc(bool wide) {
__ verify_oop(Otos_i);
}
void TemplateTable::ldc2_w() {
transition(vtos, vtos);
Label Long, exit;
@@ -430,22 +429,28 @@ void TemplateTable::ldc2_w() {
__ bind(exit);
}
void TemplateTable::locals_index(Register reg, int offset) {
__ ldub( at_bcp(offset), reg );
}
void TemplateTable::locals_index_wide(Register reg) {
// offset is 2, not 1, because Lbcp points to wide prefix code
__ get_2_byte_integer_at_bcp(2, G4_scratch, reg, InterpreterMacroAssembler::Unsigned);
}
void TemplateTable::iload() {
iload_internal();
}
void TemplateTable::nofast_iload() {
iload_internal(may_not_rewrite);
}
void TemplateTable::iload_internal(RewriteControl rc) {
transition(vtos, itos);
// Rewrite iload,iload pair into fast_iload2
// iload,caload pair into fast_icaload
if (RewriteFrequentPairs) {
if (RewriteFrequentPairs && rc == may_rewrite) {
Label rewrite, done;
// get next byte
@@ -672,8 +677,15 @@ void TemplateTable::aload(int n) {
__ ld_ptr( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}
void TemplateTable::aload_0() {
aload_0_internal();
}
void TemplateTable::nofast_aload_0() {
aload_0_internal(may_not_rewrite);
}
void TemplateTable::aload_0_internal(RewriteControl rc) {
transition(vtos, atos);
// According to bytecode histograms, the pairs:
@@ -687,7 +699,7 @@ void TemplateTable::aload_0() {
// bytecode into a pair bytecode; otherwise it rewrites the current
// bytecode into _fast_aload_0 that doesn't do the pair check anymore.
//
if (RewriteFrequentPairs) {
if (RewriteFrequentPairs && rc == may_rewrite) {
Label rewrite, done;
// get next byte
@@ -731,7 +743,6 @@ void TemplateTable::aload_0() {
}
}
void TemplateTable::istore() {
transition(itos, vtos);
locals_index(G3_scratch);
@@ -2045,30 +2056,21 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
Register index,
size_t index_size) {
// Depends on cpCacheOop layout!
Label resolved;
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
__ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, Lbyte_code, byte_no, 1, index_size);
__ cmp(Lbyte_code, (int) bytecode()); // have we resolved this bytecode?
__ br(Assembler::equal, false, Assembler::pt, resolved);
__ delayed()->set((int)bytecode(), O1);
address entry;
switch (bytecode()) {
case Bytecodes::_getstatic : // fall through
case Bytecodes::_putstatic : // fall through
case Bytecodes::_getfield : // fall through
case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
case Bytecodes::_invokevirtual : // fall through
case Bytecodes::_invokespecial : // fall through
case Bytecodes::_invokestatic : // fall through
case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
default:
fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
break;
Bytecodes::Code code = bytecode();
switch (code) {
case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
}
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
__ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, Lbyte_code, byte_no, 1, index_size);
__ cmp(Lbyte_code, code); // have we resolved this bytecode?
__ br(Assembler::equal, false, Assembler::pt, resolved);
__ delayed()->set(code, O1);
address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
// first time invocation - must resolve first
__ call_VM(noreg, entry, O1);
// Update registers with resolved info
@ -2183,7 +2185,7 @@ void TemplateTable::jvmti_post_field_access(Register Rcache,
}
}
void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
transition(vtos, vtos);
Register Rcache = G3_scratch;
@ -2231,7 +2233,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
__ load_heap_oop(Rclass, Roffset, Otos_i);
__ verify_oop(Otos_i);
__ push(atos);
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_agetfield, G3_scratch, G4_scratch);
}
__ ba(checkVolatile);
@ -2246,7 +2248,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
// itos
__ ld(Rclass, Roffset, Otos_i);
__ push(itos);
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_igetfield, G3_scratch, G4_scratch);
}
__ ba(checkVolatile);
@ -2262,7 +2264,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
// load must be atomic
__ ld_long(Rclass, Roffset, Otos_l);
__ push(ltos);
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_lgetfield, G3_scratch, G4_scratch);
}
__ ba(checkVolatile);
@ -2277,7 +2279,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
// btos
__ ldsb(Rclass, Roffset, Otos_i);
__ push(itos);
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch);
}
__ ba(checkVolatile);
@ -2292,7 +2294,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
// ctos
__ lduh(Rclass, Roffset, Otos_i);
__ push(itos);
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_cgetfield, G3_scratch, G4_scratch);
}
__ ba(checkVolatile);
@ -2307,7 +2309,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
// stos
__ ldsh(Rclass, Roffset, Otos_i);
__ push(itos);
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_sgetfield, G3_scratch, G4_scratch);
}
__ ba(checkVolatile);
@ -2323,7 +2325,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
// ftos
__ ldf(FloatRegisterImpl::S, Rclass, Roffset, Ftos_f);
__ push(ftos);
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_fgetfield, G3_scratch, G4_scratch);
}
__ ba(checkVolatile);
@ -2335,7 +2337,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
// dtos
__ ldf(FloatRegisterImpl::D, Rclass, Roffset, Ftos_d);
__ push(dtos);
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_dgetfield, G3_scratch, G4_scratch);
}
@ -2350,16 +2352,18 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
__ bind(exit);
}
void TemplateTable::getfield(int byte_no) {
getfield_or_static(byte_no, false);
}
void TemplateTable::nofast_getfield(int byte_no) {
getfield_or_static(byte_no, false, may_not_rewrite);
}
void TemplateTable::getstatic(int byte_no) {
getfield_or_static(byte_no, true);
}
void TemplateTable::fast_accessfield(TosState state) {
transition(atos, state);
Register Rcache = G3_scratch;
@ -2544,7 +2548,7 @@ void TemplateTable::pop_and_check_object(Register r) {
__ verify_oop(r);
}
void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
transition(vtos, vtos);
Register Rcache = G3_scratch;
Register index = G4_scratch;
@ -2620,7 +2624,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop_i();
pop_and_check_object(Rclass);
__ st(Otos_i, Rclass, Roffset);
patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch, true, byte_no);
if (rc == may_rewrite) patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch, true, byte_no);
__ ba(checkVolatile);
__ delayed()->tst(Lscratch);
}
@ -2636,7 +2640,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
pop_and_check_object(Rclass);
__ verify_oop(Otos_i);
do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch, true, byte_no);
if (rc == may_rewrite) patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch, true, byte_no);
__ ba(checkVolatile);
__ delayed()->tst(Lscratch);
}
@ -2653,7 +2657,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop_i();
if (!is_static) pop_and_check_object(Rclass);
__ stb(Otos_i, Rclass, Roffset);
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch, true, byte_no);
}
__ ba(checkVolatile);
@ -2670,7 +2674,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop_l();
if (!is_static) pop_and_check_object(Rclass);
__ st_long(Otos_l, Rclass, Roffset);
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch, true, byte_no);
}
__ ba(checkVolatile);
@ -2687,7 +2691,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop_i();
if (!is_static) pop_and_check_object(Rclass);
__ sth(Otos_i, Rclass, Roffset);
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch, true, byte_no);
}
__ ba(checkVolatile);
@ -2704,7 +2708,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop_i();
if (!is_static) pop_and_check_object(Rclass);
__ sth(Otos_i, Rclass, Roffset);
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch, true, byte_no);
}
__ ba(checkVolatile);
@ -2721,7 +2725,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop_f();
if (!is_static) pop_and_check_object(Rclass);
__ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch, true, byte_no);
}
__ ba(checkVolatile);
@ -2735,7 +2739,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop_d();
if (!is_static) pop_and_check_object(Rclass);
__ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch, true, byte_no);
}
}
@ -2809,16 +2813,18 @@ void TemplateTable::fast_storefield(TosState state) {
}
}
void TemplateTable::putfield(int byte_no) {
putfield_or_static(byte_no, false);
}
void TemplateTable::nofast_putfield(int byte_no) {
putfield_or_static(byte_no, false, may_not_rewrite);
}
void TemplateTable::putstatic(int byte_no) {
putfield_or_static(byte_no, true);
}
void TemplateTable::fast_xaccess(TosState state) {
transition(vtos, state);
Register Rcache = G3_scratch;
@ -2971,7 +2977,9 @@ void TemplateTable::invokevirtual(int byte_no) {
__ br(Assembler::zero, false, Assembler::pt, notFinal);
__ delayed()->and3(Rret, 0xFF, G4_scratch); // gets number of parameters
patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp);
if (RewriteBytecodes && !UseSharedSpaces) {
patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp);
}
invokevfinal_helper(Rscratch, Rret);
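A worked example of the pair rewriting that both iload_internal and aload_0_internal now gate (bytecode names as in Bytecodes; cases per the comments above):

// Worked example of the guarded pair rewrites:
//   iload ; iload             ==> _fast_iload2
//   iload ; caload            ==> _fast_icaload
//   aload_0 ; _fast_igetfield ==> _fast_iaccess_0  (a/f variants likewise)
// The template peeks at the next bytecode; when no profitable pair applies,
// aload_0 alone degrades to _fast_aload_0. Under rc == may_not_rewrite
// (the _nofast_* templates) every one of these patches is skipped and the
// original bytecode stays in place.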

View file

@ -543,8 +543,16 @@ void TemplateTable::locals_index(Register reg, int offset) {
}
void TemplateTable::iload() {
iload_internal();
}
void TemplateTable::nofast_iload() {
iload_internal(may_not_rewrite);
}
void TemplateTable::iload_internal(RewriteControl rc) {
transition(vtos, itos);
if (RewriteFrequentPairs) {
if (RewriteFrequentPairs && rc == may_rewrite) {
Label rewrite, done;
const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
LP64_ONLY(assert(rbx != bc, "register damaged"));
@ -815,6 +823,14 @@ void TemplateTable::aload(int n) {
}
void TemplateTable::aload_0() {
aload_0_internal();
}
void TemplateTable::nofast_aload_0() {
aload_0_internal(may_not_rewrite);
}
void TemplateTable::aload_0_internal(RewriteControl rc) {
transition(vtos, atos);
// According to bytecode histograms, the pairs:
//
@ -837,7 +853,7 @@ void TemplateTable::aload_0() {
// aload_0, iload_1
// These bytecodes with a small amount of code are most profitable
// to rewrite
if (RewriteFrequentPairs) {
if (RewriteFrequentPairs && rc == may_rewrite) {
Label rewrite, done;
const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
@ -2491,29 +2507,21 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
assert_different_registers(Rcache, index, temp);
Label resolved;
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
__ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
__ cmpl(temp, (int) bytecode()); // have we resolved this bytecode?
__ jcc(Assembler::equal, resolved);
Bytecodes::Code code = bytecode();
switch (code) {
case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
}
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
__ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
__ cmpl(temp, code); // have we resolved this bytecode?
__ jcc(Assembler::equal, resolved);
// resolve first time through
address entry;
switch (bytecode()) {
case Bytecodes::_getstatic : // fall through
case Bytecodes::_putstatic : // fall through
case Bytecodes::_getfield : // fall through
case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
case Bytecodes::_invokevirtual : // fall through
case Bytecodes::_invokespecial : // fall through
case Bytecodes::_invokestatic : // fall through
case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
default:
fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
break;
}
__ movl(temp, (int)bytecode());
address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
__ movl(temp, code);
__ call_VM(noreg, entry, temp);
// Update registers with resolved info
__ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
@ -2628,7 +2636,7 @@ void TemplateTable::pop_and_check_object(Register r) {
__ verify_oop(r);
}
void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
transition(vtos, vtos);
const Register cache = rcx;
@ -2660,7 +2668,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
__ load_signed_byte(rax, field);
__ push(btos);
// Rewrite bytecode to be faster
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
}
__ jmp(Done);
@ -2671,7 +2679,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
// atos
__ load_heap_oop(rax, field);
__ push(atos);
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
}
__ jmp(Done);
@ -2683,7 +2691,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
__ movl(rax, field);
__ push(itos);
// Rewrite bytecode to be faster
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
}
__ jmp(Done);
@ -2695,7 +2703,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
__ load_unsigned_short(rax, field);
__ push(ctos);
// Rewrite bytecode to be faster
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
}
__ jmp(Done);
@ -2707,7 +2715,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
__ load_signed_short(rax, field);
__ push(stos);
// Rewrite bytecode to be faster
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
}
__ jmp(Done);
@ -2731,7 +2739,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
__ push(ltos);
// Rewrite bytecode to be faster
LP64_ONLY(if (!is_static) patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx));
LP64_ONLY(if (!is_static && rc == may_rewrite) patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx));
__ jmp(Done);
__ bind(notLong);
@ -2743,7 +2751,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
NOT_LP64(__ fld_s(field));
__ push(ftos);
// Rewrite bytecode to be faster
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
}
__ jmp(Done);
@ -2758,7 +2766,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
NOT_LP64(__ fld_d(field));
__ push(dtos);
// Rewrite bytecode to be faster
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
}
#ifdef ASSERT
@ -2779,6 +2787,10 @@ void TemplateTable::getfield(int byte_no) {
getfield_or_static(byte_no, false);
}
void TemplateTable::nofast_getfield(int byte_no) {
getfield_or_static(byte_no, false, may_not_rewrite);
}
void TemplateTable::getstatic(int byte_no) {
getfield_or_static(byte_no, true);
}
@ -2870,7 +2882,7 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is
}
}
void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
transition(vtos, vtos);
const Register cache = rcx;
@ -2911,7 +2923,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(btos);
if (!is_static) pop_and_check_object(obj);
__ movb(field, rax);
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
}
__ jmp(Done);
@ -2927,7 +2939,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
if (!is_static) pop_and_check_object(obj);
// Store into the field
do_oop_store(_masm, field, rax, _bs->kind(), false);
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
}
__ jmp(Done);
@ -2942,7 +2954,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(itos);
if (!is_static) pop_and_check_object(obj);
__ movl(field, rax);
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
}
__ jmp(Done);
@ -2957,7 +2969,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(ctos);
if (!is_static) pop_and_check_object(obj);
__ movw(field, rax);
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
}
__ jmp(Done);
@ -2972,7 +2984,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(stos);
if (!is_static) pop_and_check_object(obj);
__ movw(field, rax);
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
}
__ jmp(Done);
@ -2988,7 +3000,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(ltos);
if (!is_static) pop_and_check_object(obj);
__ movq(field, rax);
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
}
__ jmp(Done);
@ -3035,7 +3047,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
if (!is_static) pop_and_check_object(obj);
NOT_LP64( __ fstp_s(field);)
LP64_ONLY( __ movflt(field, xmm0);)
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
}
__ jmp(Done);
@ -3053,7 +3065,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
if (!is_static) pop_and_check_object(obj);
NOT_LP64( __ fstp_d(field);)
LP64_ONLY( __ movdbl(field, xmm0);)
if (!is_static) {
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
}
}
@ -3079,6 +3091,10 @@ void TemplateTable::putfield(int byte_no) {
putfield_or_static(byte_no, false);
}
void TemplateTable::nofast_putfield(int byte_no) {
putfield_or_static(byte_no, false, may_not_rewrite);
}
void TemplateTable::putstatic(int byte_no) {
putfield_or_static(byte_no, true);
}
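Both resolve_cache_and_index rewrites funnel into a single runtime entry; a schematic of what that entry does (simplified sketch of InterpreterRuntime::resolve_from_cache, body abbreviated):

// Schematic of the consolidated runtime entry (interpreterRuntime.cpp):
//
//   IRT_ENTRY(void, InterpreterRuntime::resolve_from_cache(
//       JavaThread* thread, Bytecodes::Code bytecode))
//     switch (bytecode) {
//       case Bytecodes::_getstatic: case Bytecodes::_putstatic:
//       case Bytecodes::_getfield:  case Bytecodes::_putfield:
//         resolve_get_put(thread, bytecode); break;
//       case Bytecodes::_invokevirtual: case Bytecodes::_invokespecial:
//       case Bytecodes::_invokestatic:  case Bytecodes::_invokeinterface:
//         resolve_invoke(thread, bytecode); break;
//       // _invokehandle / _invokedynamic analogously
//       default: fatal(...);
//     }
//   IRT_END
//
// The templates now only canonicalize _nofast_get/putfield back to the
// base bytecode, compare the cached indicator byte, and call this one
// entry on a miss.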

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,8 +22,8 @@
*
*/
#ifndef CPU_X86_VM_TEMPLATETABLE_X86_32_HPP
#define CPU_X86_VM_TEMPLATETABLE_X86_32_HPP
#ifndef CPU_X86_VM_TEMPLATETABLE_X86_HPP
#define CPU_X86_VM_TEMPLATETABLE_X86_HPP
static void prepare_invoke(int byte_no,
Register method, // linked method (or i-klass)
@ -39,4 +39,4 @@
static void index_check(Register array, Register index);
static void index_check_without_pop(Register array, Register index);
#endif // CPU_X86_VM_TEMPLATETABLE_X86_32_HPP
#endif // CPU_X86_VM_TEMPLATETABLE_X86_HPP

View file

@ -379,15 +379,6 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
};
};
void VM_Version::get_cpu_info_wrapper() {
get_cpu_info_stub(&_cpuid_info);
}
#ifndef CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED
#define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) f()
#endif
void VM_Version::get_processor_features() {
_cpu = 4; // 486 by default
@ -401,9 +392,7 @@ void VM_Version::get_processor_features() {
if (!Use486InstrsOnly) {
// Get raw processor info
// Some platforms (like Win*) need a wrapper around here
// in order to properly handle SEGV for YMM registers test.
CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(get_cpu_info_wrapper);
get_cpu_info_stub(&_cpuid_info);
assert_is_initialized();
_cpu = extended_cpu_family();

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -814,9 +814,9 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
}
#endif // INCLUDE_ALL_GCS
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
// If G1 is not enabled then attempt to go through the normal entry point
// Reference.get could be instrumented by jvmti
return generate_normal_entry(false);
}
address InterpreterGenerator::generate_native_entry(bool synchronized) {

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -213,7 +213,7 @@ void frame::zero_print_on_error(int frame_index,
valuebuf[buflen - 1] = '\0';
// Print the result
st->print_cr(" " PTR_FORMAT ": %-21s = %s", addr, fieldbuf, valuebuf);
st->print_cr(" " PTR_FORMAT ": %-21s = %s", p2i(addr), fieldbuf, valuebuf);
}
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -144,6 +144,7 @@ int MethodHandles::method_handle_entry_linkToVirtual(Method* method, intptr_t UN
oop recv = STACK_OBJECT(-numArgs);
Klass* clazz = recv->klass();
Klass* klass_part = InstanceKlass::cast(clazz);
ResourceMark rm(THREAD);
klassVtable* vtable = klass_part->vtable();
Method* vmtarget = vtable->method_at(vmindex);
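The added ResourceMark matters because klassVtable objects are resource-area allocated; a minimal sketch of the scoping it establishes (assuming standard HotSpot resource-area semantics):

// Minimal sketch of the scope the mark creates:
{
  ResourceMark rm(THREAD);                      // opens a resource scope
  klassVtable* vtable = klass_part->vtable();   // allocated in that scope
  Method* vmtarget = vtable->method_at(vmindex);
  // ... use vmtarget while rm is live ...
}                                               // storage reclaimed here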

View file

@ -1,6 +1,6 @@
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2010 Red Hat, Inc.
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2010, 2015 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -45,6 +45,18 @@
#include "opto/runtime.hpp"
#endif
// For SafeFetch we need POSIX tls and setjmp
#include <setjmp.h>
#include <pthread.h>
static pthread_key_t g_jmpbuf_key;
// Return the currently active jump buffer for this thread, if
// there is one; NULL otherwise. Called from the zero signal handlers.
extern sigjmp_buf* get_jmp_buf_for_continuation() {
return (sigjmp_buf*) pthread_getspecific(g_jmpbuf_key);
}
// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp
@ -177,17 +189,55 @@ class StubGenerator: public StubCodeGenerator {
}
static int SafeFetch32(int *adr, int errValue) {
// set up a jump buffer; anchor the pointer to the jump buffer in tls; then
// do the pointer access. If pointer is invalid, we crash; in signal
// handler, we retrieve pointer to jmp buffer from tls, and jump back.
//
// Note: the jump buffer itself - which can get pretty large depending on
// the architecture - lives on the stack and that is fine, because we will
// not rewind the stack: either we crash, in which case signal handler
// frame is below us, or we don't crash, in which case it does not matter.
sigjmp_buf jb;
if (sigsetjmp(jb, 1)) {
// we crashed. clean up tls and return default value.
pthread_setspecific(g_jmpbuf_key, NULL);
return errValue;
} else {
// preparation phase
pthread_setspecific(g_jmpbuf_key, &jb);
}
int value = errValue;
value = *adr;
// all went well. clean tls.
pthread_setspecific(g_jmpbuf_key, NULL);
return value;
}
static intptr_t SafeFetchN(intptr_t *adr, intptr_t errValue) {
sigjmp_buf jb;
if (sigsetjmp(jb, 1)) {
// we crashed. clean up tls and return default value.
pthread_setspecific(g_jmpbuf_key, NULL);
return errValue;
} else {
// preparation phase
pthread_setspecific(g_jmpbuf_key, &jb);
}
intptr_t value = errValue;
value = *adr;
// all went well. clean tls.
pthread_setspecific(g_jmpbuf_key, NULL);
return value;
}
void generate_initial() {
// Generates all stubs and initializes the entry points
@ -241,6 +291,7 @@ class StubGenerator: public StubCodeGenerator {
generate_arraycopy_stubs();
// Safefetch stubs.
pthread_key_create(&g_jmpbuf_key, NULL);
StubRoutines::_safefetch32_entry = CAST_FROM_FN_PTR(address, StubGenerator::SafeFetch32);
StubRoutines::_safefetch32_fault_pc = NULL;
StubRoutines::_safefetch32_continuation_pc = NULL;
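The TLS-plus-sigsetjmp scheme is small enough to demonstrate outside the VM; a minimal standalone POSIX sketch (hypothetical demo program, not VM code):

#include <csignal>
#include <cstdio>
#include <pthread.h>
#include <setjmp.h>

static pthread_key_t g_jmpbuf_key;

// Signal handler: if this thread armed a jump buffer, unwind back to it.
static void fault_handler(int) {
  sigjmp_buf* jb = (sigjmp_buf*) pthread_getspecific(g_jmpbuf_key);
  if (jb != NULL) siglongjmp(*jb, 1);
}

static int safe_fetch32(int* adr, int err_value) {
  sigjmp_buf jb;
  if (sigsetjmp(jb, 1)) {                     // reached via siglongjmp: faulted
    pthread_setspecific(g_jmpbuf_key, NULL);
    return err_value;
  }
  pthread_setspecific(g_jmpbuf_key, &jb);     // arm the handler for this thread
  int value = *adr;                           // the potentially faulting load
  pthread_setspecific(g_jmpbuf_key, NULL);    // disarm
  return value;
}

int main() {
  pthread_key_create(&g_jmpbuf_key, NULL);
  struct sigaction sa = {};
  sa.sa_handler = fault_handler;
  sigaction(SIGSEGV, &sa, NULL);
  sigaction(SIGBUS, &sa, NULL);
  int ok = 42;
  std::printf("%d %d\n", safe_fetch32(&ok, -1), safe_fetch32((int*) 16, -1));
  return 0;                                   // prints "42 -1"
}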

View file

@ -239,7 +239,6 @@ static bool check_signals = true;
static pid_t _initial_pid = 0;
static int SR_signum = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;
static pthread_mutex_t dl_mutex; // Used to protect dlsym() calls.
// This describes the state of multipage support of the underlying
// OS. Note that this is of no interest to the outsize world and
@ -315,19 +314,6 @@ julong os::physical_memory() {
return Aix::physical_memory();
}
////////////////////////////////////////////////////////////////////////////////
// environment support
bool os::getenv(const char* name, char* buf, int len) {
const char* val = ::getenv(name);
if (val != NULL && strlen(val) < (size_t)len) {
strcpy(buf, val);
return true;
}
if (len > 0) buf[0] = 0; // return a null string
return false;
}
// Return true if user is running as root.
bool os::have_special_privileges() {
@ -1549,13 +1535,8 @@ void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
return NULL;
}
// Glibc-2.0 libdl is not MT safe. If you are building with any glibc,
// chances are you might want to run the generated bits against glibc-2.0
// libdl.so, so always use locking for any version of glibc.
void* os::dll_lookup(void* handle, const char* name) {
pthread_mutex_lock(&dl_mutex);
void* res = dlsym(handle, name);
pthread_mutex_unlock(&dl_mutex);
return res;
}
@ -3534,7 +3515,6 @@ void os::init(void) {
Aix::_main_thread = pthread_self();
initial_time_count = os::elapsed_counter();
pthread_mutex_init(&dl_mutex, NULL);
// If the pagesize of the VM is greater than 8K determine the appropriate
// number of initial guard pages. The user can change this with the

View file

@ -190,20 +190,6 @@ julong os::physical_memory() {
return Bsd::physical_memory();
}
////////////////////////////////////////////////////////////////////////////////
// environment support
bool os::getenv(const char* name, char* buf, int len) {
const char* val = ::getenv(name);
if (val != NULL && strlen(val) < (size_t)len) {
strcpy(buf, val);
return true;
}
if (len > 0) buf[0] = 0; // return a null string
return false;
}
// Return true if user is running as root.
bool os::have_special_privileges() {

View file

@ -158,9 +158,6 @@ static pid_t _initial_pid = 0;
static int SR_signum = SIGUSR2;
sigset_t SR_sigset;
// Used to protect dlsym() calls
static pthread_mutex_t dl_mutex;
// Declarations
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
@ -184,20 +181,6 @@ julong os::physical_memory() {
return Linux::physical_memory();
}
////////////////////////////////////////////////////////////////////////////////
// environment support
bool os::getenv(const char* name, char* buf, int len) {
const char* val = ::getenv(name);
if (val != NULL && strlen(val) < (size_t)len) {
strcpy(buf, val);
return true;
}
if (len > 0) buf[0] = 0; // return a null string
return false;
}
// Return true if user is running as root.
bool os::have_special_privileges() {
@ -2039,14 +2022,8 @@ void * os::Linux::dll_load_in_vmthread(const char *filename, char *ebuf,
return result;
}
// glibc-2.0 libdl is not MT safe. If you are building with any glibc,
// chances are you might want to run the generated bits against glibc-2.0
// libdl.so, so always use locking for any version of glibc.
//
void* os::dll_lookup(void* handle, const char* name) {
pthread_mutex_lock(&dl_mutex);
void* res = dlsym(handle, name);
pthread_mutex_unlock(&dl_mutex);
return res;
}
@ -4655,8 +4632,6 @@ void os::init(void) {
}
// else it defaults to CLOCK_REALTIME
pthread_mutex_init(&dl_mutex, NULL);
// If the pagesize of the VM is greater than 8K determine the appropriate
// number of initial guard pages. The user can change this with the
// command line arguments, if needed.
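With every supported C library providing an MT-safe dlsym, the dl_mutex serialization is dropped here as on AIX above; the surviving lookup is presumably just:

void* os::dll_lookup(void* handle, const char* name) {
  return dlsym(handle, name);   // dlsym is MT-safe on all supported libraries
}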

View file

@ -555,17 +555,6 @@ bool os::bind_to_processor(uint processor_id) {
return (bind_result == 0);
}
bool os::getenv(const char* name, char* buffer, int len) {
char* val = ::getenv(name);
if (val == NULL || strlen(val) + 1 > len) {
if (len > 0) buffer[0] = 0; // return a null string
return false;
}
strcpy(buffer, val);
return true;
}
// Return true if user is running as root.
bool os::have_special_privileges() {

View file

@ -153,11 +153,6 @@ static inline double fileTimeAsDouble(FILETIME* time) {
// Implementation of os
bool os::getenv(const char* name, char* buffer, int len) {
int result = GetEnvironmentVariable(name, buffer, len);
return result > 0 && result < len;
}
bool os::unsetenv(const char* name) {
assert(name != NULL, "Null pointer");
return (SetEnvironmentVariable(name, NULL) == TRUE);
@ -188,9 +183,13 @@ void os::init_system_properties_values() {
char *dll_path;
char *pslash;
char *bin = "\\bin";
char home_dir[MAX_PATH];
char home_dir[MAX_PATH + 1];
char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
if (!getenv("_ALT_JAVA_HOME_DIR", home_dir, MAX_PATH)) {
if (alt_home_dir != NULL) {
strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
home_dir[MAX_PATH] = '\0';
} else {
os::jvm_path(home_dir, sizeof(home_dir));
// Found the full path to jvm.dll.
// Now cut the path to <java_home>/jre if we can.
@ -2696,17 +2695,6 @@ address os::win32::fast_jni_accessor_wrapper(BasicType type) {
}
#endif
void os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) {
// Install a win32 structured exception handler around the test
// function call so the VM can generate an error dump if needed.
__try {
(*funcPtr)();
} __except(topLevelExceptionFilter(
(_EXCEPTION_POINTERS*)_exception_info())) {
// Nothing to do.
}
}
// Virtual Memory
int os::vm_page_size() { return os::win32::vm_page_size(); }
@ -5930,4 +5918,3 @@ void TestReserveMemorySpecial_test() {
UseNUMAInterleaving = old_use_numa_interleaving;
}
#endif // PRODUCT
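The home_dir hunk above illustrates the idiom that replaces the deleted os::getenv wrapper; generically (helper name and semantics illustrative):

#include <cstdlib>
#include <cstring>

// Hypothetical helper: copy an environment value into a fixed buffer,
// truncating safely - what callers of the removed os::getenv now inline.
static bool copy_env(const char* name, char* buf, size_t len) {
  const char* val = ::getenv(name);
  if (val == NULL || len == 0) return false;
  strncpy(buf, val, len - 1);
  buf[len - 1] = '\0';          // strncpy does not terminate on truncation
  return true;
}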

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -115,8 +115,6 @@ class win32 {
static address fast_jni_accessor_wrapper(BasicType);
#endif
static void call_test_func_with_wrapper(void (*funcPtr)(void));
// filter function to ignore faults on serializations page
static LONG WINAPI serialize_fault_filter(struct _EXCEPTION_POINTERS* e);
};

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -104,7 +104,4 @@ inline void os::exit(int num) {
win32::exit_process_or_thread(win32::EPT_PROCESS, num);
}
#define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) \
os::win32::call_test_func_with_wrapper(f)
#endif // OS_WINDOWS_VM_OS_WINDOWS_INLINE_HPP

View file

@ -59,6 +59,10 @@
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
// See stubGenerator_zero.cpp
#include <setjmp.h>
extern sigjmp_buf* get_jmp_buf_for_continuation();
address os::current_stack_pointer() {
address dummy = (address) &dummy;
return dummy;
@ -134,6 +138,14 @@ JVM_handle_bsd_signal(int sig,
SignalHandlerMark shm(t);
// handle SafeFetch faults
if (sig == SIGSEGV || sig == SIGBUS) {
sigjmp_buf* const pjb = get_jmp_buf_for_continuation();
if (pjb) {
siglongjmp(*pjb, 1);
}
}
// Note: it's not uncommon that JNI code uses signal/sigset to
// install then restore certain signal handler (e.g. to temporarily
// block SIGPIPE, or have a SIGILL handler when detecting CPU

View file

@ -54,6 +54,10 @@
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
// See stubGenerator_zero.cpp
#include <setjmp.h>
extern sigjmp_buf* get_jmp_buf_for_continuation();
address os::current_stack_pointer() {
address dummy = (address) &dummy;
return dummy;
@ -125,6 +129,14 @@ JVM_handle_linux_signal(int sig,
SignalHandlerMark shm(t);
// handle SafeFetch faults
if (sig == SIGSEGV || sig == SIGBUS) {
sigjmp_buf* const pjb = get_jmp_buf_for_continuation();
if (pjb) {
siglongjmp(*pjb, 1);
}
}
// Note: it's not uncommon that JNI code uses signal/sigset to
// install then restore certain signal handler (e.g. to temporarily
// block SIGPIPE, or have a SIGILL handler when detecting CPU

View file

@ -4838,20 +4838,21 @@ void ClassFileParser::verify_legal_method_modifiers(
}
}
} else { // not interface
if (is_initializer) {
if (is_static || is_final || is_synchronized || is_native ||
is_abstract || (major_gte_15 && is_bridge)) {
is_illegal = true;
}
} else { // not initializer
if (is_abstract) {
if ((is_final || is_native || is_private || is_static ||
(major_gte_15 && (is_synchronized || is_strict)))) {
if (has_illegal_visibility(flags)) {
is_illegal = true;
} else {
if (is_initializer) {
if (is_static || is_final || is_synchronized || is_native ||
is_abstract || (major_gte_15 && is_bridge)) {
is_illegal = true;
}
}
if (has_illegal_visibility(flags)) {
is_illegal = true;
} else { // not initializer
if (is_abstract) {
if ((is_final || is_native || is_private || is_static ||
(major_gte_15 && (is_synchronized || is_strict)))) {
is_illegal = true;
}
}
}
}
}
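A concrete case showing what the reordering changes (illustrative flag combination):

// Illustrative: a class file declaring <init> with
// access_flags = ACC_PUBLIC | ACC_PRIVATE.
//   Before: the initializer branch only tested static/final/synchronized/
//   native/abstract/bridge, so the conflicting visibility bits could slip
//   through for initializers.
//   After: has_illegal_visibility(flags) is evaluated before the
//   initializer/non-initializer split, rejecting such combinations
//   uniformly for both kinds of method.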

View file

@ -1313,7 +1313,8 @@ static inline int version_at(unsigned int merged) {
}
static inline bool version_matches(Method* method, int version) {
return (method->constants()->version() == version && version < MAX_VERSION);
assert(version < MAX_VERSION, "version is too big");
return method != NULL && (method->constants()->version() == version);
}
static inline int get_line_number(Method* method, int bci) {
@ -1343,6 +1344,7 @@ class BacktraceBuilder: public StackObj {
typeArrayOop _methods;
typeArrayOop _bcis;
objArrayOop _mirrors;
typeArrayOop _cprefs; // needed to insulate method name against redefinition
int _index;
No_Safepoint_Verifier _nsv;
@ -1350,8 +1352,9 @@ class BacktraceBuilder: public StackObj {
enum {
trace_methods_offset = java_lang_Throwable::trace_methods_offset,
trace_bcis_offset = java_lang_Throwable::trace_bcis_offset,
trace_mirrors_offset = java_lang_Throwable::trace_mirrors_offset,
trace_cprefs_offset = java_lang_Throwable::trace_cprefs_offset,
trace_next_offset = java_lang_Throwable::trace_next_offset,
trace_size = java_lang_Throwable::trace_size,
trace_chunk_size = java_lang_Throwable::trace_chunk_size
@ -1373,9 +1376,14 @@ class BacktraceBuilder: public StackObj {
assert(mirrors != NULL, "mirror array should be initialized in backtrace");
return mirrors;
}
static typeArrayOop get_cprefs(objArrayHandle chunk) {
typeArrayOop cprefs = typeArrayOop(chunk->obj_at(trace_cprefs_offset));
assert(cprefs != NULL, "cprefs array should be initialized in backtrace");
return cprefs;
}
// constructor for new backtrace
BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL), _mirrors(NULL) {
BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL), _mirrors(NULL), _cprefs(NULL) {
expand(CHECK);
_backtrace = _head;
_index = 0;
@ -1385,6 +1393,7 @@ class BacktraceBuilder: public StackObj {
_methods = get_methods(backtrace);
_bcis = get_bcis(backtrace);
_mirrors = get_mirrors(backtrace);
_cprefs = get_cprefs(backtrace);
assert(_methods->length() == _bcis->length() &&
_methods->length() == _mirrors->length(),
"method and source information arrays should match");
@ -1410,17 +1419,22 @@ class BacktraceBuilder: public StackObj {
objArrayOop mirrors = oopFactory::new_objectArray(trace_chunk_size, CHECK);
objArrayHandle new_mirrors(THREAD, mirrors);
typeArrayOop cprefs = oopFactory::new_shortArray(trace_chunk_size, CHECK);
typeArrayHandle new_cprefs(THREAD, cprefs);
if (!old_head.is_null()) {
old_head->obj_at_put(trace_next_offset, new_head());
}
new_head->obj_at_put(trace_methods_offset, new_methods());
new_head->obj_at_put(trace_bcis_offset, new_bcis());
new_head->obj_at_put(trace_mirrors_offset, new_mirrors());
new_head->obj_at_put(trace_cprefs_offset, new_cprefs());
_head = new_head();
_methods = new_methods();
_bcis = new_bcis();
_mirrors = new_mirrors();
_cprefs = new_cprefs();
_index = 0;
}
@ -1440,8 +1454,9 @@ class BacktraceBuilder: public StackObj {
method = mhandle();
}
_methods->short_at_put(_index, method->method_idnum());
_methods->short_at_put(_index, method->orig_method_idnum());
_bcis->int_at_put(_index, merge_bci_and_version(bci, method->constants()->version()));
_cprefs->short_at_put(_index, method->name_index());
// We need to save the mirrors in the backtrace to keep the class
// from being unloaded while we still have this stack trace.
@ -1454,27 +1469,26 @@ class BacktraceBuilder: public StackObj {
// Print stack trace element to resource allocated buffer
char* java_lang_Throwable::print_stack_element_to_buffer(Handle mirror,
int method_id, int version, int bci) {
int method_id, int version, int bci, int cpref) {
// Get strings and string lengths
InstanceKlass* holder = InstanceKlass::cast(java_lang_Class::as_Klass(mirror()));
const char* klass_name = holder->external_name();
int buf_len = (int)strlen(klass_name);
// The method id may point to an obsolete method, can't get more stack information
Method* method = holder->method_with_idnum(method_id);
if (method == NULL) {
char* buf = NEW_RESOURCE_ARRAY(char, buf_len + 64);
// This is what the java code prints in this case - added Redefined
sprintf(buf, "\tat %s.null (Redefined)", klass_name);
return buf;
}
Method* method = holder->method_with_orig_idnum(method_id, version);
char* method_name = method->name()->as_C_string();
// The method can be NULL if the requested class version is gone
Symbol* sym = (method != NULL) ? method->name() : holder->constants()->symbol_at(cpref);
char* method_name = sym->as_C_string();
buf_len += (int)strlen(method_name);
// Use specific ik version as a holder since the mirror might
// refer to version that is now obsolete and no longer accessible
// via the previous versions list.
holder = holder->get_klass_version(version);
char* source_file_name = NULL;
if (version_matches(method, version)) {
if (holder != NULL) {
Symbol* source = holder->source_file_name();
if (source != NULL) {
source_file_name = source->as_C_string();
@ -1516,17 +1530,18 @@ char* java_lang_Throwable::print_stack_element_to_buffer(Handle mirror,
}
void java_lang_Throwable::print_stack_element(outputStream *st, Handle mirror,
int method_id, int version, int bci) {
int method_id, int version, int bci, int cpref) {
ResourceMark rm;
char* buf = print_stack_element_to_buffer(mirror, method_id, version, bci);
char* buf = print_stack_element_to_buffer(mirror, method_id, version, bci, cpref);
st->print_cr("%s", buf);
}
void java_lang_Throwable::print_stack_element(outputStream *st, methodHandle method, int bci) {
Handle mirror = method->method_holder()->java_mirror();
int method_id = method->method_idnum();
int method_id = method->orig_method_idnum();
int version = method->constants()->version();
print_stack_element(st, mirror, method_id, version, bci);
int cpref = method->name_index();
print_stack_element(st, mirror, method_id, version, bci, cpref);
}
const char* java_lang_Throwable::no_stack_trace_message() {
@ -1551,6 +1566,7 @@ void java_lang_Throwable::print_stack_trace(oop throwable, outputStream* st) {
typeArrayHandle methods (THREAD, BacktraceBuilder::get_methods(result));
typeArrayHandle bcis (THREAD, BacktraceBuilder::get_bcis(result));
objArrayHandle mirrors (THREAD, BacktraceBuilder::get_mirrors(result));
typeArrayHandle cprefs (THREAD, BacktraceBuilder::get_cprefs(result));
int length = methods()->length();
for (int index = 0; index < length; index++) {
@ -1560,7 +1576,8 @@ void java_lang_Throwable::print_stack_trace(oop throwable, outputStream* st) {
int method = methods->short_at(index);
int version = version_at(bcis->int_at(index));
int bci = bci_at(bcis->int_at(index));
print_stack_element(st, mirror, method, version, bci);
int cpref = cprefs->short_at(index);
print_stack_element(st, mirror, method, version, bci, cpref);
}
result = objArrayHandle(THREAD, objArrayOop(result->obj_at(trace_next_offset)));
}
@ -1837,29 +1854,30 @@ oop java_lang_Throwable::get_stack_trace_element(oop throwable, int index, TRAPS
if (chunk == NULL) {
THROW_(vmSymbols::java_lang_IndexOutOfBoundsException(), NULL);
}
// Get method id, bci, version and mirror from chunk
// Get method id, bci, version, mirror and cpref from chunk
typeArrayOop methods = BacktraceBuilder::get_methods(chunk);
typeArrayOop bcis = BacktraceBuilder::get_bcis(chunk);
objArrayOop mirrors = BacktraceBuilder::get_mirrors(chunk);
typeArrayOop cprefs = BacktraceBuilder::get_cprefs(chunk);
assert(methods != NULL && bcis != NULL && mirrors != NULL, "sanity check");
int method = methods->short_at(chunk_index);
int version = version_at(bcis->int_at(chunk_index));
int bci = bci_at(bcis->int_at(chunk_index));
int cpref = cprefs->short_at(chunk_index);
Handle mirror(THREAD, mirrors->obj_at(chunk_index));
// Chunk can be partially full
if (mirror.is_null()) {
THROW_(vmSymbols::java_lang_IndexOutOfBoundsException(), NULL);
}
oop element = java_lang_StackTraceElement::create(mirror, method, version, bci, CHECK_0);
oop element = java_lang_StackTraceElement::create(mirror, method, version, bci, cpref, CHECK_0);
return element;
}
oop java_lang_StackTraceElement::create(Handle mirror, int method_id,
int version, int bci, TRAPS) {
int version, int bci, int cpref, TRAPS) {
// Allocate java.lang.StackTraceElement instance
Klass* k = SystemDictionary::StackTraceElement_klass();
assert(k != NULL, "must be loaded in 1.4+");
@ -1876,17 +1894,13 @@ oop java_lang_StackTraceElement::create(Handle mirror, int method_id,
oop classname = StringTable::intern((char*) str, CHECK_0);
java_lang_StackTraceElement::set_declaringClass(element(), classname);
Method* method = holder->method_with_idnum(method_id);
// Method on stack may be obsolete because it was redefined so cannot be
// found by idnum.
if (method == NULL) {
// leave name and fileName null
java_lang_StackTraceElement::set_lineNumber(element(), -1);
return element();
}
Method* method = holder->method_with_orig_idnum(method_id, version);
// The method can be NULL if the requested class version is gone
Symbol* sym = (method != NULL) ? method->name() : holder->constants()->symbol_at(cpref);
// Fill in method name
oop methodname = StringTable::intern(method->name(), CHECK_0);
oop methodname = StringTable::intern(sym, CHECK_0);
java_lang_StackTraceElement::set_methodName(element(), methodname);
if (!version_matches(method, version)) {
@ -1895,6 +1909,11 @@ oop java_lang_StackTraceElement::create(Handle mirror, int method_id,
java_lang_StackTraceElement::set_lineNumber(element(), -1);
} else {
// Fill in source file name and line number.
// Use specific ik version as a holder since the mirror might
// refer to version that is now obsolete and no longer accessible
// via the previous versions list.
holder = holder->get_klass_version(version);
assert(holder != NULL, "sanity check");
Symbol* source = holder->source_file_name();
if (ShowHiddenFrames && source == NULL)
source = vmSymbols::unknown_class_name();
@ -1909,8 +1928,9 @@ oop java_lang_StackTraceElement::create(Handle mirror, int method_id,
oop java_lang_StackTraceElement::create(methodHandle method, int bci, TRAPS) {
Handle mirror (THREAD, method->method_holder()->java_mirror());
int method_id = method->method_idnum();
return create(mirror, method_id, method->constants()->version(), bci, THREAD);
int method_id = method->orig_method_idnum();
int cpref = method->name_index();
return create(mirror, method_id, method->constants()->version(), bci, cpref, THREAD);
}
void java_lang_reflect_AccessibleObject::compute_offsets() {

View file

@ -485,8 +485,9 @@ class java_lang_Throwable: AllStatic {
trace_methods_offset = 0,
trace_bcis_offset = 1,
trace_mirrors_offset = 2,
trace_next_offset = 3,
trace_size = 4,
trace_cprefs_offset = 3,
trace_next_offset = 4,
trace_size = 5,
trace_chunk_size = 32
};
@ -497,7 +498,7 @@ class java_lang_Throwable: AllStatic {
static int static_unassigned_stacktrace_offset;
// Printing
static char* print_stack_element_to_buffer(Handle mirror, int method, int version, int bci);
static char* print_stack_element_to_buffer(Handle mirror, int method, int version, int bci, int cpref);
// StackTrace (programmatic access, new since 1.4)
static void clear_stacktrace(oop throwable);
// No stack trace available
@ -519,7 +520,7 @@ class java_lang_Throwable: AllStatic {
static void set_message(oop throwable, oop value);
static Symbol* detail_message(oop throwable);
static void print_stack_element(outputStream *st, Handle mirror, int method,
int version, int bci);
int version, int bci, int cpref);
static void print_stack_element(outputStream *st, methodHandle method, int bci);
static void print_stack_usage(Handle stream);
@ -1314,7 +1315,7 @@ class java_lang_StackTraceElement: AllStatic {
static void set_lineNumber(oop element, int value);
// Create an instance of StackTraceElement
static oop create(Handle mirror, int method, int version, int bci, TRAPS);
static oop create(Handle mirror, int method, int version, int bci, int cpref, TRAPS);
static oop create(methodHandle method, int bci, TRAPS);
// Debugging
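Spelled out, the five-slot chunk layout introduced by trace_cprefs_offset is (element kinds per the BacktraceBuilder hunks above):

// One backtrace chunk (an Object[trace_size]) after this change:
//   chunk[0]  short[]  methods - orig_method_idnum() per frame
//   chunk[1]  int[]    bcis    - merge_bci_and_version(bci, cp version)
//   chunk[2]  Object[] mirrors - class mirrors, keep classes alive
//   chunk[3]  short[]  cprefs  - cp index of the method-name Symbol, so
//                                the name survives class redefinition
//   chunk[4]  Object[] next    - link to the next chunk of 32 frames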

View file

@ -657,6 +657,7 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
bool this_uninit = false; // Set to true when invokespecial <init> initialized 'this'
bool verified_exc_handlers = false;
// Merge with the next instruction
{
@ -688,6 +689,18 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
}
}
// Look for possible jump target in exception handlers and see if it
// matches current_frame. Do this check here for astore*, dstore*,
// fstore*, istore*, and lstore* opcodes because they can change the type
// state by adding a local. JVM Spec says that the incoming type state
// should be used for this check. So, do the check here before a possible
// local is added to the type state.
if (Bytecodes::is_store_into_local(opcode) && bci >= ex_min && bci < ex_max) {
verify_exception_handler_targets(
bci, this_uninit, &current_frame, &stackmap_table, CHECK_VERIFY(this));
verified_exc_handlers = true;
}
switch (opcode) {
case Bytecodes::_nop :
no_control_flow = false; break;
@ -1669,9 +1682,13 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
} // end switch
} // end Merge with the next instruction
// Look for possible jump target in exception handlers and see if it
// matches current_frame
if (bci >= ex_min && bci < ex_max) {
// Look for possible jump target in exception handlers and see if it matches
// current_frame. Don't do this check if it has already been done (for
// [a,d,f,i,l]store* opcodes). This check cannot be done earlier because
// opcodes, such as invokespecial, may set the this_uninit flag.
assert(!(verified_exc_handlers && this_uninit),
"Exception handler targets got verified before this_uninit got set");
if (!verified_exc_handlers && bci >= ex_min && bci < ex_max) {
verify_exception_handler_targets(
bci, this_uninit, &current_frame, &stackmap_table, CHECK_VERIFY(this));
}
@ -2236,14 +2253,20 @@ void ClassVerifier::verify_field_instructions(RawBytecodeStream* bcs,
}
// Look at the method's handlers. If the bci is in the handler's try block
// then check if the handler_pc is already on the stack. If not, push it.
// then check if the handler_pc is already on the stack. If not, push it
// unless the handler has already been scanned.
void ClassVerifier::push_handlers(ExceptionTable* exhandlers,
GrowableArray<u4>* handler_list,
GrowableArray<u4>* handler_stack,
u4 bci) {
int exlength = exhandlers->length();
for(int x = 0; x < exlength; x++) {
if (bci >= exhandlers->start_pc(x) && bci < exhandlers->end_pc(x)) {
handler_stack->append_if_missing(exhandlers->handler_pc(x));
u4 exhandler_pc = exhandlers->handler_pc(x);
if (!handler_list->contains(exhandler_pc)) {
handler_stack->append_if_missing(exhandler_pc);
handler_list->append(exhandler_pc);
}
}
}
}
@ -2261,6 +2284,10 @@ bool ClassVerifier::ends_in_athrow(u4 start_bc_offset) {
GrowableArray<u4>* bci_stack = new GrowableArray<u4>(30);
// Create stack for handlers for try blocks containing this handler.
GrowableArray<u4>* handler_stack = new GrowableArray<u4>(30);
// Create list of handlers that have been pushed onto the handler_stack
// so that handlers embedded inside of their own TRY blocks only get
// scanned once.
GrowableArray<u4>* handler_list = new GrowableArray<u4>(30);
// Create list of visited branch opcodes (goto* and if*).
GrowableArray<u4>* visited_branches = new GrowableArray<u4>(30);
ExceptionTable exhandlers(_method());
@ -2279,7 +2306,7 @@ bool ClassVerifier::ends_in_athrow(u4 start_bc_offset) {
// If the bytecode is in a TRY block, push its handlers so they
// will get parsed.
push_handlers(&exhandlers, handler_stack, bci);
push_handlers(&exhandlers, handler_list, handler_stack, bci);
switch (opcode) {
case Bytecodes::_if_icmpeq:

View file

@ -305,9 +305,10 @@ class ClassVerifier : public StackObj {
bool* this_uninit, constantPoolHandle cp, StackMapTable* stackmap_table,
TRAPS);
// Used by ends_in_athrow() to push all handlers that contain bci onto
// the handler_stack, if the handler is not already on the stack.
// Used by ends_in_athrow() to push all handlers that contain bci onto the
// handler_stack, if the handler has not already been pushed on the stack.
void push_handlers(ExceptionTable* exhandlers,
GrowableArray<u4>* handler_list,
GrowableArray<u4>* handler_stack,
u4 bci);
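Schematically, the case the new handler_list parameter guards against:

// The case the list guards against (illustrative bci ranges):
//   exception table entry: start_pc = 0, end_pc = 20, handler_pc = 10
// The handler at bci 10 lies inside its own protected range, so while
// ends_in_athrow() walks bcis 10..19 it keeps calling push_handlers() for
// a range containing the handler itself; without handler_list the pc would
// be re-pushed after every pop and the handler rescanned repeatedly.
// Recording each pushed pc in handler_list bounds the work to one scan
// per handler.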

View file

@ -559,6 +559,7 @@
template(startRemoteAgent_name, "startRemoteManagementAgent") \
template(startLocalAgent_name, "startLocalManagementAgent") \
template(stopRemoteAgent_name, "stopRemoteManagementAgent") \
template(getAgentStatus_name, "getManagementAgentStatus") \
template(java_lang_management_ThreadInfo_constructor_signature, "(Ljava/lang/Thread;ILjava/lang/Object;Ljava/lang/Thread;JJJJ[Ljava/lang/StackTraceElement;)V") \
template(java_lang_management_ThreadInfo_with_locks_constructor_signature, "(Ljava/lang/Thread;ILjava/lang/Object;Ljava/lang/Thread;JJJJ[Ljava/lang/StackTraceElement;[Ljava/lang/Object;[I[Ljava/lang/Object;)V") \
template(long_long_long_long_void_signature, "(JJJJ)V") \

View file

@ -25,8 +25,8 @@
#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/sharedHeap.hpp"
#include "runtime/globals.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"

View file

@ -0,0 +1,31 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/specialized_oop_closures.hpp"
// Generate CMS specialized oop_oop_iterate functions.
SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(ALL_KLASS_OOP_OOP_ITERATE_DEFN)
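For readers new to the closure-specialization machinery, roughly what that macro line expands to (schematic; names abbreviated):

// SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(f) applies f to each CMS closure
// type (MarkRefsIntoClosure, PushAndMarkClosure, ...), and
// ALL_KLASS_OOP_OOP_ITERATE_DEFN(ClosureType, nv_suffix) stamps out the
// non-virtual specializations for every Klass subclass, roughly:
//   int InstanceKlass::oop_oop_iterate_nv(oop obj, PushAndMarkClosure* cl);
// Defining them in this one translation unit keeps the template bodies out
// of every file that merely includes the closure headers.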

View file

@ -32,6 +32,7 @@
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/resourceArea.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.inline.hpp"
@ -673,10 +674,10 @@ void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,
HeapWord* bottom, \
HeapWord* top, \
ClosureType* cl) { \
bool is_par = SharedHeap::heap()->n_par_threads() > 0; \
bool is_par = GenCollectedHeap::heap()->n_par_threads() > 0; \
if (is_par) { \
assert(SharedHeap::heap()->n_par_threads() == \
SharedHeap::heap()->workers()->active_workers(), "Mismatch"); \
assert(GenCollectedHeap::heap()->n_par_threads() == \
GenCollectedHeap::heap()->workers()->active_workers(), "Mismatch"); \
walk_mem_region_with_cl_par(mr, bottom, top, cl); \
} else { \
walk_mem_region_with_cl_nopar(mr, bottom, top, cl); \
@ -1907,11 +1908,11 @@ CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
assert(chunk->is_free() && ffc->is_free(), "Error");
_bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
if (rem_sz < SmallForDictionary) {
bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
bool is_par = (GenCollectedHeap::heap()->n_par_threads() > 0);
if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
assert(!is_par ||
(SharedHeap::heap()->n_par_threads() ==
SharedHeap::heap()->workers()->active_workers()), "Mismatch");
(GenCollectedHeap::heap()->n_par_threads() ==
GenCollectedHeap::heap()->workers()->active_workers()), "Mismatch");
returnChunkToFreeList(ffc);
split(size, rem_sz);
if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
@ -1982,7 +1983,7 @@ void CompactibleFreeListSpace::save_marks() {
bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
assert(_promoInfo.tracking(), "No preceding save_marks?");
assert(SharedHeap::heap()->n_par_threads() == 0,
assert(GenCollectedHeap::heap()->n_par_threads() == 0,
"Shouldn't be called if using parallel gc.");
return _promoInfo.noPromotions();
}
@ -1991,7 +1992,7 @@ bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
\
void CompactibleFreeListSpace:: \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
assert(SharedHeap::heap()->n_par_threads() == 0, \
assert(GenCollectedHeap::heap()->n_par_threads() == 0, \
"Shouldn't be called (yet) during parallel part of gc."); \
_promoInfo.promoted_oops_iterate##nv_suffix(blk); \
/* \
@ -2442,11 +2443,10 @@ void CompactibleFreeListSpace::verify() const {
{
VerifyAllOopsClosure cl(_collector, this, span, past_remark,
_collector->markBitMap());
CollectedHeap* ch = Universe::heap();
// Iterate over all oops in the heap. Uses the _no_header version
// since we are not interested in following the klass pointers.
ch->oop_iterate_no_header(&cl);
GenCollectedHeap::heap()->oop_iterate_no_header(&cl);
}
if (VerifyObjectStartArray) {

View file

@ -28,7 +28,7 @@
#include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
#include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/blockOffsetTable.hpp"
#include "memory/freeList.hpp"
#include "memory/space.hpp"

View file

@ -53,6 +53,7 @@
#include "memory/padded.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "memory/strongRootsScope.hpp"
#include "memory/tenuredGeneration.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
@ -64,6 +65,7 @@
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "services/runtimeService.hpp"
#include "utilities/stack.inline.hpp"
// statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
@ -208,10 +210,6 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
use_adaptive_freelists,
dictionaryChoice);
NOT_PRODUCT(debug_cms_space = _cmsSpace;)
if (_cmsSpace == NULL) {
vm_exit_during_initialization(
"CompactibleFreeListSpace allocation failure");
}
_cmsSpace->_gen = this;
_gc_stats = new CMSGCStats();
@@ -230,14 +228,8 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
_par_gc_thread_states =
NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads, mtGC);
if (_par_gc_thread_states == NULL) {
vm_exit_during_initialization("Could not allocate par gc structs");
}
for (uint i = 0; i < ParallelGCThreads; i++) {
_par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
if (_par_gc_thread_states[i] == NULL) {
vm_exit_during_initialization("Could not allocate par gc structs");
}
}
} else {
_par_gc_thread_states = NULL;
@@ -308,8 +300,6 @@ void CMSCollector::ref_processor_init() {
AdaptiveSizePolicy* CMSCollector::size_policy() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
assert(gch->kind() == CollectedHeap::GenCollectedHeap,
"Wrong type of heap");
return gch->gen_policy()->size_policy();
}
@@ -586,11 +576,6 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
return;
}
_hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
if (_hash_seed == NULL) {
warning("_hash_seed array allocation failure");
return;
}
typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
for (i = 0; i < num_queues; i++) {
PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
@@ -633,12 +618,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
_eden_chunk_index = 0;
_eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
_eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
if (_eden_chunk_array == NULL) {
_eden_chunk_capacity = 0;
warning("GC/CMS: _eden_chunk_array allocation failure");
}
}
assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");
// Support for parallelizing survivor space rescan
if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
@@ -648,52 +628,15 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
_survivor_plab_array = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
_survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
_cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
|| _cursor == NULL) {
warning("Failed to allocate survivor plab/chunk array");
if (_survivor_plab_array != NULL) {
FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
_survivor_plab_array = NULL;
}
if (_survivor_chunk_array != NULL) {
FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
_survivor_chunk_array = NULL;
}
if (_cursor != NULL) {
FREE_C_HEAP_ARRAY(size_t, _cursor);
_cursor = NULL;
}
} else {
_survivor_chunk_capacity = 2*max_plab_samples;
for (uint i = 0; i < ParallelGCThreads; i++) {
HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
if (vec == NULL) {
warning("Failed to allocate survivor plab array");
for (int j = i; j > 0; j--) {
FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array());
}
FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
_survivor_plab_array = NULL;
_survivor_chunk_array = NULL;
_survivor_chunk_capacity = 0;
break;
} else {
ChunkArray* cur =
::new (&_survivor_plab_array[i]) ChunkArray(vec,
max_plab_samples);
assert(cur->end() == 0, "Should be 0");
assert(cur->array() == vec, "Should be vec");
assert(cur->capacity() == max_plab_samples, "Error");
}
}
_survivor_chunk_capacity = 2*max_plab_samples;
for (uint i = 0; i < ParallelGCThreads; i++) {
HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
assert(cur->end() == 0, "Should be 0");
assert(cur->array() == vec, "Should be vec");
assert(cur->capacity() == max_plab_samples, "Error");
}
}
assert( ( _survivor_plab_array != NULL
&& _survivor_chunk_array != NULL)
|| ( _survivor_chunk_capacity == 0
&& _survivor_chunk_index == 0),
"Error");
NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
_gc_counters = new CollectorCounters("CMS", 1);
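The NULL-handling blocks deleted throughout this constructor are dead code once allocation failure aborts the VM (NEW_C_HEAP_ARRAY's default failure strategy exits on out-of-memory), so callers never observe a NULL result. A hedged sketch of that allocate-or-abort idiom (new_array_or_exit is an invented stand-in, not a HotSpot API):

#include <cstdio>
#include <cstdlib>

template <typename T>
static T* new_array_or_exit(size_t len, const char* what) {
  T* p = static_cast<T*>(calloc(len, sizeof(T)));
  if (p == NULL) {
    fprintf(stderr, "out of memory allocating %s\n", what);
    exit(2);                        // mirrors vm_exit_out_of_memory: no NULL escapes
  }
  return p;
}

int main() {
  int* hash_seed = new_array_or_exit<int>(64, "_hash_seed");
  free(hash_seed);                  // call sites need no NULL checks or cleanup paths
  return 0;
}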
@@ -1037,7 +980,7 @@ oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
assert_lock_strong(freelistLock());
#ifndef PRODUCT
if (Universe::heap()->promotion_should_fail()) {
if (GenCollectedHeap::heap()->promotion_should_fail()) {
return NULL;
}
#endif // #ifndef PRODUCT
@@ -1114,7 +1057,7 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
oop old, markOop m,
size_t word_sz) {
#ifndef PRODUCT
if (Universe::heap()->promotion_should_fail()) {
if (GenCollectedHeap::heap()->promotion_should_fail()) {
return NULL;
}
#endif // #ifndef PRODUCT
@@ -2524,7 +2467,7 @@ void CMSCollector::verify_after_remark_work_1() {
verification_mark_bm()->iterate(&vcl);
if (vcl.failed()) {
gclog_or_tty->print("Verification failed");
Universe::heap()->print_on(gclog_or_tty);
gch->print_on(gclog_or_tty);
fatal("CMS: failed marking verification after remark");
}
}
@@ -3071,10 +3014,10 @@ void CMSCollector::checkpointRootsInitialWork() {
gch->set_par_threads(n_workers);
initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
if (n_workers > 1) {
GenCollectedHeap::StrongRootsScope srs(gch);
StrongRootsScope srs;
workers->run_task(&tsk);
} else {
GenCollectedHeap::StrongRootsScope srs(gch);
StrongRootsScope srs;
tsk.work(0);
}
gch->set_par_threads(0);
@@ -5169,11 +5112,11 @@ void CMSCollector::do_remark_parallel() {
// necessarily be so, since it's possible that we are doing
// ST marking.
ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
GenCollectedHeap::StrongRootsScope srs(gch);
StrongRootsScope srs;
workers->run_task(&tsk);
} else {
ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
GenCollectedHeap::StrongRootsScope srs(gch);
StrongRootsScope srs;
tsk.work(0);
}
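The StrongRootsScope hunks in this file swap the heap-bound GenCollectedHeap::StrongRootsScope for the free-standing StrongRootsScope, but the usage stays the same RAII bracket around root processing. A toy sketch of the pattern (invented names):

#include <cstdio>

struct StrongRootsScopeSketch {
  StrongRootsScopeSketch()  { printf("strong roots: begin\n"); }  // e.g. bump an epoch
  ~StrongRootsScopeSketch() { printf("strong roots: end\n"); }    // runs on every exit path
};

static void run_remark_task() { printf("remark task runs\n"); }

int main() {
  {
    StrongRootsScopeSketch srs;     // scope spans exactly the task execution
    run_remark_task();
  }                                 // destructor fires here
  return 0;
}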
@@ -5241,7 +5184,7 @@ void CMSCollector::do_remark_non_parallel() {
verify_work_stacks_empty();
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
GenCollectedHeap::StrongRootsScope srs(gch);
StrongRootsScope srs;
gch->gen_process_roots(_cmsGen->level(),
true, // younger gens as roots
View file
@@ -38,8 +38,8 @@
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memoryService.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/stack.hpp"
#include "utilities/taskqueue.hpp"
#include "utilities/yieldingWorkgroup.hpp"
View file
@@ -27,7 +27,7 @@
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/shared/concurrentGCThread.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/thread.hpp"
class ConcurrentMarkSweepGeneration;
class CMSCollector;
View file
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "memory/genOopClosures.hpp"
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
#include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp"
#include "oops/markOop.inline.hpp"
View file
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -62,7 +62,7 @@ void VM_CMS_Operation::verify_before_gc() {
HandleMark hm;
FreelistLocker x(_collector);
MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
Universe::heap()->prepare_for_verify();
GenCollectedHeap::heap()->prepare_for_verify();
Universe::verify();
}
}
View file
@@ -34,6 +34,7 @@
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
@@ -46,6 +47,7 @@
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "memory/strongRootsScope.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
@@ -115,7 +117,7 @@ void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
}
size_t CMBitMap::compute_size(size_t heap_size) {
return heap_size / mark_distance();
return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}
size_t CMBitMap::mark_distance() {
@@ -1325,7 +1327,7 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
if (VerifyDuringGC) {
HandleMark hm; // handle scope
Universe::heap()->prepare_for_verify();
g1h->prepare_for_verify();
Universe::verify(VerifyOption_G1UsePrevMarking,
" VerifyDuringGC:(before)");
}
@@ -1352,7 +1354,7 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
// Verify the heap w.r.t. the previous marking bitmap.
if (VerifyDuringGC) {
HandleMark hm; // handle scope
Universe::heap()->prepare_for_verify();
g1h->prepare_for_verify();
Universe::verify(VerifyOption_G1UsePrevMarking,
" VerifyDuringGC:(overflow)");
}
@@ -1378,7 +1380,7 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
if (VerifyDuringGC) {
HandleMark hm; // handle scope
Universe::heap()->prepare_for_verify();
g1h->prepare_for_verify();
Universe::verify(VerifyOption_G1UseNextMarking,
" VerifyDuringGC:(after)");
}
@@ -1986,13 +1988,13 @@ void ConcurrentMark::cleanup() {
if (VerifyDuringGC) {
HandleMark hm; // handle scope
Universe::heap()->prepare_for_verify();
g1h->prepare_for_verify();
Universe::verify(VerifyOption_G1UsePrevMarking,
" VerifyDuringGC:(before)");
}
g1h->check_bitmaps("Cleanup Start");
G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
G1CollectorPolicy* g1p = g1h->g1_policy();
g1p->record_concurrent_mark_cleanup_start();
double start = os::elapsedTime();
@@ -2097,7 +2099,7 @@ void ConcurrentMark::cleanup() {
if (VerifyDuringGC) {
HandleMark hm; // handle scope
Universe::heap()->prepare_for_verify();
g1h->prepare_for_verify();
Universe::verify(VerifyOption_G1UsePrevMarking,
" VerifyDuringGC:(after)");
}
@@ -2650,7 +2652,7 @@ void ConcurrentMark::checkpointRootsFinalWork() {
g1h->ensure_parsability(false);
G1CollectedHeap::StrongRootsScope srs(g1h);
StrongRootsScope srs;
// this is remark, so we'll use up all active threads
uint active_workers = g1h->workers()->active_workers();
if (active_workers == 0) {
@@ -3392,22 +3394,29 @@ void ConcurrentMark::print_finger() {
}
#endif
void CMTask::scan_object(oop obj) {
template<bool scan>
inline void CMTask::process_grey_object(oop obj) {
assert(scan || obj->is_typeArray(), "Skipping scan of grey non-typeArray");
assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
if (_cm->verbose_high()) {
gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT,
gclog_or_tty->print_cr("[%u] processing grey object " PTR_FORMAT,
_worker_id, p2i((void*) obj));
}
size_t obj_size = obj->size();
_words_scanned += obj_size;
obj->oop_iterate(_cm_oop_closure);
if (scan) {
obj->oop_iterate(_cm_oop_closure);
}
statsOnly( ++_objs_scanned );
check_limits();
}
template void CMTask::process_grey_object<true>(oop);
template void CMTask::process_grey_object<false>(oop);
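The rewrite above folds scanning and scan-free bookkeeping into one body whose variant is chosen at compile time and explicitly instantiated for both values. A self-contained sketch of the same technique (toy types, not HotSpot's):

#include <cstdio>

struct ToyObj { int words; };

template <bool scan>
void process_grey_object(const ToyObj& obj) {
  printf("accounted %d words\n", obj.words);   // bookkeeping happens in both variants
  if (scan) {                                  // resolved at compile time
    printf("scanning references\n");
  }
}

// Explicit instantiations, mirroring the two the collector emits.
template void process_grey_object<true>(const ToyObj&);
template void process_grey_object<false>(const ToyObj&);

int main() {
  process_grey_object<true>(ToyObj{8});        // ordinary object: scan its references
  process_grey_object<false>(ToyObj{4096});    // typeArray: nothing to scan
  return 0;
}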
// Closure for iteration over bitmaps
class CMBitMapClosure : public BitMapClosure {
private:
View file
@@ -1100,6 +1100,12 @@ private:
void regular_clock_call();
bool concurrent() { return _concurrent; }
// Test whether objAddr might have already been passed over by the
// mark bitmap scan, and so needs to be pushed onto the mark stack.
bool is_below_finger(HeapWord* objAddr, HeapWord* global_finger) const;
template<bool scan> void process_grey_object(oop obj);
public:
// It resets the task; it should be called right at the beginning of
// a marking phase.
@@ -1152,7 +1158,7 @@ public:
inline void deal_with_reference(oop obj);
// It scans an object and visits its children.
void scan_object(oop obj);
void scan_object(oop obj) { process_grey_object<true>(obj); }
// It pushes an object on the local queue.
inline void push(oop obj);
View file
@@ -259,14 +259,35 @@ inline void CMTask::push(oop obj) {
++_local_pushes );
}
// This determines whether the method below will check both the local
// and global fingers when determining whether to push on the stack a
// gray object (value 1) or whether it will only check the global one
// (value 0). The tradeoffs are that the former will be a bit more
// accurate and possibly push less on the stack, but it might also be
// a little bit slower.
inline bool CMTask::is_below_finger(HeapWord* objAddr,
HeapWord* global_finger) const {
// If objAddr is above the global finger, then the mark bitmap scan
// will find it later, and no push is needed. Similarly, if we have
// a current region and objAddr is between the local finger and the
// end of the current region, then no push is needed. The tradeoff
// of checking both vs only checking the global finger is that the
// local check will be more accurate and so result in fewer pushes,
// but may also be a little slower.
if (_finger != NULL) {
// We have a current region.
#define _CHECK_BOTH_FINGERS_ 1
// Finger and region values are all NULL or all non-NULL. We
// use _finger to check since we immediately use its value.
assert(_curr_region != NULL, "invariant");
assert(_region_limit != NULL, "invariant");
assert(_region_limit <= global_finger, "invariant");
// True if objAddr is less than the local finger, or is between
// the region limit and the global finger.
if (objAddr < _finger) {
return true;
} else if (objAddr < _region_limit) {
return false;
} // Else check global finger.
}
// Check global finger.
return objAddr < global_finger;
}
inline void CMTask::deal_with_reference(oop obj) {
if (_cm->verbose_high()) {
@@ -297,50 +318,43 @@ inline void CMTask::deal_with_reference(oop obj) {
// CAS done in CMBitMap::parMark() call in the routine above.
HeapWord* global_finger = _cm->finger();
#if _CHECK_BOTH_FINGERS_
// we will check both the local and global fingers
if (_finger != NULL && objAddr < _finger) {
if (_cm->verbose_high()) {
gclog_or_tty->print_cr("[%u] below the local finger ("PTR_FORMAT"), "
"pushing it", _worker_id, p2i(_finger));
// We only need to push a newly grey object on the mark
// stack if it is in a section of memory the mark bitmap
// scan has already examined. Mark bitmap scanning
// maintains progress "fingers" for determining that.
//
// Notice that the global finger might be moving forward
// concurrently. This is not a problem. In the worst case, we
// mark the object while it is above the global finger and, by
// the time we read the global finger, it has moved forward
// past this object. In this case, the object will probably
// be visited when a task is scanning the region and will also
// be pushed on the stack. So, some duplicate work, but no
// correctness problems.
if (is_below_finger(objAddr, global_finger)) {
if (obj->is_typeArray()) {
// Immediately process arrays of primitive types, rather
// than pushing on the mark stack. This keeps us from
// adding humongous objects to the mark stack that might
// be reclaimed before the entry is processed - see
// selection of candidates for eager reclaim of humongous
// objects. The cost of the additional type test is
// mitigated by avoiding a trip through the mark stack,
// by only doing a bookkeeping update and avoiding the
// actual scan of the object - a typeArray contains no
// references, and the metadata is built-in.
process_grey_object<false>(obj);
} else {
if (_cm->verbose_high()) {
gclog_or_tty->print_cr("[%u] below a finger (local: " PTR_FORMAT
", global: " PTR_FORMAT ") pushing "
PTR_FORMAT " on mark stack",
_worker_id, p2i(_finger),
p2i(global_finger), p2i(objAddr));
}
push(obj);
}
push(obj);
} else if (_curr_region != NULL && objAddr < _region_limit) {
// do nothing
} else if (objAddr < global_finger) {
// Notice that the global finger might be moving forward
// concurrently. This is not a problem. In the worst case, we
// mark the object while it is above the global finger and, by
// the time we read the global finger, it has moved forward
// passed this object. In this case, the object will probably
// be visited when a task is scanning the region and will also
// be pushed on the stack. So, some duplicate work, but no
// correctness problems.
if (_cm->verbose_high()) {
gclog_or_tty->print_cr("[%u] below the global finger "
"("PTR_FORMAT"), pushing it",
_worker_id, p2i(global_finger));
}
push(obj);
} else {
// do nothing
}
#else // _CHECK_BOTH_FINGERS_
// we will only check the global finger
if (objAddr < global_finger) {
// see long comment above
if (_cm->verbose_high()) {
gclog_or_tty->print_cr("[%u] below the global finger "
"("PTR_FORMAT"), pushing it",
_worker_id, p2i(global_finger));
}
push(obj);
}
#endif // _CHECK_BOTH_FINGERS_
}
}
}
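Taken together, the two hunks above reduce the decision to: push (or immediately process) a newly marked object only if the bitmap scan has already passed it, and process typeArrays on the spot instead of pushing them. A compact sketch with plain pointers standing in for the marking state (all names invented):

#include <cstddef>

struct MarkTaskSketch {
  char* local_finger;     // NULL when the task has no current region
  char* region_limit;     // end of the current region, if any

  // Mirrors is_below_finger(): local check first, then the global finger.
  bool is_below_finger(char* addr, char* global_finger) const {
    if (local_finger != NULL) {
      if (addr < local_finger) return true;    // already passed locally
      if (addr < region_limit) return false;   // the current scan will reach it
    }
    return addr < global_finger;               // otherwise defer to the global finger
  }

  void deal_with_marked(char* addr, char* global_finger, bool is_type_array) {
    if (is_below_finger(addr, global_finger)) {
      if (is_type_array) {
        // No references inside: account for it without a mark-stack round trip.
      } else {
        // Push on the mark stack for later scanning.
      }
    }
    // Above the fingers: the bitmap scan will visit the object, nothing to do.
  }
};

int main() {
  char heap[16];
  MarkTaskSketch task = { NULL, NULL };
  task.deal_with_marked(heap + 2, heap + 8, false);  // below the global finger: push
  return 0;
}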
View file
@@ -29,6 +29,9 @@
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1InCSetState.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "gc_interface/collectedHeap.hpp"
class EvacuationInfo;
// Base class for G1 allocators.
class G1Allocator : public CHeapObj<mtGC> {
View file
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "memory/space.hpp"
@@ -303,9 +304,9 @@ void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_
assert(blk_start <= threshold, "blk_start should be at or before threshold");
assert(pointer_delta(threshold, blk_start) <= N_words,
"offset should be <= BlockOffsetSharedArray::N");
assert(Universe::heap()->is_in_reserved(blk_start),
assert(G1CollectedHeap::heap()->is_in_reserved(blk_start),
"reference must be into the heap");
assert(Universe::heap()->is_in_reserved(blk_end-1),
assert(G1CollectedHeap::heap()->is_in_reserved(blk_end-1),
"limit must be within the heap");
assert(threshold == _array->_reserved.start() + index*N_words,
"index must agree with threshold");
@@ -458,7 +459,7 @@ G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array,
}
HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold_raw() {
assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
assert(!G1CollectedHeap::heap()->is_in_reserved(_array->_offset_array),
"just checking");
_next_offset_index = _array->index_for_raw(_bottom);
_next_offset_index++;
@@ -468,7 +469,7 @@ HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold_raw() {
}
void G1BlockOffsetArrayContigSpace::zero_bottom_entry_raw() {
assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
assert(!G1CollectedHeap::heap()->is_in_reserved(_array->_offset_array),
"just checking");
size_t bottom_index = _array->index_for_raw(_bottom);
assert(_array->address_for_index_raw(bottom_index) == _bottom,
@@ -477,7 +478,7 @@ void G1BlockOffsetArrayContigSpace::zero_bottom_entry_raw() {
}
HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
assert(!G1CollectedHeap::heap()->is_in_reserved(_array->_offset_array),
"just checking");
_next_offset_index = _array->index_for(_bottom);
_next_offset_index++;
View file
@@ -70,6 +70,7 @@
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"
size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
@@ -1728,7 +1729,7 @@ void G1CollectedHeap::shrink(size_t shrink_bytes) {
G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
SharedHeap(),
CollectedHeap(),
_g1_policy(policy_),
_dirty_card_queue_set(false),
_into_cset_dirty_card_queue_set(false),
@@ -1746,7 +1747,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
_old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
_humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
_humongous_is_live(),
_humongous_reclaim_candidates(),
_has_humongous_reclaim_candidates(false),
_free_regions_coming(false),
_young_list(new YoungList(this)),
@@ -1770,6 +1771,11 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_g1h = this;
_workers = new FlexibleWorkGang("GC Thread", ParallelGCThreads,
/* are_GC_task_threads */true,
/* are_ConcurrentGC_threads */false);
_workers->initialize_workers();
_allocator = G1Allocator::create_allocator(_g1h);
_humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
@@ -1797,6 +1803,26 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
guarantee(_task_queues != NULL, "task_queues allocation failure.");
}
G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
size_t size,
size_t translation_factor) {
size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);
// Allocate a new reserved space, preferring to use large pages.
ReservedSpace rs(size, preferred_page_size);
G1RegionToSpaceMapper* result =
G1RegionToSpaceMapper::create_mapper(rs,
size,
rs.alignment(),
HeapRegion::GrainBytes,
translation_factor,
mtGC);
if (TracePageSizes) {
gclog_or_tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT " size=" SIZE_FORMAT " alignment=" SIZE_FORMAT " reqsize=" SIZE_FORMAT,
description, preferred_page_size, p2i(rs.base()), rs.size(), rs.alignment(), size);
}
return result;
}
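The new helper collapses several near-identical reserve-and-wrap sequences (BOT, card table, card counts, the two mark bitmaps) into one function parameterized by size and translation factor. A standalone sketch of the refactor's shape, with invented types standing in for ReservedSpace and G1RegionToSpaceMapper:

#include <cstdio>
#include <cstdlib>

struct MapperSketch { void* base; size_t size; size_t translation_factor; };

static MapperSketch create_aux_memory_mapper(const char* description,
                                             size_t size,
                                             size_t translation_factor) {
  void* base = malloc(size);       // stand-in for the page-size-aware reservation
  printf("G1 '%s': base=%p size=%zu factor=%zu\n",
         description, base, size, translation_factor);
  MapperSketch m = { base, size, translation_factor };
  return m;                        // caller owns the mapping
}

int main() {
  // Mirrors the call sites in initialize() below: one line per auxiliary structure.
  MapperSketch bot    = create_aux_memory_mapper("Block offset table", 1u << 20, 512);
  MapperSketch cards  = create_aux_memory_mapper("Card table",         1u << 20, 512);
  MapperSketch bitmap = create_aux_memory_mapper("Prev Bitmap",        1u << 17, 64);
  free(bot.base); free(cards.base); free(bitmap.base);
  return 0;
}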
jint G1CollectedHeap::initialize() {
CollectedHeap::pre_initialize();
os::enable_vtime();
@@ -1864,57 +1890,35 @@ jint G1CollectedHeap::initialize() {
ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
G1RegionToSpaceMapper* heap_storage =
G1RegionToSpaceMapper::create_mapper(g1_rs,
g1_rs.size(),
UseLargePages ? os::large_page_size() : os::vm_page_size(),
HeapRegion::GrainBytes,
1,
mtJavaHeap);
heap_storage->set_mapping_changed_listener(&_listener);
// Reserve space for the block offset table. We do not support automatic uncommit
// for the card table at this time. BOT only.
ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
// Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
G1RegionToSpaceMapper* bot_storage =
G1RegionToSpaceMapper::create_mapper(bot_rs,
os::vm_page_size(),
HeapRegion::GrainBytes,
G1BlockOffsetSharedArray::N_bytes,
mtGC);
create_aux_memory_mapper("Block offset table",
G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize),
G1BlockOffsetSharedArray::N_bytes);
ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));
G1RegionToSpaceMapper* cardtable_storage =
G1RegionToSpaceMapper::create_mapper(cardtable_rs,
os::vm_page_size(),
HeapRegion::GrainBytes,
G1BlockOffsetSharedArray::N_bytes,
mtGC);
create_aux_memory_mapper("Card table",
G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize),
G1BlockOffsetSharedArray::N_bytes);
// Reserve space for the card counts table.
ReservedSpace card_counts_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
G1RegionToSpaceMapper* card_counts_storage =
G1RegionToSpaceMapper::create_mapper(card_counts_rs,
os::vm_page_size(),
HeapRegion::GrainBytes,
G1BlockOffsetSharedArray::N_bytes,
mtGC);
create_aux_memory_mapper("Card counts table",
G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize),
G1BlockOffsetSharedArray::N_bytes);
// Reserve space for prev and next bitmap.
size_t bitmap_size = CMBitMap::compute_size(g1_rs.size());
ReservedSpace prev_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
G1RegionToSpaceMapper* prev_bitmap_storage =
G1RegionToSpaceMapper::create_mapper(prev_bitmap_rs,
os::vm_page_size(),
HeapRegion::GrainBytes,
CMBitMap::mark_distance(),
mtGC);
ReservedSpace next_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
create_aux_memory_mapper("Prev Bitmap", bitmap_size, CMBitMap::mark_distance());
G1RegionToSpaceMapper* next_bitmap_storage =
G1RegionToSpaceMapper::create_mapper(next_bitmap_rs,
os::vm_page_size(),
HeapRegion::GrainBytes,
CMBitMap::mark_distance(),
mtGC);
create_aux_memory_mapper("Next Bitmap", bitmap_size, CMBitMap::mark_distance());
_hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
g1_barrier_set()->initialize(cardtable_storage);
@@ -1937,8 +1941,14 @@ jint G1CollectedHeap::initialize() {
_g1h = this;
_in_cset_fast_test.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);
_humongous_is_live.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);
{
HeapWord* start = _hrm.reserved().start();
HeapWord* end = _hrm.reserved().end();
size_t granularity = HeapRegion::GrainBytes;
_in_cset_fast_test.initialize(start, end, granularity);
_humongous_reclaim_candidates.initialize(start, end, granularity);
}
// Create the ConcurrentMark data structure and thread.
// (Must do this late, so that "max_regions" is defined.)
@@ -2026,15 +2036,15 @@ void G1CollectedHeap::stop() {
}
}
void G1CollectedHeap::clear_humongous_is_live_table() {
guarantee(G1EagerReclaimHumongousObjects, "Should only be called if true");
_humongous_is_live.clear();
}
size_t G1CollectedHeap::conservative_max_heap_alignment() {
return HeapRegion::max_region_size();
}
void G1CollectedHeap::post_initialize() {
CollectedHeap::post_initialize();
ref_processing_init();
}
void G1CollectedHeap::ref_processing_init() {
// Reference processing in G1 currently works as follows:
//
@@ -2071,7 +2081,6 @@ void G1CollectedHeap::ref_processing_init() {
// * Discovery is atomic - i.e. not concurrent.
// * Reference discovery will not need a barrier.
SharedHeap::ref_processing_init();
MemRegion mr = reserved_region();
// Concurrent Mark ref processor
@@ -2128,6 +2137,7 @@ void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
}
#ifndef PRODUCT
class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
private:
unsigned _gc_time_stamp;
@@ -2462,11 +2472,6 @@ public:
}
};
void G1CollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
IterateOopClosureRegionClosure blk(cl);
heap_region_iterate(&blk);
}
// Iterates an ObjectClosure over all objects within a HeapRegion.
class IterateObjectClosureRegionClosure: public HeapRegionClosure {
@@ -2486,23 +2491,6 @@ void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
heap_region_iterate(&blk);
}
// Calls a SpaceClosure on a HeapRegion.
class SpaceClosureRegionClosure: public HeapRegionClosure {
SpaceClosure* _cl;
public:
SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
bool doHeapRegion(HeapRegion* r) {
_cl->do_space(r);
return false;
}
};
void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
SpaceClosureRegionClosure blk(cl);
heap_region_iterate(&blk);
}
void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
_hrm.iterate(cl);
}
@@ -2639,23 +2627,19 @@ HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) cons
return result;
}
Space* G1CollectedHeap::space_containing(const void* addr) const {
return heap_region_containing(addr);
}
HeapWord* G1CollectedHeap::block_start(const void* addr) const {
Space* sp = space_containing(addr);
return sp->block_start(addr);
HeapRegion* hr = heap_region_containing(addr);
return hr->block_start(addr);
}
size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
Space* sp = space_containing(addr);
return sp->block_size(addr);
HeapRegion* hr = heap_region_containing(addr);
return hr->block_size(addr);
}
bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
Space* sp = space_containing(addr);
return sp->block_is_obj(addr);
HeapRegion* hr = heap_region_containing(addr);
return hr->block_is_obj(addr);
}
bool G1CollectedHeap::supports_tlab_allocation() const {
@@ -3336,8 +3320,8 @@ void G1CollectedHeap::print_all_rsets() {
#endif // PRODUCT
G1CollectedHeap* G1CollectedHeap::heap() {
assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
"not a garbage-first heap");
assert(_g1h != NULL, "Uninitialized access to G1CollectedHeap::heap()");
assert(_g1h->kind() == CollectedHeap::G1CollectedHeap, "Not a G1 heap");
return _g1h;
}
@@ -3434,12 +3418,6 @@ size_t G1CollectedHeap::cards_scanned() {
return g1_rem_set()->cardsScanned();
}
bool G1CollectedHeap::humongous_region_is_always_live(uint index) {
HeapRegion* region = region_at(index);
assert(region->is_starts_humongous(), "Must start a humongous object");
return oop(region->bottom())->is_objArray() || !region->rem_set()->is_empty();
}
class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
private:
size_t _total_humongous;
@@ -3447,14 +3425,59 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
DirtyCardQueue _dcq;
bool humongous_region_is_candidate(uint index) {
HeapRegion* region = G1CollectedHeap::heap()->region_at(index);
assert(region->is_starts_humongous(), "Must start a humongous object");
// We don't nominate objects with many remembered set entries, on
// the assumption that such objects are likely still live.
bool is_remset_small(HeapRegion* region) const {
HeapRegionRemSet* const rset = region->rem_set();
bool const allow_stale_refs = G1EagerReclaimHumongousObjectsWithStaleRefs;
return !oop(region->bottom())->is_objArray() &&
((allow_stale_refs && rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)) ||
(!allow_stale_refs && rset->is_empty()));
return G1EagerReclaimHumongousObjectsWithStaleRefs
? rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)
: rset->is_empty();
}
bool is_typeArray_region(HeapRegion* region) const {
return oop(region->bottom())->is_typeArray();
}
bool humongous_region_is_candidate(G1CollectedHeap* heap, HeapRegion* region) const {
assert(region->is_starts_humongous(), "Must start a humongous object");
// Candidate selection must satisfy the following constraints
// while concurrent marking is in progress:
//
// * In order to maintain SATB invariants, an object must not be
// reclaimed if it was allocated before the start of marking and
// has not had its references scanned. Such an object must have
// its references (including type metadata) scanned to ensure no
// live objects are missed by the marking process. Objects
// allocated after the start of concurrent marking don't need to
// be scanned.
//
// * An object must not be reclaimed if it is on the concurrent
// mark stack. Objects allocated after the start of concurrent
// marking are never pushed on the mark stack.
//
// Nominating only objects allocated after the start of concurrent
// marking is sufficient to meet both constraints. This may miss
// some objects that satisfy the constraints, but the marking data
// structures don't support efficiently performing the needed
// additional tests or scrubbing of the mark stack.
//
// However, we presently only nominate is_typeArray() objects.
// A humongous object containing references induces remembered
// set entries on other regions. In order to reclaim such an
// object, those remembered sets would need to be cleaned up.
//
// We also treat is_typeArray() objects specially, allowing them
// to be reclaimed even if allocated before the start of
// concurrent mark. For this we rely on mark stack insertion to
// exclude is_typeArray() objects, preventing reclaiming an object
// that is in the mark stack. We also rely on the metadata for
// such objects to be built-in and so ensured to be kept live.
// Frequent allocation and drop of large binary blobs is an
// important use case for eager reclaim, and this special handling
// may reduce needed headroom.
return is_typeArray_region(region) && is_remset_small(region);
}
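A standalone restatement of the two-part candidate test above, with a toy type in place of HeapRegion and its remembered set (the constant is illustrative, standing in for G1RSetSparseRegionEntries):

#include <cstddef>

struct RegionSketch {
  bool   is_type_array;     // the humongous object is a typeArray
  size_t remset_occupied;   // remembered-set entry count for the region
};

static const size_t kSparseLimit = 4;

bool is_remset_small(const RegionSketch& r, bool allow_stale_refs) {
  return allow_stale_refs ? r.remset_occupied <= kSparseLimit
                          : r.remset_occupied == 0;
}

bool humongous_region_is_candidate(const RegionSketch& r, bool allow_stale_refs) {
  // Only typeArrays qualify: they hold no references and their metadata
  // is built in, so eager reclaim cannot strand live data.
  return r.is_type_array && is_remset_small(r, allow_stale_refs);
}

int main() {
  RegionSketch blob = { true, 2 };   // humongous typeArray with a tiny remset
  return humongous_region_is_candidate(blob, true) ? 0 : 1;
}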
public:
@@ -3470,14 +3493,17 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
}
G1CollectedHeap* g1h = G1CollectedHeap::heap();
uint region_idx = r->hrm_index();
bool is_candidate = humongous_region_is_candidate(region_idx);
// Is_candidate already filters out humongous objects with large remembered sets.
// If we have a humongous object with a few remembered set entries, we simply flush these
// remembered set entries into the DCQS. That will result in automatic
// re-evaluation of their remembered set entries during the following evacuation
// phase.
bool is_candidate = humongous_region_is_candidate(g1h, r);
uint rindex = r->hrm_index();
g1h->set_humongous_reclaim_candidate(rindex, is_candidate);
if (is_candidate) {
_candidate_humongous++;
g1h->register_humongous_region_with_cset(rindex);
// Is_candidate already filters out humongous objects with large remembered sets.
// If we have a humongous object with a few remembered set entries, we simply flush these
// remembered set entries into the DCQS. That will result in automatic
// re-evaluation of their remembered set entries during the following evacuation
// phase.
if (!r->rem_set()->is_empty()) {
guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
"Found a not-small remembered set here. This is inconsistent with previous assumptions.");
@@ -3499,8 +3525,6 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
r->rem_set()->clear_locked();
}
assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
g1h->register_humongous_region_with_cset(region_idx);
_candidate_humongous++;
}
_total_humongous++;
@@ -3520,6 +3544,7 @@ void G1CollectedHeap::register_humongous_regions_with_cset() {
}
double time = os::elapsed_counter();
// Collect reclaim candidate information and register candidates with cset.
RegisterHumongousWithInCSetFastTestClosure cl;
heap_region_iterate(&cl);
@@ -3529,10 +3554,6 @@
cl.candidate_humongous());
_has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
if (_has_humongous_reclaim_candidates || G1TraceEagerReclaimHumongousObjects) {
clear_humongous_is_live_table();
}
// Finally flush all remembered set entries to re-check into the global DCQS.
cl.flush_rem_set_entries();
}
@@ -5994,11 +6015,11 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
// required because stale remembered sets might reference locations that
// are currently allocated into.
uint region_idx = r->hrm_index();
if (g1h->humongous_is_live(region_idx) ||
g1h->humongous_region_is_always_live(region_idx)) {
if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
!r->rem_set()->is_empty()) {
if (G1TraceEagerReclaimHumongousObjects) {
gclog_or_tty->print_cr("Live humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
gclog_or_tty->print_cr("Live humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d reclaim candidate %d type array %d",
region_idx,
obj->size()*HeapWordSize,
r->bottom(),
@@ -6006,20 +6027,21 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
r->rem_set()->occupied(),
r->rem_set()->strong_code_roots_list_length(),
next_bitmap->isMarked(r->bottom()),
g1h->humongous_is_live(region_idx),
obj->is_objArray()
g1h->is_humongous_reclaim_candidate(region_idx),
obj->is_typeArray()
);
}
return false;
}
guarantee(!obj->is_objArray(),
err_msg("Eagerly reclaiming object arrays is not supported, but the object "PTR_FORMAT" is.",
guarantee(obj->is_typeArray(),
err_msg("Only eagerly reclaiming type arrays is supported, but the object "
PTR_FORMAT " is not.",
r->bottom()));
if (G1TraceEagerReclaimHumongousObjects) {
gclog_or_tty->print_cr("Dead humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
gclog_or_tty->print_cr("Dead humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d reclaim candidate %d type array %d",
region_idx,
obj->size()*HeapWordSize,
r->bottom(),
@@ -6027,8 +6049,8 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
r->rem_set()->occupied(),
r->rem_set()->strong_code_roots_list_length(),
next_bitmap->isMarked(r->bottom()),
g1h->humongous_is_live(region_idx),
obj->is_objArray()
g1h->is_humongous_reclaim_candidate(region_idx),
obj->is_typeArray()
);
}
// Need to clear mark bit of the humongous object if already set.
@@ -6163,8 +6185,6 @@ void G1CollectedHeap::wait_while_free_regions_coming() {
}
void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
assert(heap_lock_held_for_gc(),
"the heap lock should already be held by or for this thread");
_young_list->push_region(hr);
}
View file
@@ -40,9 +40,9 @@
#include "gc_implementation/g1/heapRegionSet.hpp"
#include "gc_implementation/shared/hSpaceCounters.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/barrierSet.hpp"
#include "memory/memRegion.hpp"
#include "memory/sharedHeap.hpp"
#include "utilities/stack.hpp"
// A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
@@ -76,6 +76,7 @@ class G1OldTracer;
class EvacuationFailedInfo;
class nmethod;
class Ticks;
class FlexibleWorkGang;
typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
@@ -177,7 +178,7 @@ class G1RegionMappingChangedListener : public G1MappingChangedListener {
virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
};
class G1CollectedHeap : public SharedHeap {
class G1CollectedHeap : public CollectedHeap {
friend class VM_CollectForMetadataAllocation;
friend class VM_G1CollectForAllocation;
friend class VM_G1CollectFull;
@@ -204,6 +205,8 @@ private:
// The one and only G1CollectedHeap, so static functions can find it.
static G1CollectedHeap* _g1h;
FlexibleWorkGang* _workers;
static size_t _humongous_object_threshold_in_words;
// The secondary free list which contains regions that have been
@@ -217,7 +220,6 @@ private:
// It keeps track of the humongous regions.
HeapRegionSet _humongous_set;
void clear_humongous_is_live_table();
void eagerly_reclaim_humongous_regions();
// The number of regions we could create by expansion.
@@ -287,22 +289,26 @@ private:
// Helper for monitoring and management support.
G1MonitoringSupport* _g1mm;
// Records whether the region at the given index is kept live by roots or
// references from the young generation.
class HumongousIsLiveBiasedMappedArray : public G1BiasedMappedArray<bool> {
// Records whether the region at the given index is (still) a
// candidate for eager reclaim. Only valid for humongous start
// regions; other regions have unspecified values. Humongous start
// regions are initialized at start of collection pause, with
// candidates removed from the set as they are found reachable from
// roots or the young generation.
class HumongousReclaimCandidates : public G1BiasedMappedArray<bool> {
protected:
bool default_value() const { return false; }
public:
void clear() { G1BiasedMappedArray<bool>::clear(); }
void set_live(uint region) {
set_by_index(region, true);
void set_candidate(uint region, bool value) {
set_by_index(region, value);
}
bool is_live(uint region) {
bool is_candidate(uint region) {
return get_by_index(region);
}
};
HumongousIsLiveBiasedMappedArray _humongous_is_live;
HumongousReclaimCandidates _humongous_reclaim_candidates;
// Stores whether during humongous object registration we found candidate regions.
// If not, we can skip a few steps.
bool _has_humongous_reclaim_candidates;
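The new table is the complement of the old "is live" map: entries start false and are set per candidate at the start of a pause. A minimal sketch of such a region-indexed flag table (the real class is a G1BiasedMappedArray<bool>; this flat version is only for illustration):

#include <cstddef>
#include <vector>

class ReclaimCandidatesSketch {
  std::vector<char> flags_;   // one flag per region; the default value is false
public:
  void initialize(size_t num_regions) { flags_.assign(num_regions, 0); }
  void set_candidate(unsigned region, bool value) { flags_[region] = value ? 1 : 0; }
  bool is_candidate(unsigned region) const { return flags_[region] != 0; }
};

int main() {
  ReclaimCandidatesSketch table;
  table.initialize(1024);
  table.set_candidate(7, true);    // nominated at pause start
  table.set_candidate(7, false);   // found reachable: drop the candidate
  return table.is_candidate(7) ? 1 : 0;
}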
@@ -351,6 +357,12 @@ private:
// heap after a compaction.
void print_hrm_post_compaction();
// Create a memory mapper for auxiliary data structures of the given size and
// translation factor.
static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
size_t size,
size_t translation_factor);
double verify(bool guard, const char* msg);
void verify_before_gc();
void verify_after_gc();
@@ -605,6 +617,7 @@ protected:
void enqueue_discovered_references(uint no_of_gc_workers);
public:
FlexibleWorkGang* workers() const { return _workers; }
G1Allocator* allocator() {
return _allocator;
@@ -630,21 +643,18 @@ public:
inline AllocationContextStats& allocation_context_stats();
// Do anything common to GC's.
virtual void gc_prologue(bool full);
virtual void gc_epilogue(bool full);
void gc_prologue(bool full);
void gc_epilogue(bool full);
// Modify the reclaim candidate set and test for presence.
// These are only valid for starts_humongous regions.
inline void set_humongous_reclaim_candidate(uint region, bool value);
inline bool is_humongous_reclaim_candidate(uint region);
// Remove from the reclaim candidate set. Also remove from the
// collection set so that later encounters avoid the slow path.
inline void set_humongous_is_live(oop obj);
bool humongous_is_live(uint region) {
return _humongous_is_live.is_live(region);
}
// Returns whether the given region (which must be a humongous (start) region)
// is to be considered conservatively live regardless of any other conditions.
bool humongous_region_is_always_live(uint index);
// Returns whether the given region (which must be a humongous (start) region)
// is considered a candidate for eager reclamation.
bool humongous_region_is_candidate(uint index);
// Register the given region to be part of the collection set.
inline void register_humongous_region_with_cset(uint index);
// Register regions with humongous objects (actually on the start region) in
@@ -1000,11 +1010,14 @@ public:
// Return the (conservative) maximum heap alignment for any G1 heap
static size_t conservative_max_heap_alignment();
// Does operations required after initialization has been done.
void post_initialize();
// Initialize weak reference processing.
virtual void ref_processing_init();
void ref_processing_init();
// Explicitly import set_par_threads into this scope
using SharedHeap::set_par_threads;
using CollectedHeap::set_par_threads;
// Set _n_par_threads according to a policy TBD.
void set_par_threads();
@@ -1251,10 +1264,6 @@ public:
// Iteration functions.
// Iterate over all the ref-containing fields of all objects, calling
// "cl.do_oop" on each.
virtual void oop_iterate(ExtendedOopClosure* cl);
// Iterate over all objects, calling "cl.do_object" on each.
virtual void object_iterate(ObjectClosure* cl);
@@ -1262,9 +1271,6 @@ public:
object_iterate(cl);
}
// Iterate over all spaces in use in the heap, in ascending address order.
virtual void space_iterate(SpaceClosure* cl);
// Iterate over heap regions, in address order, terminating the
// iteration early if the "doHeapRegion" method returns "true".
void heap_region_iterate(HeapRegionClosure* blk) const;
@@ -1307,10 +1313,6 @@ public:
HeapRegion* next_compaction_region(const HeapRegion* from) const;
// A CollectedHeap will contain some number of spaces. This finds the
// space containing a given address, or else returns NULL.
virtual Space* space_containing(const void* addr) const;
// Returns the HeapRegion that contains addr. addr must not be NULL.
template <class T>
inline HeapRegion* heap_region_containing_raw(const T addr) const;
@@ -1344,9 +1346,6 @@ public:
// the block is an object.
virtual bool block_is_obj(const HeapWord* addr) const;
// Does this heap support heap inspection? (+PrintClassHistogram)
virtual bool supports_heap_inspection() const { return true; }
// Section on thread-local allocation buffers (TLABs)
// See CollectedHeap for semantics.
View file
@@ -352,20 +352,30 @@ inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
return is_obj_ill(obj, heap_region_containing(obj));
}
inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
_humongous_reclaim_candidates.set_candidate(region, value);
}
inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
return _humongous_reclaim_candidates.is_candidate(region);
}
inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
uint region = addr_to_region((HeapWord*)obj);
// We not only set the "live" flag in the humongous_is_live table, but also
// Clear the flag in the humongous_reclaim_candidates table. Also
// reset the entry in the _in_cset_fast_test table so that subsequent references
// to the same humongous object do not go into the slow path again.
// This is racy, as multiple threads may at the same time enter here, but this
// is benign.
// During collection we only ever set the "live" flag, and only ever clear the
// During collection we only ever clear the "candidate" flag, and only ever clear the
// entry in the in_cset_fast_table.
// We only ever evaluate the contents of these tables (in the VM thread) after
// having synchronized the worker threads with the VM thread, or in the same
// thread (i.e. within the VM thread).
if (!_humongous_is_live.is_live(region)) {
_humongous_is_live.set_live(region);
if (is_humongous_reclaim_candidate(region)) {
set_humongous_reclaim_candidate(region, false);
_in_cset_fast_test.clear_humongous(region);
}
}
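A sketch of the check-then-clear above. HotSpot tolerates the plain-memory race because workers only ever clear the flag, so a duplicated clear is idempotent; the sketch uses relaxed atomics to keep the standalone C++ well-defined:

#include <atomic>

static const unsigned kMaxRegions = 2048;           // illustrative bound
static std::atomic<bool> reclaim_candidate[kMaxRegions];

void clear_candidate_sketch(unsigned region) {
  if (reclaim_candidate[region].load(std::memory_order_relaxed)) {
    reclaim_candidate[region].store(false, std::memory_order_relaxed);
    // The in-cset fast-test entry for 'region' is cleared here as well, so
    // later references to the same humongous object skip the slow path.
  }
}

int main() {
  reclaim_candidate[42].store(true, std::memory_order_relaxed);
  clear_candidate_sketch(42);   // first encounter clears the flag
  clear_candidate_sketch(42);   // a racing duplicate would be harmless
  return 0;
}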
View file
@@ -1460,7 +1460,7 @@ void G1CollectorPolicy::update_survivors_policy() {
_max_survivor_regions = (uint) ceil(max_survivor_regions_d);
_tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
HeapRegion::GrainWords * _max_survivor_regions);
HeapRegion::GrainWords * _max_survivor_regions, counters());
}
bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
View file
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2014 Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2015 Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -263,7 +263,6 @@ G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
_gc_par_phases[SystemDictionaryRoots] = new WorkerDataArray<double>(max_gc_threads, "SystemDictionary Roots (ms)", true, G1Log::LevelFinest, 3);
_gc_par_phases[CLDGRoots] = new WorkerDataArray<double>(max_gc_threads, "CLDG Roots (ms)", true, G1Log::LevelFinest, 3);
_gc_par_phases[JVMTIRoots] = new WorkerDataArray<double>(max_gc_threads, "JVMTI Roots (ms)", true, G1Log::LevelFinest, 3);
_gc_par_phases[CodeCacheRoots] = new WorkerDataArray<double>(max_gc_threads, "CodeCache Roots (ms)", true, G1Log::LevelFinest, 3);
_gc_par_phases[CMRefRoots] = new WorkerDataArray<double>(max_gc_threads, "CM RefProcessor Roots (ms)", true, G1Log::LevelFinest, 3);
_gc_par_phases[WaitForStrongCLD] = new WorkerDataArray<double>(max_gc_threads, "Wait For Strong CLD (ms)", true, G1Log::LevelFinest, 3);
_gc_par_phases[WeakCLDRoots] = new WorkerDataArray<double>(max_gc_threads, "Weak CLD Roots (ms)", true, G1Log::LevelFinest, 3);
View file
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2014 Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2015 Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,7 +51,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
SystemDictionaryRoots,
CLDGRoots,
JVMTIRoots,
CodeCacheRoots,
CMRefRoots,
WaitForStrongCLD,
WeakCLDRoots,
View file
@@ -29,7 +29,7 @@
#include "gc_implementation/g1/g1CardCounts.hpp"
#include "memory/allocation.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/globalDefinitions.hpp"
class DirtyCardQueue;
@@ -123,7 +123,7 @@ class G1HotCardCache: public CHeapObj<mtGC> {
// Resets the hot card cache and discards the entries.
void reset_hot_cache() {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
assert(Thread::current()->is_VM_thread(), "Current thread should be the VMthread");
assert(Thread::current_noinline()->is_VM_thread(), "Current thread should be the VMthread");
if (default_use_cache()) {
reset_hot_cache_internal();
}
View file
@@ -61,9 +61,8 @@ void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
bool clear_all_softrefs) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
SharedHeap* sh = SharedHeap::heap();
#ifdef ASSERT
if (sh->collector_policy()->should_clear_all_soft_refs()) {
if (G1CollectedHeap::heap()->collector_policy()->should_clear_all_soft_refs()) {
assert(clear_all_softrefs, "Policy should have been checked earlier");
}
#endif
@@ -102,11 +101,6 @@ void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
BiasedLocking::restore_marks();
GenMarkSweep::deallocate_stacks();
// "free at last gc" is calculated from these.
// CHF: cheating for now!!!
// Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
// Universe::set_heap_used_at_last_gc(Universe::heap()->used());
CodeCache::gc_epilogue();
JvmtiExport::gc_epilogue();
@@ -168,12 +162,12 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
Klass::clean_weak_klass_links(&GenMarkSweep::is_alive);
// Delete entries for dead interned string and clean up unreferenced symbols in symbol table.
G1CollectedHeap::heap()->unlink_string_and_symbol_table(&GenMarkSweep::is_alive);
g1h->unlink_string_and_symbol_table(&GenMarkSweep::is_alive);
if (VerifyDuringGC) {
HandleMark hm; // handle scope
COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
Universe::heap()->prepare_for_verify();
g1h->prepare_for_verify();
// Note: we can verify only the heap here. When an object is
// marked, the previous value of the mark word (including
// identity hash values, ages, etc) is preserved, and the mark
@@ -187,7 +181,7 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
if (!VerifySilently) {
gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying ");
}
Universe::heap()->verify(VerifySilently, VerifyOption_G1UseMarkWord);
g1h->verify(VerifySilently, VerifyOption_G1UseMarkWord);
if (!VerifySilently) {
gclog_or_tty->print_cr("]");
}
View file
@@ -25,7 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1MARKSWEEP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1MARKSWEEP_HPP
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "memory/genMarkSweep.hpp"
#include "memory/generation.hpp"
View file
@@ -23,9 +23,12 @@
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1ParScanThreadState.hpp"
#include "memory/iterator.inline.hpp"
#include "utilities/stack.inline.hpp"
G1ParCopyHelper::G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
G1ParClosureSuper(g1, par_scan_state), _scanned_klass(NULL),
@@ -50,3 +53,6 @@ void G1ParClosureSuper::set_par_scan_thread_state(G1ParScanThreadState* par_scan
assert(_worker_id < MAX2((uint)ParallelGCThreads, 1u),
err_msg("The given worker id %u must be less than the number of threads %u", _worker_id, MAX2((uint)ParallelGCThreads, 1u)));
}
// Generate G1 specialized oop_oop_iterate functions.
SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1(ALL_KLASS_OOP_OOP_ITERATE_DEFN)
View file
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -172,7 +172,7 @@ inline void G1UpdateRSOrPushRefOopClosure::do_oop_nv(T* p) {
oopDesc* o = obj;
#endif // CHECK_UNHANDLED_OOPS
assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
assert(_g1->is_in_reserved(obj), "must be in heap");
#endif // ASSERT
assert(_from != NULL, "from region must be non-NULL");
View file
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,37 +44,45 @@
#endif
#include "utilities/bitMap.inline.hpp"
G1PageBasedVirtualSpace::G1PageBasedVirtualSpace() : _low_boundary(NULL),
_high_boundary(NULL), _committed(), _page_size(0), _special(false),
G1PageBasedVirtualSpace::G1PageBasedVirtualSpace(ReservedSpace rs, size_t used_size, size_t page_size) :
_low_boundary(NULL), _high_boundary(NULL), _committed(), _page_size(0), _special(false),
_dirty(), _executable(false) {
initialize_with_page_size(rs, used_size, page_size);
}
bool G1PageBasedVirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t page_size) {
if (!rs.is_reserved()) {
return false; // Allocation failed.
}
assert(_low_boundary == NULL, "VirtualSpace already initialized");
assert(page_size > 0, "Granularity must be non-zero.");
void G1PageBasedVirtualSpace::initialize_with_page_size(ReservedSpace rs, size_t used_size, size_t page_size) {
guarantee(rs.is_reserved(), "Given reserved space must have been reserved already.");
vmassert(_low_boundary == NULL, "VirtualSpace already initialized");
vmassert(page_size > 0, "Page size must be non-zero.");
guarantee(is_ptr_aligned(rs.base(), page_size),
err_msg("Reserved space base " PTR_FORMAT " is not aligned to requested page size " SIZE_FORMAT, p2i(rs.base()), page_size));
guarantee(is_size_aligned(used_size, os::vm_page_size()),
err_msg("Given used reserved space size needs to be OS page size aligned (%d bytes) but is " SIZE_FORMAT, os::vm_page_size(), used_size));
guarantee(used_size <= rs.size(),
err_msg("Used size of reserved space " SIZE_FORMAT " bytes is smaller than reservation at " SIZE_FORMAT " bytes", used_size, rs.size()));
guarantee(is_size_aligned(rs.size(), page_size),
err_msg("Expected that the virtual space is size aligned, but " SIZE_FORMAT " is not aligned to page size " SIZE_FORMAT, rs.size(), page_size));
_low_boundary = rs.base();
_high_boundary = _low_boundary + rs.size();
_high_boundary = _low_boundary + used_size;
_special = rs.special();
_executable = rs.executable();
_page_size = page_size;
assert(_committed.size() == 0, "virtual space initialized more than once");
uintx size_in_bits = rs.size() / page_size;
_committed.resize(size_in_bits, /* in_resource_area */ false);
vmassert(_committed.size() == 0, "virtual space initialized more than once");
BitMap::idx_t size_in_pages = rs.size() / page_size;
_committed.resize(size_in_pages, /* in_resource_area */ false);
if (_special) {
_dirty.resize(size_in_bits, /* in_resource_area */ false);
_dirty.resize(size_in_pages, /* in_resource_area */ false);
}
return true;
_tail_size = used_size % _page_size;
}
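With this constructor a space can be backed by large pages while only an OS-page-aligned prefix is used; whatever remains of the last large page is the "tail". A small worked example of the arithmetic (sizes are illustrative):

#include <cassert>
#include <cstddef>

int main() {
  const size_t page_size = 2u * 1024 * 1024;        // preferred (large) page size
  const size_t used_size = 5u * 1024 * 1024;        // OS-page aligned, <= reservation
  const size_t tail_size = used_size % page_size;   // 1 MB partial tail
  assert(tail_size == 1u * 1024 * 1024);
  assert(used_size / page_size == 2);               // two full pages precede the tail
  return 0;
}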
G1PageBasedVirtualSpace::~G1PageBasedVirtualSpace() {
release();
}
@@ -87,12 +95,18 @@ void G1PageBasedVirtualSpace::release() {
_special = false;
_executable = false;
_page_size = 0;
_tail_size = 0;
_committed.resize(0, false);
_dirty.resize(0, false);
}
size_t G1PageBasedVirtualSpace::committed_size() const {
return _committed.count_one_bits() * _page_size;
size_t result = _committed.count_one_bits() * _page_size;
// The last page might not be in full.
if (is_last_page_partial() && _committed.at(_committed.size() - 1)) {
result -= _page_size - _tail_size;
}
return result;
}
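committed_size() now discounts the uncommitted remainder of a partially used last page. The same computation in isolation (invented signature, illustrative sizes):

#include <cassert>
#include <cstddef>

size_t committed_size_sketch(size_t committed_pages, size_t page_size,
                             size_t tail_size, bool last_page_committed) {
  size_t result = committed_pages * page_size;
  if (tail_size > 0 && last_page_committed) {
    result -= page_size - tail_size;    // the last page only counts in part
  }
  return result;
}

int main() {
  // Two full 2 MB pages plus a committed 1 MB tail page: 5 MB committed.
  assert(committed_size_sketch(3, 2u * 1024 * 1024, 1u * 1024 * 1024, true)
         == 5u * 1024 * 1024);
  return 0;
}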
size_t G1PageBasedVirtualSpace::reserved_size() const {
@@ -103,65 +117,134 @@ size_t G1PageBasedVirtualSpace::uncommitted_size() const {
return reserved_size() - committed_size();
}
uintptr_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
size_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
return (addr - _low_boundary) / _page_size;
}
bool G1PageBasedVirtualSpace::is_area_committed(uintptr_t start, size_t size_in_pages) const {
uintptr_t end = start + size_in_pages;
return _committed.get_next_zero_offset(start, end) >= end;
bool G1PageBasedVirtualSpace::is_area_committed(size_t start_page, size_t size_in_pages) const {
size_t end_page = start_page + size_in_pages;
return _committed.get_next_zero_offset(start_page, end_page) >= end_page;
}
bool G1PageBasedVirtualSpace::is_area_uncommitted(uintptr_t start, size_t size_in_pages) const {
uintptr_t end = start + size_in_pages;
return _committed.get_next_one_offset(start, end) >= end;
bool G1PageBasedVirtualSpace::is_area_uncommitted(size_t start_page, size_t size_in_pages) const {
size_t end_page = start_page + size_in_pages;
return _committed.get_next_one_offset(start_page, end_page) >= end_page;
}
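Both predicates lean on the BitMap range queries; assuming get_next_zero_offset returns the first clear bit in [start, end), or end when there is none (and get_next_one_offset symmetrically), a standalone model looks like this:

#include <cassert>
#include <cstddef>
#include <vector>

// Stand-in for BitMap::get_next_zero_offset under the assumed semantics.
static size_t next_zero(const std::vector<bool>& bits, size_t start, size_t end) {
  for (size_t i = start; i < end; i++) {
    if (!bits[i]) return i;
  }
  return end;
}

int main() {
  std::vector<bool> committed = {true, true, false, true};
  assert(next_zero(committed, 0, 2) == 2);  // pages [0,2) are fully committed
  assert(next_zero(committed, 0, 4) == 2);  // pages [0,4) are not: page 2 is clear
  return 0;
}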
char* G1PageBasedVirtualSpace::page_start(uintptr_t index) {
char* G1PageBasedVirtualSpace::page_start(size_t index) const {
return _low_boundary + index * _page_size;
}
size_t G1PageBasedVirtualSpace::byte_size_for_pages(size_t num) {
return num * _page_size;
bool G1PageBasedVirtualSpace::is_after_last_page(size_t index) const {
guarantee(index <= _committed.size(),
err_msg("Given boundary page " SIZE_FORMAT " is beyond managed page count " SIZE_FORMAT, index, _committed.size()));
return index == _committed.size();
}
bool G1PageBasedVirtualSpace::commit(uintptr_t start, size_t size_in_pages) {
void G1PageBasedVirtualSpace::commit_preferred_pages(size_t start, size_t num_pages) {
vmassert(num_pages > 0, "No full pages to commit");
vmassert(start + num_pages <= _committed.size(),
err_msg("Tried to commit area from page " SIZE_FORMAT " to page " SIZE_FORMAT " "
"that is outside of managed space of " SIZE_FORMAT " pages",
start, start + num_pages, _committed.size()));
char* start_addr = page_start(start);
size_t size = num_pages * _page_size;
os::commit_memory_or_exit(start_addr, size, _page_size, _executable,
err_msg("Failed to commit area from " PTR_FORMAT " to " PTR_FORMAT " of length " SIZE_FORMAT ".",
p2i(start_addr), p2i(start_addr + size), size));
}
void G1PageBasedVirtualSpace::commit_tail() {
vmassert(_tail_size > 0, "The size of the tail area must be > 0 when reaching here");
char* const aligned_end_address = (char*)align_ptr_down(_high_boundary, _page_size);
os::commit_memory_or_exit(aligned_end_address, _tail_size, os::vm_page_size(), _executable,
err_msg("Failed to commit tail area from " PTR_FORMAT " to " PTR_FORMAT " of length " SIZE_FORMAT ".",
p2i(aligned_end_address), p2i(_high_boundary), _tail_size));
}
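The tail start computed via align_ptr_down is simply the high boundary rounded down to the preferred page size; a quick check with hypothetical addresses (assuming the usual mask-based round-down for power-of-two sizes):

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t page_size = 2u * 1024 * 1024;           // power of two
  const uintptr_t high = 0x40000000u + 5u * 1024 * 1024;  // 5M above a 1G base
  const uintptr_t tail_start = high & ~(page_size - 1);   // align_ptr_down
  assert(tail_start == 0x40000000u + 4u * 1024 * 1024);
  assert(high - tail_start == 1u * 1024 * 1024);          // the 1M tail
  return 0;
}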
void G1PageBasedVirtualSpace::commit_internal(size_t start_page, size_t end_page) {
guarantee(start_page < end_page,
err_msg("Given start page " SIZE_FORMAT " is larger than or equal to end page " SIZE_FORMAT, start_page, end_page));
guarantee(end_page <= _committed.size(),
err_msg("Given end page " SIZE_FORMAT " is beyond the end of the managed page count of " SIZE_FORMAT, end_page, _committed.size()));
size_t pages = end_page - start_page;
bool need_to_commit_tail = is_after_last_page(end_page) && is_last_page_partial();
// If we have to commit some (partial) tail area, decrease the number of pages to avoid
// committing that in the full-page commit code.
if (need_to_commit_tail) {
pages--;
}
if (pages > 0) {
commit_preferred_pages(start_page, pages);
}
if (need_to_commit_tail) {
commit_tail();
}
}
char* G1PageBasedVirtualSpace::bounded_end_addr(size_t end_page) const {
return MIN2(_high_boundary, page_start(end_page));
}
void G1PageBasedVirtualSpace::pretouch_internal(size_t start_page, size_t end_page) {
guarantee(start_page < end_page,
err_msg("Given start page " SIZE_FORMAT " is larger than or equal to end page " SIZE_FORMAT, start_page, end_page));
os::pretouch_memory(page_start(start_page), bounded_end_addr(end_page));
}
bool G1PageBasedVirtualSpace::commit(size_t start_page, size_t size_in_pages) {
// We need to make sure to commit all pages covered by the given area.
guarantee(is_area_uncommitted(start, size_in_pages), "Specified area is not uncommitted");
guarantee(is_area_uncommitted(start_page, size_in_pages), "Specified area is not uncommitted");
bool zero_filled = true;
uintptr_t end = start + size_in_pages;
size_t end_page = start_page + size_in_pages;
if (_special) {
// Check for dirty pages and update zero_filled if any found.
if (_dirty.get_next_one_offset(start,end) < end) {
if (_dirty.get_next_one_offset(start_page, end_page) < end_page) {
zero_filled = false;
_dirty.clear_range(start, end);
_dirty.clear_range(start_page, end_page);
}
} else {
os::commit_memory_or_exit(page_start(start), byte_size_for_pages(size_in_pages), _executable,
err_msg("Failed to commit pages from "SIZE_FORMAT" of length "SIZE_FORMAT, start, size_in_pages));
commit_internal(start_page, end_page);
}
_committed.set_range(start, end);
_committed.set_range(start_page, end_page);
if (AlwaysPreTouch) {
os::pretouch_memory(page_start(start), page_start(end));
pretouch_internal(start_page, end_page);
}
return zero_filled;
}
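For pre-committed ("special") space the OS never re-zeroes memory on commit, so commit() can only report zero_filled when nothing in the range was dirtied by an earlier uncommit. A toy model of that bookkeeping, not the HotSpot code:

#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  std::vector<bool> dirty = {false, true, false};  // page 1 was uncommitted before
  bool zero_filled = true;
  for (size_t i = 0; i < dirty.size(); i++) {      // "commit" pages [0,3)
    if (dirty[i]) {
      zero_filled = false;  // stale contents may survive
      dirty[i] = false;     // equivalent of _dirty.clear_range
    }
  }
  assert(!zero_filled);
  return 0;
}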
void G1PageBasedVirtualSpace::uncommit(uintptr_t start, size_t size_in_pages) {
guarantee(is_area_committed(start, size_in_pages), "checking");
void G1PageBasedVirtualSpace::uncommit_internal(size_t start_page, size_t end_page) {
guarantee(start_page < end_page,
err_msg("Given start page " SIZE_FORMAT " is larger than or equal to end page " SIZE_FORMAT, start_page, end_page));
char* start_addr = page_start(start_page);
os::uncommit_memory(start_addr, pointer_delta(bounded_end_addr(end_page), start_addr, sizeof(char)));
}
void G1PageBasedVirtualSpace::uncommit(size_t start_page, size_t size_in_pages) {
guarantee(is_area_committed(start_page, size_in_pages), "checking");
size_t end_page = start_page + size_in_pages;
if (_special) {
// Mark that memory is dirty. If committed again the memory might
// need to be cleared explicitly.
_dirty.set_range(start, start + size_in_pages);
_dirty.set_range(start_page, end_page);
} else {
os::uncommit_memory(page_start(start), byte_size_for_pages(size_in_pages));
uncommit_internal(start_page, end_page);
}
_committed.clear_range(start, start + size_in_pages);
_committed.clear_range(start_page, end_page);
}
bool G1PageBasedVirtualSpace::contains(const void* p) const {
@ -175,7 +258,8 @@ void G1PageBasedVirtualSpace::print_on(outputStream* out) {
out->cr();
out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
out->print_cr(" - reserved: " SIZE_FORMAT, reserved_size());
out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", p2i(_low_boundary), p2i(_high_boundary));
out->print_cr(" - preferred page size: " SIZE_FORMAT, _page_size);
out->print_cr(" - [low_b, high_b]: [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(_low_boundary), p2i(_high_boundary));
}
void G1PageBasedVirtualSpace::print() {

View file

@ -34,6 +34,12 @@
// granularity.
// (De-)Allocation requests are always OS page aligned by passing a page index
// and multiples of pages.
// For systems that only commit memory in chunks of a given size (always greater
// than page size), the base address is required to be aligned to that page size.
// The actual size requested need not be aligned to that page size, but the size
// of the reservation passed in may be rounded up to this page size. Any fragment
// (smaller than the page size) at the tail of the requested size will be
// committed using OS small pages.
// The implementation gives an error when trying to commit or uncommit pages that
// have already been committed or uncommitted.
class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC {
@ -43,7 +49,11 @@ class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC {
char* _low_boundary;
char* _high_boundary;
// The commit/uncommit granularity in bytes.
// The size, in bytes, of the tail of the handled space that needs to be
// committed using small pages.
size_t _tail_size;
// The preferred page size used for commit/uncommit in bytes.
size_t _page_size;
// Bitmap used for verification of commit/uncommit operations.
@ -62,30 +72,55 @@ class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC {
// Indicates whether the committed space should be executable.
bool _executable;
// Helper function for committing memory. Commit the given memory range by using
// _page_size pages as much as possible and the remainder with small sized pages.
void commit_internal(size_t start_page, size_t end_page);
// Commit num_pages pages of _page_size size starting from start_page. All
// argument checking has been performed.
void commit_preferred_pages(size_t start_page, size_t num_pages);
// Commit space at the high end of the space that needs to be committed with small
// sized pages.
void commit_tail();
// Uncommit the given memory range.
void uncommit_internal(size_t start_page, size_t end_page);
// Pretouch the given memory range.
void pretouch_internal(size_t start_page, size_t end_page);
// Returns the index of the page which contains the given address.
uintptr_t addr_to_page_index(char* addr) const;
// Returns the address of the given page index.
char* page_start(uintptr_t index);
// Returns the byte size of the given number of pages.
size_t byte_size_for_pages(size_t num);
char* page_start(size_t index) const;
// Is the given page index the last page?
bool is_last_page(size_t index) const { return index == (_committed.size() - 1); }
// Is the given page index the first after last page?
bool is_after_last_page(size_t index) const;
// Is the last page only partially covered by this space?
bool is_last_page_partial() const { return !is_ptr_aligned(_high_boundary, _page_size); }
// Returns the end address of the given page bounded by the reserved space.
char* bounded_end_addr(size_t end_page) const;
// Returns true if the entire area is backed by committed memory.
bool is_area_committed(uintptr_t start, size_t size_in_pages) const;
bool is_area_committed(size_t start_page, size_t size_in_pages) const;
// Returns true if the entire area is not backed by committed memory.
bool is_area_uncommitted(uintptr_t start, size_t size_in_pages) const;
bool is_area_uncommitted(size_t start_page, size_t size_in_pages) const;
void initialize_with_page_size(ReservedSpace rs, size_t used_size, size_t page_size);
public:
// Commit the given area of pages starting at start being size_in_pages large.
// Returns true if the given area is zero filled upon completion.
bool commit(uintptr_t start, size_t size_in_pages);
bool commit(size_t start_page, size_t size_in_pages);
// Uncommit the given area of pages starting at start being size_in_pages large.
void uncommit(uintptr_t start, size_t size_in_pages);
void uncommit(size_t start_page, size_t size_in_pages);
// Initialization
G1PageBasedVirtualSpace();
bool initialize_with_granularity(ReservedSpace rs, size_t page_size);
// Initialize the given reserved space with the given base address and the size
// actually used.
// Prefer to commit in page_size chunks.
G1PageBasedVirtualSpace(ReservedSpace rs, size_t used_size, size_t page_size);
// Destruction
~G1PageBasedVirtualSpace();

View file

@ -26,8 +26,10 @@
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/stack.inline.hpp"
G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
: _g1h(g1h),

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -63,7 +63,7 @@ inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
assert(has_partial_array_mask(p), "invariant");
oop from_obj = clear_partial_array_mask(p);
assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
assert(from_obj->is_objArray(), "must be obj array");
objArrayOop from_obj_array = objArrayOop(from_obj);
// The from-space object contains the real length.

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,17 +31,16 @@
#include "utilities/bitMap.inline.hpp"
G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
size_t commit_granularity,
size_t used_size,
size_t page_size,
size_t region_granularity,
MemoryType type) :
_storage(),
_commit_granularity(commit_granularity),
_storage(rs, used_size, page_size),
_region_granularity(region_granularity),
_listener(NULL),
_commit_map() {
guarantee(is_power_of_2(commit_granularity), "must be");
guarantee(is_power_of_2(page_size), "must be");
guarantee(is_power_of_2(region_granularity), "must be");
_storage.initialize_with_granularity(rs, commit_granularity);
MemTracker::record_virtual_memory_type((address)rs.base(), type);
}
@ -55,25 +54,26 @@ class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
public:
G1RegionsLargerThanCommitSizeMapper(ReservedSpace rs,
size_t os_commit_granularity,
size_t actual_size,
size_t page_size,
size_t alloc_granularity,
size_t commit_factor,
MemoryType type) :
G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
_pages_per_region(alloc_granularity / (os_commit_granularity * commit_factor)) {
G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, type),
_pages_per_region(alloc_granularity / (page_size * commit_factor)) {
guarantee(alloc_granularity >= os_commit_granularity, "allocation granularity smaller than commit granularity");
guarantee(alloc_granularity >= page_size, "allocation granularity smaller than commit granularity");
_commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
}
virtual void commit_regions(uintptr_t start_idx, size_t num_regions) {
bool zero_filled = _storage.commit(start_idx * _pages_per_region, num_regions * _pages_per_region);
virtual void commit_regions(uint start_idx, size_t num_regions) {
bool zero_filled = _storage.commit((size_t)start_idx * _pages_per_region, num_regions * _pages_per_region);
_commit_map.set_range(start_idx, start_idx + num_regions);
fire_on_commit(start_idx, num_regions, zero_filled);
}
virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) {
_storage.uncommit(start_idx * _pages_per_region, num_regions * _pages_per_region);
virtual void uncommit_regions(uint start_idx, size_t num_regions) {
_storage.uncommit((size_t)start_idx * _pages_per_region, num_regions * _pages_per_region);
_commit_map.clear_range(start_idx, start_idx + num_regions);
}
};
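The scaling in this mapper is plain division; with hypothetical values (1M heap regions, 4K pages, commit_factor 1) each region owns 256 pages and region index i maps to page i * 256:

#include <cassert>
#include <cstddef>

int main() {
  const size_t region_granularity = 1024 * 1024;  // e.g. HeapRegion::GrainBytes
  const size_t page_size = 4096;
  const size_t commit_factor = 1;
  assert(region_granularity >= page_size * commit_factor);  // "larger than" case
  const size_t pages_per_region = region_granularity / (page_size * commit_factor);
  assert(pages_per_region == 256);
  const size_t start_idx = 3;                     // region index, a uint above
  assert(start_idx * pages_per_region == 768);    // first page to commit
  return 0;
}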
@ -98,22 +98,23 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
public:
G1RegionsSmallerThanCommitSizeMapper(ReservedSpace rs,
size_t os_commit_granularity,
size_t actual_size,
size_t page_size,
size_t alloc_granularity,
size_t commit_factor,
MemoryType type) :
G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
_regions_per_page((os_commit_granularity * commit_factor) / alloc_granularity), _refcounts() {
G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, type),
_regions_per_page((page_size * commit_factor) / alloc_granularity), _refcounts() {
guarantee((os_commit_granularity * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
_refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + rs.size()), os_commit_granularity);
guarantee((page_size * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
_refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + align_size_up(rs.size(), page_size)), page_size);
_commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
}
virtual void commit_regions(uintptr_t start_idx, size_t num_regions) {
for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) {
assert(!_commit_map.at(i), err_msg("Trying to commit storage at region "INTPTR_FORMAT" that is already committed", i));
uintptr_t idx = region_idx_to_page_idx(i);
virtual void commit_regions(uint start_idx, size_t num_regions) {
for (uint i = start_idx; i < start_idx + num_regions; i++) {
assert(!_commit_map.at(i), err_msg("Trying to commit storage at region %u that is already committed", i));
size_t idx = region_idx_to_page_idx(i);
uint old_refcount = _refcounts.get_by_index(idx);
bool zero_filled = false;
if (old_refcount == 0) {
@ -125,10 +126,10 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
}
}
virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) {
for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) {
assert(_commit_map.at(i), err_msg("Trying to uncommit storage at region "INTPTR_FORMAT" that is not committed", i));
uintptr_t idx = region_idx_to_page_idx(i);
virtual void uncommit_regions(uint start_idx, size_t num_regions) {
for (uint i = start_idx; i < start_idx + num_regions; i++) {
assert(_commit_map.at(i), err_msg("Trying to uncommit storage at region %u that is not committed", i));
size_t idx = region_idx_to_page_idx(i);
uint old_refcount = _refcounts.get_by_index(idx);
assert(old_refcount > 0, "must be");
if (old_refcount == 1) {
@ -147,14 +148,15 @@ void G1RegionToSpaceMapper::fire_on_commit(uint start_idx, size_t num_regions, b
}
G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs,
size_t os_commit_granularity,
size_t actual_size,
size_t page_size,
size_t region_granularity,
size_t commit_factor,
MemoryType type) {
if (region_granularity >= (os_commit_granularity * commit_factor)) {
return new G1RegionsLargerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type);
if (region_granularity >= (page_size * commit_factor)) {
return new G1RegionsLargerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
} else {
return new G1RegionsSmallerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type);
return new G1RegionsSmallerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
}
}
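In the opposite case several regions share one page, which is why G1RegionsSmallerThanCommitSizeMapper keeps per-page refcounts: a page is committed when its count goes from 0 to 1 and uncommitted when it drops back to 0. A minimal sketch of that idea (hypothetical helpers, not the patch code):

#include <cassert>
#include <cstddef>
#include <map>

int main() {
  std::map<size_t, unsigned> refcounts;  // page index -> regions mapped onto it
  auto commit_region_on = [&](size_t page) {
    return refcounts[page]++ == 0;       // true: first user, OS commit needed
  };
  auto uncommit_region_on = [&](size_t page) {
    return --refcounts[page] == 0;       // true: last user gone, OS uncommit
  };
  assert(commit_region_on(7));           // first region on page 7 commits it
  assert(!commit_region_on(7));          // second region only bumps the count
  assert(!uncommit_region_on(7));        // page still shared
  assert(uncommit_region_on(7));         // last region uncommits the page
  return 0;
}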

View file

@ -46,12 +46,12 @@ class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
protected:
// Backing storage.
G1PageBasedVirtualSpace _storage;
size_t _commit_granularity;
size_t _region_granularity;
// Mapping management
BitMap _commit_map;
G1RegionToSpaceMapper(ReservedSpace rs, size_t commit_granularity, size_t region_granularity, MemoryType type);
G1RegionToSpaceMapper(ReservedSpace rs, size_t used_size, size_t page_size, size_t region_granularity, MemoryType type);
void fire_on_commit(uint start_idx, size_t num_regions, bool zero_filled);
public:
@ -70,16 +70,20 @@ class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
return _commit_map.at(idx);
}
virtual void commit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0;
virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0;
virtual void commit_regions(uint start_idx, size_t num_regions = 1) = 0;
virtual void uncommit_regions(uint start_idx, size_t num_regions = 1) = 0;
// Creates an appropriate G1RegionToSpaceMapper for the given parameters.
// The actual space to be used within the given reservation is given by actual_size.
// This is because some OSes need to round the reservation size up to a
// multiple of page_size.
// The byte_translation_factor defines how many bytes in a region correspond to
// a single byte in the data structure this mapper is for.
// Eg. in the card table, this value corresponds to the size a single card
// table entry corresponds to.
// table entry corresponds to in the heap.
static G1RegionToSpaceMapper* create_mapper(ReservedSpace rs,
size_t os_commit_granularity,
size_t actual_size,
size_t page_size,
size_t region_granularity,
size_t byte_translation_factor,
MemoryType type);

View file

@ -38,6 +38,7 @@
#include "oops/oop.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/intHisto.hpp"
#include "utilities/stack.inline.hpp"
#define CARD_REPEAT_HISTO 0

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -57,7 +57,7 @@ inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, uint tid) {
oopDesc* o = obj;
#endif // CHECK_UNHANDLED_OOPS
assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
assert(_g1->is_in_reserved(obj), "must be in heap");
#endif // ASSERT
assert(from == NULL || from->is_in_reserved(p), "p is not in from");

View file

@ -116,7 +116,7 @@ void G1RootProcessor::wait_until_all_strong_classes_discovered() {
G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h) :
_g1h(g1h),
_process_strong_tasks(new SubTasksDone(G1RP_PS_NumElements)),
_srs(g1h),
_srs(),
_lock(Mutex::leaf, "G1 Root Scanning barrier lock", false, Monitor::_safepoint_check_never),
_n_workers_discovered_strong_classes(0) {}
@ -253,7 +253,8 @@ void G1RootProcessor::process_java_roots(OopClosure* strong_roots,
{
G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ThreadRoots, worker_i);
Threads::possibly_parallel_oops_do(strong_roots, thread_stack_clds, strong_code);
bool is_par = _g1h->n_par_threads() > 0;
Threads::possibly_parallel_oops_do(is_par, strong_roots, thread_stack_clds, strong_code);
}
}
@ -323,10 +324,6 @@ void G1RootProcessor::process_vm_roots(OopClosure* strong_roots,
void G1RootProcessor::scan_remembered_sets(G1ParPushHeapRSClosure* scan_rs,
OopClosure* scan_non_heap_weak_roots,
uint worker_i) {
G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CodeCacheRoots, worker_i);
// Now scan the complement of the collection set.
G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);
_g1h->g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);

View file

@ -26,7 +26,7 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_ROOTPROCESSOR_HPP
#include "memory/allocation.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/strongRootsScope.hpp"
#include "runtime/mutex.hpp"
class CLDClosure;
@ -46,7 +46,7 @@ class SubTasksDone;
class G1RootProcessor : public StackObj {
G1CollectedHeap* _g1h;
SubTasksDone* _process_strong_tasks;
SharedHeap::StrongRootsScope _srs;
StrongRootsScope _srs;
// Used to implement the Thread work barrier.
Monitor _lock;

View file

@ -206,7 +206,7 @@ G1SATBCardTableLoggingModRefBS::write_ref_field_static(void* field,
if (new_val == NULL) return;
// Otherwise, log it.
G1SATBCardTableLoggingModRefBS* g1_bs =
barrier_set_cast<G1SATBCardTableLoggingModRefBS>(Universe::heap()->barrier_set());
barrier_set_cast<G1SATBCardTableLoggingModRefBS>(G1CollectedHeap::heap()->barrier_set());
g1_bs->write_ref_field_work(field, new_val);
}

View file

@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
#include "gc_implementation/g1/g1StringDedupQueue.hpp"
#include "memory/gcLocker.hpp"
@ -163,7 +164,7 @@ void G1StringDedupQueue::verify() {
while (!iter.is_empty()) {
oop obj = iter.next();
if (obj != NULL) {
guarantee(Universe::heap()->is_in_reserved(obj), "Object must be on the heap");
guarantee(G1CollectedHeap::heap()->is_in_reserved(obj), "Object must be on the heap");
guarantee(!obj->is_forwarded(), "Object must not be forwarded");
guarantee(java_lang_String::is_instance(obj), "Object must be a String");
}

View file

@ -27,6 +27,7 @@
#include "classfile/javaClasses.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
#include "gc_implementation/g1/g1StringDedupTable.hpp"
#include "memory/gcLocker.hpp"
#include "memory/padded.inline.hpp"
@ -519,7 +520,7 @@ void G1StringDedupTable::verify() {
while (*entry != NULL) {
typeArrayOop value = (*entry)->obj();
guarantee(value != NULL, "Object must not be NULL");
guarantee(Universe::heap()->is_in_reserved(value), "Object must be on the heap");
guarantee(G1CollectedHeap::heap()->is_in_reserved(value), "Object must be on the heap");
guarantee(!value->is_forwarded(), "Object must not be forwarded");
guarantee(value->is_typeArray(), "Object must be a typeArrayOop");
unsigned int hash = hash_code(value);

View file

@ -29,6 +29,7 @@
#include "runtime/mutexLocker.hpp"
class G1StringDedupEntryCache;
class G1StringDedupUnlinkOrOopsDoClosure;
//
// Table entry in the deduplication hashtable. Points weakly to the

View file

@ -31,7 +31,6 @@
#include "gc_implementation/g1/survRateGroup.hpp"
#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/space.inline.hpp"
#include "memory/watermark.hpp"
#include "utilities/macros.hpp"

View file

@ -330,8 +330,12 @@ void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, Heap
assert(!hrclaimer->is_region_claimed(ch_index),
"Must not have been claimed yet because claiming of humongous continuation first claims the start region");
// There's no need to actually claim the continues humongous region, but we can do it in an assert as an extra precaution.
assert(hrclaimer->claim_region(ch_index), "We should always be able to claim the continuesHumongous part of the humongous object");
// Claim the region so no other worker tries to process the region. When a worker processes a
// starts_humongous region it may also process the associated continues_humongous regions.
// The continues_humongous regions can be changed to free regions. Unless this worker claims
// all of these regions, other workers might try to claim and process these newly free regions.
bool claim_result = hrclaimer->claim_region(ch_index);
guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object");
bool res2 = blk->doHeapRegion(chr);
if (res2) {

View file

@ -419,6 +419,7 @@ void FreeRegionList_test() {
ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(heap.word_size()));
G1RegionToSpaceMapper* bot_storage =
G1RegionToSpaceMapper::create_mapper(bot_rs,
bot_rs.size(),
os::vm_page_size(),
HeapRegion::GrainBytes,
G1BlockOffsetSharedArray::N_bytes,

View file

@ -25,8 +25,8 @@
#include "precompiled.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/satbQueue.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.hpp"

View file

@ -26,8 +26,8 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#define VM_STRUCTS_G1(nonstatic_field, static_field) \
\
@ -70,7 +70,7 @@
\
declare_toplevel_type(G1HeapRegionTable) \
\
declare_type(G1CollectedHeap, SharedHeap) \
declare_type(G1CollectedHeap, CollectedHeap) \
\
declare_type(G1OffsetTableContigSpace, CompactibleSpace) \
declare_type(HeapRegion, G1OffsetTableContigSpace) \

View file

@ -225,15 +225,10 @@ void VM_CGC_Operation::release_and_notify_pending_list_lock() {
void VM_CGC_Operation::doit() {
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm(), G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id());
SharedHeap* sh = SharedHeap::heap();
// This could go away if CollectedHeap gave access to _gc_is_active...
if (sh != NULL) {
IsGCActiveMark x;
_cl->do_void();
} else {
_cl->do_void();
}
G1CollectedHeap* g1h = G1CollectedHeap::heap();
GCTraceTime t(_printGCMessage, G1Log::fine(), true, g1h->gc_timer_cm(), g1h->concurrent_mark()->concurrent_gc_id());
IsGCActiveMark x;
_cl->do_void();
}
bool VM_CGC_Operation::doit_prologue() {
@ -244,14 +239,12 @@ bool VM_CGC_Operation::doit_prologue() {
}
Heap_lock->lock();
SharedHeap::heap()->_thread_holds_heap_lock_for_gc = true;
return true;
}
void VM_CGC_Operation::doit_epilogue() {
// Note the relative order of the unlocks must match that in
// VM_GC_Operation::doit_epilogue()
SharedHeap::heap()->_thread_holds_heap_lock_for_gc = false;
Heap_lock->unlock();
if (_needs_pll) {
release_and_notify_pending_list_lock();

View file

@ -23,12 +23,12 @@
*/
#include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
@ -449,7 +449,7 @@ get_LNC_array_for_space(Space* sp,
// Do a dirty read here. If we pass the conditional then take the rare
// event lock and do the read again in case some other thread had already
// succeeded and done the resize.
int cur_collection = Universe::heap()->total_collections();
int cur_collection = GenCollectedHeap::heap()->total_collections();
if (_last_LNC_resizing_collection[i] != cur_collection) {
MutexLocker x(ParGCRareEvent_lock);
if (_last_LNC_resizing_collection[i] != cur_collection) {

View file

@ -42,7 +42,7 @@
#include "memory/generation.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/strongRootsScope.hpp"
#include "memory/space.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
@ -53,6 +53,7 @@
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/workgroup.hpp"
#ifdef _MSC_VER
@ -117,7 +118,7 @@ bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) c
void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
assert(old->is_objArray(), "must be obj array");
assert(old->is_forwarded(), "must be forwarded");
assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
assert(GenCollectedHeap::heap()->is_in_reserved(old), "must be in heap.");
assert(!old_gen()->is_in(old), "must be in young generation.");
objArrayOop obj = objArrayOop(old->forwardee());
@ -199,9 +200,9 @@ bool ParScanThreadState::take_from_overflow_stack() {
for (size_t i = 0; i != num_take_elems; i++) {
oop cur = of_stack->pop();
oop obj_to_push = cur->forwardee();
assert(Universe::heap()->is_in_reserved(cur), "Should be in heap");
assert(GenCollectedHeap::heap()->is_in_reserved(cur), "Should be in heap");
assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
assert(Universe::heap()->is_in_reserved(obj_to_push), "Should be in heap");
assert(GenCollectedHeap::heap()->is_in_reserved(obj_to_push), "Should be in heap");
if (should_be_partially_scanned(obj_to_push, cur)) {
assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
obj_to_push = cur;
@ -596,8 +597,6 @@ void ParNewGenTask::work(uint worker_id) {
// and handle marks.
ResourceMark rm;
HandleMark hm;
// We would need multiple old-gen queues otherwise.
assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen.");
ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
assert(_state_set->is_valid(worker_id), "Should not have been called");
@ -697,7 +696,7 @@ void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
_par_cl->do_oop_nv(p);
if (Universe::heap()->is_in_reserved(p)) {
if (GenCollectedHeap::heap()->is_in_reserved(p)) {
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
_rs->write_ref_field_gc_par(p, obj);
}
@ -724,7 +723,7 @@ void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
_cl->do_oop_nv(p);
if (Universe::heap()->is_in_reserved(p)) {
if (GenCollectedHeap::heap()->is_in_reserved(p)) {
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
_rs->write_ref_field_gc_par(p, obj);
}
@ -823,8 +822,6 @@ public:
void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
{
GenCollectedHeap* gch = GenCollectedHeap::heap();
assert(gch->kind() == CollectedHeap::GenCollectedHeap,
"not a generational heap");
FlexibleWorkGang* workers = gch->workers();
assert(workers != NULL, "Need parallel worker threads.");
_state_set.reset(workers->active_workers(), _generation.promotion_failed());
@ -899,7 +896,7 @@ void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThr
_gc_tracer.report_promotion_failed(_promotion_failed_info);
}
// Reset the PromotionFailureALot counters.
NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
NOT_PRODUCT(gch->reset_promotion_should_fail();)
}
void ParNewGeneration::collect(bool full,
@ -912,8 +909,6 @@ void ParNewGeneration::collect(bool full,
_gc_timer->register_gc_start();
assert(gch->kind() == CollectedHeap::GenCollectedHeap,
"not a CMS generational heap");
AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
FlexibleWorkGang* workers = gch->workers();
assert(workers != NULL, "Need workgang for parallel work");
@ -922,8 +917,6 @@ void ParNewGeneration::collect(bool full,
workers->active_workers(),
Threads::number_of_non_daemon_threads());
workers->set_active_workers(active_workers);
assert(gch->n_gens() == 2,
"Par collection currently only works with single older gen.");
_old_gen = gch->old_gen();
// If the next generation is too full to accommodate worst-case promotion
@ -974,10 +967,10 @@ void ParNewGeneration::collect(bool full,
// in the multi-threaded case, but we special-case n=1 here to get
// repeatable measurements of the 1-thread overhead of the parallel code.
if (n_workers > 1) {
GenCollectedHeap::StrongRootsScope srs(gch);
StrongRootsScope srs;
workers->run_task(&tsk);
} else {
GenCollectedHeap::StrongRootsScope srs(gch);
StrongRootsScope srs;
tsk.work(0);
}
thread_state_set.reset(0 /* Bad value in debug if not reset */,
@ -1194,7 +1187,7 @@ oop ParNewGeneration::copy_to_survivor_space(
} else {
// Is in to-space; do copying ourselves.
Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
assert(Universe::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
assert(GenCollectedHeap::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
forward_ptr = old->forward_to_atomic(new_obj);
// Restore the mark word copied above.
new_obj->set_mark(m);

View file

@ -0,0 +1,31 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/specialized_oop_closures.hpp"
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
// Generate ParNew specialized oop_oop_iterate functions.
SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_P(ALL_KLASS_OOP_OOP_ITERATE_DEFN);

View file

@ -70,7 +70,7 @@ template <class T>
inline void ParScanClosure::do_oop_work(T* p,
bool gc_barrier,
bool root_scan) {
assert((!Universe::heap()->is_in_reserved(p) ||
assert((!GenCollectedHeap::heap()->is_in_reserved(p) ||
generation()->is_in_reserved(p))
&& (generation()->level() == 0 || gc_barrier),
"The gen must be right, and we must be doing the barrier "
@ -82,7 +82,7 @@ inline void ParScanClosure::do_oop_work(T* p,
#ifndef PRODUCT
if (_g->to()->is_in_reserved(obj)) {
tty->print_cr("Scanning field (" PTR_FORMAT ") twice?", p2i(p));
GenCollectedHeap* gch = (GenCollectedHeap*)Universe::heap();
GenCollectedHeap* gch = GenCollectedHeap::heap();
Space* sp = gch->space_containing(p);
oop obj = oop(sp->block_start(p));
assert((HeapWord*)obj < (HeapWord*)p, "Error");

Some files were not shown because too many files have changed in this diff