Mirror of https://github.com/openjdk/jdk.git, synced 2025-09-21 11:34:38 +02:00

Commit fe52024032: Merge
56 changed files with 1642 additions and 1467 deletions
.hgtags (+1)
@@ -157,3 +157,4 @@ bac81e9f7d57b75fba5ab31b571f3fe0dc08af69 jdk8-b31
 a6e6d42203e6d35f9e8b31eac25b0021b4dd58ad jdk8-b33
 0ae89825c75c9492e44efb3aca3d9ee3d8a209df jdk8-b34
 f151d5833912a82cd4f203944da0305c3be83ecc jdk8-b35
+98ce9816ae089c959ba1e70fba98423a31c4e9fa jdk8-b36

.hgtags (+1)
@@ -157,3 +157,4 @@ cc771d92284f71765eca14d6d08703c4af254c04 jdk8-b21
 42f275168fa5d9e7c70b246614dca8cf81f52c2e jdk8-b33
 894a478d2c4819a1a0f230bd7bdd09f3b2de9a8c jdk8-b34
 5285317ebb4e8e4f6d8d52b5616fa801e2ea844d jdk8-b35
+6a6ba0a07f33d37a2f97b1107e60c6a9a69ec84d jdk8-b36
common/autoconf/configure (vendored, 2 changed lines)
@@ -10489,7 +10489,7 @@ $as_echo "no, disabling ccaching of precompiled headers" >&6; }
 
 # Setup default logging of stdout and stderr to build.log in the output root.
 BUILD_LOG='$(OUTPUT_ROOT)/build.log'
-BUILD_LOG_WRAPPER='$(SRC_ROOT)/common/bin/logger.sh $(BUILD_LOG)'
+BUILD_LOG_WRAPPER='$(SH) $(SRC_ROOT)/common/bin/logger.sh $(BUILD_LOG)'
@@ -1004,7 +1004,7 @@ TESTFOR_PROG_CCACHE
 
 # Setup default logging of stdout and stderr to build.log in the output root.
 BUILD_LOG='$(OUTPUT_ROOT)/build.log'
-BUILD_LOG_WRAPPER='$(SRC_ROOT)/common/bin/logger.sh $(BUILD_LOG)'
+BUILD_LOG_WRAPPER='$(SH) $(SRC_ROOT)/common/bin/logger.sh $(BUILD_LOG)'
 AC_SUBST(BUILD_LOG)
 AC_SUBST(BUILD_LOG_WRAPPER)
.hgtags (+1)
@@ -157,3 +157,4 @@ e45d6b406d5f91ff5256a5c82456ab1e7eb8becd jdk8-b25
 1e2ac1ea3f6c32a62bf88f3fa330120c30db59cb jdk8-b33
 e24c5cc8b0f7cc48374eef0f995838fb4823e0eb jdk8-b34
 e3d735914edd0a621b16bb85417423f8e6af5d51 jdk8-b35
+a5a61f259961a7f46b002e5cc50b4a9bf86927b6 jdk8-b36
.hgtags (+2)
@@ -241,3 +241,5 @@ cd47da9383cd932cb2b659064057feafa2a91134 hs24-b06
 f284b08835584517c1ca3dd67341f569e763841f jdk8-b34
 f621660a297baa48fab9dca28e99d318826e8304 jdk8-b35
 dff6e3459210f8dd0430b9b03ccc99280560da30 hs24-b08
+50b4400ca1ecb2ac2fde35f5e53ec8f04b86be7f jdk8-b36
+7d5ec8bf38d1b12e0e09ec381f10976b8beede3b hs24-b09
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2011
 
 HS_MAJOR_VER=24
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=09
+HS_BUILD_NUMBER=10
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
@@ -1462,7 +1462,11 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
       break;
 
     case Bytecodes::_l2i:
+#ifdef _LP64
+      __ movl(dest->as_register(), src->as_register_lo());
+#else
       move_regs(src->as_register_lo(), dest->as_register());
+#endif
      break;
 
    case Bytecodes::_i2b:
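
Note on the _l2i hunk above: Java's l2i conversion must keep only the low 32 bits of the long value, and on x86_64 a 32-bit movl does exactly that (it also clears the upper half of the destination register), whereas the plain register move used on 32-bit operates on the low word directly. A minimal standalone illustration of the required semantics (plain C++, hypothetical helper name, not HotSpot code):

    #include <cassert>
    #include <cstdint>

    // Java's l2i: truncate a 64-bit long to its low 32 bits (JLS 5.1.3).
    // This is the behavior the movl emitted under _LP64 implements.
    int32_t l2i(int64_t v) {
      return static_cast<int32_t>(v);  // keep only the low 32 bits
    }

    int main() {
      assert(l2i(42) == 42);            // small values pass through
      assert(l2i(0x100000002LL) == 2);  // high word is discarded
      assert(l2i(-1) == -1);            // low-word sign bits survive
      return 0;
    }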
@@ -336,7 +336,9 @@ extern "C" {
     // Return 0 (success) + file descriptor, or non-0 (error)
     if (res == 0) {
       door_desc_t desc;
-      desc.d_attributes = DOOR_DESCRIPTOR;
+      // DOOR_RELEASE flag makes sure fd is closed after passing it to
+      // the client. See door_return(3DOOR) man page.
+      desc.d_attributes = DOOR_DESCRIPTOR | DOOR_RELEASE;
       desc.d_data.d_desc.d_descriptor = return_fd;
       door_return((char*)&res, sizeof(res), &desc, 1);
     } else {
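
For readers unfamiliar with the Solaris doors API in the hunk above: door_return(3DOOR) sends the reply (and, optionally, descriptors) back to the door client and does not return on success. A condensed sketch of the descriptor-passing pattern the patch adopts (Solaris-only; hypothetical helper name, assumes <door.h> is available):

    #include <door.h>

    // Hand a file descriptor back to the door client. DOOR_RELEASE asks
    // the kernel to close the server's copy once the fd has been passed,
    // so the server does not leak one descriptor per request.
    static void reply_with_fd(int return_fd, int res) {
      door_desc_t desc;
      desc.d_attributes = DOOR_DESCRIPTOR | DOOR_RELEASE;
      desc.d_data.d_desc.d_descriptor = return_fd;
      door_return((char*)&res, sizeof(res), &desc, 1);  // no return on success
    }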
@@ -59,7 +59,7 @@ typedef struct _MODULEINFO {
 
 #include <Tlhelp32.h>
 
-typedef unsigned int socklen_t;
+typedef int socklen_t;
 
 // #include "jni.h"
@@ -4820,99 +4820,92 @@ struct hostent* os::get_host_by_name(char* name) {
   return (struct hostent*)os::WinSock2Dll::gethostbyname(name);
 }
 
 int os::socket_close(int fd) {
-  ShouldNotReachHere();
-  return 0;
+  return ::closesocket(fd);
 }
 
 int os::socket_available(int fd, jint *pbytes) {
-  ShouldNotReachHere();
-  return 0;
+  int ret = ::ioctlsocket(fd, FIONREAD, (u_long*)pbytes);
+  return (ret < 0) ? 0 : 1;
 }
 
 int os::socket(int domain, int type, int protocol) {
-  ShouldNotReachHere();
-  return 0;
+  return ::socket(domain, type, protocol);
 }
 
 int os::listen(int fd, int count) {
-  ShouldNotReachHere();
-  return 0;
+  return ::listen(fd, count);
 }
 
 int os::connect(int fd, struct sockaddr* him, socklen_t len) {
-  ShouldNotReachHere();
-  return 0;
+  return ::connect(fd, him, len);
 }
 
 int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
-  ShouldNotReachHere();
-  return 0;
+  return ::accept(fd, him, len);
 }
 
 int os::sendto(int fd, char* buf, size_t len, uint flags,
                struct sockaddr* to, socklen_t tolen) {
-  ShouldNotReachHere();
-  return 0;
+  return ::sendto(fd, buf, (int)len, flags, to, tolen);
 }
 
 int os::recvfrom(int fd, char *buf, size_t nBytes, uint flags,
                  sockaddr* from, socklen_t* fromlen) {
-  ShouldNotReachHere();
-  return 0;
+  return ::recvfrom(fd, buf, (int)nBytes, flags, from, fromlen);
 }
 
 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
-  ShouldNotReachHere();
-  return 0;
+  return ::recv(fd, buf, (int)nBytes, flags);
 }
 
 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
-  ShouldNotReachHere();
-  return 0;
+  return ::send(fd, buf, (int)nBytes, flags);
 }
 
 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
-  ShouldNotReachHere();
-  return 0;
+  return ::send(fd, buf, (int)nBytes, flags);
 }
 
 int os::timeout(int fd, long timeout) {
-  ShouldNotReachHere();
-  return 0;
+  fd_set tbl;
+  struct timeval t;
+
+  t.tv_sec  = timeout / 1000;
+  t.tv_usec = (timeout % 1000) * 1000;
+
+  tbl.fd_count    = 1;
+  tbl.fd_array[0] = fd;
+
+  return ::select(1, &tbl, 0, 0, &t);
 }
 
 int os::get_host_name(char* name, int namelen) {
-  ShouldNotReachHere();
-  return 0;
+  return ::gethostname(name, namelen);
 }
 
 int os::socket_shutdown(int fd, int howto) {
-  ShouldNotReachHere();
-  return 0;
+  return ::shutdown(fd, howto);
 }
 
 int os::bind(int fd, struct sockaddr* him, socklen_t len) {
-  ShouldNotReachHere();
-  return 0;
+  return ::bind(fd, him, len);
 }
 
 int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len) {
-  ShouldNotReachHere();
-  return 0;
+  return ::getsockname(fd, him, len);
 }
 
 int os::get_sock_opt(int fd, int level, int optname,
                      char* optval, socklen_t* optlen) {
-  ShouldNotReachHere();
-  return 0;
+  return ::getsockopt(fd, level, optname, optval, optlen);
 }
 
 int os::set_sock_opt(int fd, int level, int optname,
                      const char* optval, socklen_t optlen) {
-  ShouldNotReachHere();
-  return 0;
+  return ::setsockopt(fd, level, optname, optval, optlen);
 }
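
The hunk above turns the former ShouldNotReachHere() stubs into thin wrappers over the Winsock calls of the same shape; os::timeout() is the only non-trivial body. A standalone sketch of that select()-based wait (hypothetical helper name; Winsock-specific, assumes WSAStartup() has already run):

    #include <winsock2.h>

    // Wait up to timeout_ms for fd to become readable. Winsock's fd_set is
    // a counted array, so it can be filled directly instead of via
    // FD_ZERO/FD_SET, and the first select() argument is ignored on Windows.
    int wait_readable(SOCKET fd, long timeout_ms) {
      fd_set readers;
      readers.fd_count    = 1;
      readers.fd_array[0] = fd;

      struct timeval t;
      t.tv_sec  = timeout_ms / 1000;
      t.tv_usec = (timeout_ms % 1000) * 1000;

      // >0: readable, 0: timed out, SOCKET_ERROR: failure
      return ::select(1, &readers, NULL, NULL, &t);
    }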
@@ -3355,7 +3355,8 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
                                                     static_field_size,
                                                     total_oop_map_count,
                                                     access_flags,
-                                                    rt, CHECK_(nullHandle));
+                                                    rt, host_klass,
+                                                    CHECK_(nullHandle));
     instanceKlassHandle this_klass (THREAD, ik);
 
     assert(this_klass->static_field_size() == static_field_size, "sanity");
binaryTreeDictionary.hpp (deleted, 296 lines)
@@ -1,296 +0,0 @@
-/*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_BINARYTREEDICTIONARY_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_BINARYTREEDICTIONARY_HPP
-
-#include "gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp"
-#include "gc_implementation/concurrentMarkSweep/freeList.hpp"
-
-/*
- * A binary tree based search structure for free blocks.
- * This is currently used in the Concurrent Mark&Sweep implementation.
- */
-
-// A TreeList is a FreeList which can be used to maintain a
-// binary tree of free lists.
-
-class TreeChunk;
-class BinaryTreeDictionary;
-class AscendTreeCensusClosure;
-class DescendTreeCensusClosure;
-class DescendTreeSearchClosure;
-
-class TreeList: public FreeList {
-  friend class TreeChunk;
-  friend class BinaryTreeDictionary;
-  friend class AscendTreeCensusClosure;
-  friend class DescendTreeCensusClosure;
-  friend class DescendTreeSearchClosure;
-
- protected:
-  TreeList* parent() const { return _parent; }
-  TreeList* left()   const { return _left;   }
-  TreeList* right()  const { return _right;  }
-
-  // Accessors for links in tree.
-
-  void setLeft(TreeList* tl) {
-    _left = tl;
-    if (tl != NULL)
-      tl->setParent(this);
-  }
-  void setRight(TreeList* tl) {
-    _right = tl;
-    if (tl != NULL)
-      tl->setParent(this);
-  }
-  void setParent(TreeList* tl) { _parent = tl; }
-
-  void clearLeft()   { _left = NULL;   }
-  void clearRight()  { _right = NULL;  }
-  void clearParent() { _parent = NULL; }
-  void initialize()  { clearLeft(); clearRight(), clearParent(); }
-
-  // For constructing a TreeList from a Tree chunk or
-  // address and size.
-  static TreeList* as_TreeList(TreeChunk* tc);
-  static TreeList* as_TreeList(HeapWord* addr, size_t size);
-
-  // Returns the head of the free list as a pointer to a TreeChunk.
-  TreeChunk* head_as_TreeChunk();
-
-  // Returns the first available chunk in the free list as a pointer
-  // to a TreeChunk.
-  TreeChunk* first_available();
-
-  // Returns the block with the largest heap address amongst
-  // those in the list for this size; potentially slow and expensive,
-  // use with caution!
-  TreeChunk* largest_address();
-
-  // removeChunkReplaceIfNeeded() removes the given "tc" from the TreeList.
-  // If "tc" is the first chunk in the list, it is also the
-  // TreeList that is the node in the tree. removeChunkReplaceIfNeeded()
-  // returns the possibly replaced TreeList* for the node in
-  // the tree. It also updates the parent of the original
-  // node to point to the new node.
-  TreeList* removeChunkReplaceIfNeeded(TreeChunk* tc);
-  // See FreeList.
-  void returnChunkAtHead(TreeChunk* tc);
-  void returnChunkAtTail(TreeChunk* tc);
-};
-
-// A TreeChunk is a subclass of a FreeChunk that additionally
-// maintains a pointer to the free list on which it is currently
-// linked.
-// A TreeChunk is also used as a node in the binary tree. This
-// allows the binary tree to be maintained without any additional
-// storage (the free chunks are used). In a binary tree the first
-// chunk in the free list is also the tree node. Note that the
-// TreeChunk has an embedded TreeList for this purpose. Because
-// the first chunk in the list is distinguished in this fashion
-// (also is the node in the tree), it is the last chunk to be found
-// on the free list for a node in the tree and is only removed if
-// it is the last chunk on the free list.
-
-class TreeChunk : public FreeChunk {
-  friend class TreeList;
-  TreeList* _list;
-  TreeList _embedded_list;  // if non-null, this chunk is on _list
- protected:
-  TreeList* embedded_list() const { return (TreeList*) &_embedded_list; }
-  void set_embedded_list(TreeList* v) { _embedded_list = *v; }
- public:
-  TreeList* list() { return _list; }
-  void set_list(TreeList* v) { _list = v; }
-  static TreeChunk* as_TreeChunk(FreeChunk* fc);
-  // Initialize fields in a TreeChunk that should be
-  // initialized when the TreeChunk is being added to
-  // a free list in the tree.
-  void initialize() { embedded_list()->initialize(); }
-
-  // debugging
-  void verifyTreeChunkList() const;
-};
-
-const size_t MIN_TREE_CHUNK_SIZE = sizeof(TreeChunk)/HeapWordSize;
-
-class BinaryTreeDictionary: public FreeBlockDictionary {
-  friend class VMStructs;
-  bool      _splay;
-  size_t    _totalSize;
-  size_t    _totalFreeBlocks;
-  TreeList* _root;
-
-  // private accessors
-  bool splay() const { return _splay; }
-  void set_splay(bool v) { _splay = v; }
-  size_t totalSize() const { return _totalSize; }
-  void set_totalSize(size_t v) { _totalSize = v; }
-  virtual void inc_totalSize(size_t v);
-  virtual void dec_totalSize(size_t v);
-  size_t totalFreeBlocks() const { return _totalFreeBlocks; }
-  void set_totalFreeBlocks(size_t v) { _totalFreeBlocks = v; }
-  TreeList* root() const { return _root; }
-  void set_root(TreeList* v) { _root = v; }
-
-  // Remove a chunk of size "size" or larger from the tree and
-  // return it. If the chunk
-  // is the last chunk of that size, remove the node for that size
-  // from the tree.
-  TreeChunk* getChunkFromTree(size_t size, Dither dither, bool splay);
-  // Return a list of the specified size or NULL from the tree.
-  // The list is not removed from the tree.
-  TreeList* findList (size_t size) const;
-  // Remove this chunk from the tree. If the removal results
-  // in an empty list in the tree, remove the empty list.
-  TreeChunk* removeChunkFromTree(TreeChunk* tc);
-  // Remove the node in the trees starting at tl that has the
-  // minimum value and return it. Repair the tree as needed.
-  TreeList* removeTreeMinimum(TreeList* tl);
-  void semiSplayStep(TreeList* tl);
-  // Add this free chunk to the tree.
-  void insertChunkInTree(FreeChunk* freeChunk);
- public:
-  void verifyTree() const;
-  // verify that the given chunk is in the tree.
-  bool verifyChunkInFreeLists(FreeChunk* tc) const;
- private:
-  void verifyTreeHelper(TreeList* tl) const;
-  static size_t verifyPrevFreePtrs(TreeList* tl);
-
-  // Returns the total number of chunks in the list.
-  size_t totalListLength(TreeList* tl) const;
-  // Returns the total number of words in the chunks in the tree
-  // starting at "tl".
-  size_t totalSizeInTree(TreeList* tl) const;
-  // Returns the sum of the square of the size of each block
-  // in the tree starting at "tl".
-  double sum_of_squared_block_sizes(TreeList* const tl) const;
-  // Returns the total number of free blocks in the tree starting
-  // at "tl".
-  size_t totalFreeBlocksInTree(TreeList* tl) const;
-  size_t numFreeBlocks() const;
-  size_t treeHeight() const;
-  size_t treeHeightHelper(TreeList* tl) const;
-  size_t totalNodesInTree(TreeList* tl) const;
-  size_t totalNodesHelper(TreeList* tl) const;
-
- public:
-  // Constructor
-  BinaryTreeDictionary(MemRegion mr, bool splay = false);
-
-  // Reset the dictionary to the initial conditions with
-  // a single free chunk.
-  void reset(MemRegion mr);
-  void reset(HeapWord* addr, size_t size);
-  // Reset the dictionary to be empty.
-  void reset();
-
-  // Return a chunk of size "size" or greater from
-  // the tree.
-  // want a better dynamic splay strategy for the future.
-  FreeChunk* getChunk(size_t size, Dither dither) {
-    verify_par_locked();
-    FreeChunk* res = getChunkFromTree(size, dither, splay());
-    assert(res == NULL || res->isFree(),
-           "Should be returning a free chunk");
-    return res;
-  }
-
-  void returnChunk(FreeChunk* chunk) {
-    verify_par_locked();
-    insertChunkInTree(chunk);
-  }
-
-  void removeChunk(FreeChunk* chunk) {
-    verify_par_locked();
-    removeChunkFromTree((TreeChunk*)chunk);
-    assert(chunk->isFree(), "Should still be a free chunk");
-  }
-
-  size_t maxChunkSize() const;
-  size_t totalChunkSize(debug_only(const Mutex* lock)) const {
-    debug_only(
-      if (lock != NULL && lock->owned_by_self()) {
-        assert(totalSizeInTree(root()) == totalSize(),
-               "_totalSize inconsistency");
-      }
-    )
-    return totalSize();
-  }
-
-  size_t minSize() const {
-    return MIN_TREE_CHUNK_SIZE;
-  }
-
-  double sum_of_squared_block_sizes() const {
-    return sum_of_squared_block_sizes(root());
-  }
-
-  FreeChunk* find_chunk_ends_at(HeapWord* target) const;
-
-  // Find the list with size "size" in the binary tree and update
-  // the statistics in the list according to "split" (chunk was
-  // split or coalesce) and "birth" (chunk was added or removed).
-  void dictCensusUpdate(size_t size, bool split, bool birth);
-  // Return true if the dictionary is overpopulated (more chunks of
-  // this size than desired) for size "size".
-  bool coalDictOverPopulated(size_t size);
-  // Methods called at the beginning of a sweep to prepare the
-  // statistics for the sweep.
-  void beginSweepDictCensus(double coalSurplusPercent,
-                            float inter_sweep_current,
-                            float inter_sweep_estimate,
-                            float intra_sweep_estimate);
-  // Methods called after the end of a sweep to modify the
-  // statistics for the sweep.
-  void endSweepDictCensus(double splitSurplusPercent);
-  // Return the largest free chunk in the tree.
-  FreeChunk* findLargestDict() const;
-  // Accessors for statistics
-  void setTreeSurplus(double splitSurplusPercent);
-  void setTreeHints(void);
-  // Reset statistics for all the lists in the tree.
-  void clearTreeCensus(void);
-  // Print the statistcis for all the lists in the tree. Also may
-  // print out summaries.
-  void printDictCensus(void) const;
-  void print_free_lists(outputStream* st) const;
-
-  // For debugging. Returns the sum of the _returnedBytes for
-  // all lists in the tree.
-  size_t sumDictReturnedBytes() PRODUCT_RETURN0;
-  // Sets the _returnedBytes for all the lists in the tree to zero.
-  void initializeDictReturnedBytes() PRODUCT_RETURN;
-  // For debugging. Return the total number of chunks in the dictionary.
-  size_t totalCount() PRODUCT_RETURN0;
-
-  void reportStatistics() const;
-
-  void verify() const;
-};
-
-#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_BINARYTREEDICTIONARY_HPP
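
The 296-line header deleted above does not simply vanish: the hunks that follow show every caller switching to FreeBlockDictionary<FreeChunk>, BinaryTreeDictionary<FreeChunk>, TreeChunk<FreeChunk> and FreeList<FreeChunk>, i.e. the CMS free-list and dictionary classes were rewritten as templates parameterized on the chunk type, with camelCase accessors renamed to snake_case. A toy sketch of that shape (illustrative names and stub bodies only, not the actual HotSpot declarations):

    #include <cstddef>

    // Illustrative only: the chunk type becomes a template parameter, so
    // the same dictionary code can serve FreeChunk and other chunk types.
    template <class Chunk>
    class FreeList {
     public:
      Chunk* get_chunk_at_head()          { return NULL; }  // stub
      void   return_chunk_at_head(Chunk*) {}                // stub
      void   return_chunk_at_tail(Chunk*) {}                // stub
    };

    template <class Chunk>
    class BinaryTreeDictionary {
      size_t _total_size;  // snake_case replaces the old camelCase members
     public:
      Chunk* get_chunk(size_t)         { return NULL; }     // stub
      void   return_chunk(Chunk*)      {}                   // stub
      Chunk* find_largest_dict() const { return NULL; }     // stub
    };

    // Call sites then spell out the instantiation, as in the hunks below:
    //   _dictionary = new BinaryTreeDictionary<FreeChunk>(mr, use_adaptive_freelists);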
@@ -38,7 +38,7 @@
 
 CMSPermGen::CMSPermGen(ReservedSpace rs, size_t initial_byte_size,
                        CardTableRS* ct,
-                       FreeBlockDictionary::DictionaryChoice dictionaryChoice) {
+                       FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) {
   CMSPermGenGen* g =
     new CMSPermGenGen(rs, initial_byte_size, -1, ct);
   if (g == NULL) {

@@ -45,7 +45,7 @@ class CMSPermGen: public PermGen {
 
  public:
   CMSPermGen(ReservedSpace rs, size_t initial_byte_size,
-             CardTableRS* ct, FreeBlockDictionary::DictionaryChoice);
+             CardTableRS* ct, FreeBlockDictionary<FreeChunk>::DictionaryChoice);
 
   HeapWord* mem_allocate(size_t size);

@@ -65,7 +65,7 @@ public:
   // regarding not using adaptive free lists for a perm gen.
   ConcurrentMarkSweepGeneration(rs, initial_byte_size, // MinPermHeapExapnsion
                                 level, ct, false /* use adaptive freelists */,
-                                (FreeBlockDictionary::DictionaryChoice)CMSDictionaryChoice)
+                                (FreeBlockDictionary<FreeChunk>::DictionaryChoice)CMSDictionaryChoice)
   {}
 
   void initialize_performance_counters();
@@ -69,7 +69,7 @@ void CompactibleFreeListSpace::set_cms_values() {
 // Constructor
 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
   MemRegion mr, bool use_adaptive_freelists,
-  FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
+  FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
   _dictionaryChoice(dictionaryChoice),
   _adaptive_freelists(use_adaptive_freelists),
   _bt(bs, mr),

@@ -87,6 +87,8 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
                CMSConcMarkMultiple),
   _collector(NULL)
 {
+  assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
+         "FreeChunk is larger than expected");
   _bt.set_space(this);
   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
   // We have all of "mr", all of which we place in the dictionary

@@ -96,13 +98,13 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
   // implementation, namely, the simple binary tree (splaying
   // temporarily disabled).
   switch (dictionaryChoice) {
-    case FreeBlockDictionary::dictionarySplayTree:
-    case FreeBlockDictionary::dictionarySkipList:
+    case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
+    case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
     default:
       warning("dictionaryChoice: selected option not understood; using"
              " default BinaryTreeDictionary implementation instead.");
-    case FreeBlockDictionary::dictionaryBinaryTree:
-      _dictionary = new BinaryTreeDictionary(mr);
+    case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
+      _dictionary = new BinaryTreeDictionary<FreeChunk>(mr, use_adaptive_freelists);
      break;
   }
   assert(_dictionary != NULL, "CMS dictionary initialization");

@@ -117,7 +119,7 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
   // moved to its new location before the klass is moved.
   // Set the _refillSize for the linear allocation blocks
   if (!use_adaptive_freelists) {
-    FreeChunk* fc = _dictionary->getChunk(mr.word_size());
+    FreeChunk* fc = _dictionary->get_chunk(mr.word_size());
     // The small linAB initially has all the space and will allocate
     // a chunk of any size.
     HeapWord* addr = (HeapWord*) fc;
@@ -273,12 +275,12 @@ void CompactibleFreeListSpace::reset(MemRegion mr) {
     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
     _bt.single_block(mr.start(), mr.word_size());
     FreeChunk* fc = (FreeChunk*) mr.start();
-    fc->setSize(mr.word_size());
+    fc->set_size(mr.word_size());
     if (mr.word_size() >= IndexSetSize ) {
       returnChunkToDictionary(fc);
     } else {
       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
-      _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
+      _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
     }
   }
   _promoInfo.reset();

@@ -296,7 +298,7 @@ void CompactibleFreeListSpace::reset_after_compaction() {
   } else {
     // Place as much of mr in the linAB as we can get,
     // provided it was big enough to go into the dictionary.
-    FreeChunk* fc = dictionary()->findLargestDict();
+    FreeChunk* fc = dictionary()->find_largest_dict();
     if (fc != NULL) {
       assert(fc->size() == mr.word_size(),
              "Why was the chunk broken up?");

@@ -323,14 +325,14 @@ FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 #ifndef PRODUCT
 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
+    _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
   }
 }
 
 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
   size_t sum = 0;
   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
+    sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
   }
   return sum;
 }

@@ -354,7 +356,7 @@ size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 
 size_t CompactibleFreeListSpace::totalCount() {
   size_t num = totalCountInIndexedFreeLists();
-  num += dictionary()->totalCount();
+  num += dictionary()->total_count();
   if (_smallLinearAllocBlock._word_size != 0) {
     num++;
   }
@@ -364,7 +366,7 @@ size_t CompactibleFreeListSpace::totalCount() {
 
 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
   FreeChunk* fc = (FreeChunk*) p;
-  return fc->isFree();
+  return fc->is_free();
 }
 
 size_t CompactibleFreeListSpace::used() const {

@@ -391,7 +393,7 @@ size_t CompactibleFreeListSpace::free() const {
   // that supports jvmstat, and you are apt to see the values
   // flicker in such cases.
   assert(_dictionary != NULL, "No _dictionary?");
-  return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
+  return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
           totalSizeInIndexedFreeLists() +
          _smallLinearAllocBlock._word_size) * HeapWordSize;
 }

@@ -399,7 +401,7 @@ size_t CompactibleFreeListSpace::free() const {
 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
   assert(_dictionary != NULL, "No _dictionary?");
   assert_locked();
-  size_t res = _dictionary->maxChunkSize();
+  size_t res = _dictionary->max_chunk_size();
   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
                        (size_t) SmallForLinearAlloc - 1));
   // XXX the following could potentially be pretty slow;

@@ -448,7 +450,7 @@ const {
   reportIndexedFreeListStatistics();
   gclog_or_tty->print_cr("Layout of Indexed Freelists");
   gclog_or_tty->print_cr("---------------------------");
-  FreeList::print_labels_on(st, "size");
+  FreeList<FreeChunk>::print_labels_on(st, "size");
   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
     _indexedFreeList[i].print_on(gclog_or_tty);
     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;

@@ -467,7 +469,7 @@ const {
 
 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 const {
-  _dictionary->reportStatistics();
+  _dictionary->report_statistics();
   st->print_cr("Layout of Freelists in Tree");
   st->print_cr("---------------------------");
   _dictionary->print_free_lists(st);
@@ -545,12 +547,12 @@ void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
 void CompactibleFreeListSpace::reportFreeListStatistics() const {
   assert_lock_strong(&_freelistLock);
   assert(PrintFLSStatistics != 0, "Reporting error");
-  _dictionary->reportStatistics();
+  _dictionary->report_statistics();
   if (PrintFLSStatistics > 1) {
     reportIndexedFreeListStatistics();
-    size_t totalSize = totalSizeInIndexedFreeLists() +
-                       _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
-    gclog_or_tty->print(" free=%ld frag=%1.4f\n", totalSize, flsFrag());
+    size_t total_size = totalSizeInIndexedFreeLists() +
+                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
+    gclog_or_tty->print(" free=%ld frag=%1.4f\n", total_size, flsFrag());
   }
 }

@@ -558,13 +560,13 @@ void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
   assert_lock_strong(&_freelistLock);
   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
                       "--------------------------------\n");
-  size_t totalSize = totalSizeInIndexedFreeLists();
-  size_t freeBlocks = numFreeBlocksInIndexedFreeLists();
-  gclog_or_tty->print("Total Free Space: %d\n", totalSize);
+  size_t total_size = totalSizeInIndexedFreeLists();
+  size_t free_blocks = numFreeBlocksInIndexedFreeLists();
+  gclog_or_tty->print("Total Free Space: %d\n", total_size);
   gclog_or_tty->print("Max Chunk Size: %d\n", maxChunkSizeInIndexedFreeLists());
-  gclog_or_tty->print("Number of Blocks: %d\n", freeBlocks);
-  if (freeBlocks != 0) {
-    gclog_or_tty->print("Av. Block Size: %d\n", totalSize/freeBlocks);
+  gclog_or_tty->print("Number of Blocks: %d\n", free_blocks);
+  if (free_blocks != 0) {
+    gclog_or_tty->print("Av. Block Size: %d\n", total_size/free_blocks);
   }
 }
@@ -911,7 +913,7 @@ CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
   for (addr = bottom(), last = end();
        addr < last; addr += size) {
     FreeChunk* fc = (FreeChunk*)addr;
-    if (fc->isFree()) {
+    if (fc->is_free()) {
       // Since we hold the free list lock, which protects direct
       // allocation in this generation by mutators, a free object
       // will remain free throughout this iteration code.

@@ -953,7 +955,7 @@ CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
   for (addr = block_start_careful(mr.start()), end = mr.end();
        addr < end; addr += size) {
     FreeChunk* fc = (FreeChunk*)addr;
-    if (fc->isFree()) {
+    if (fc->is_free()) {
       // Since we hold the free list lock, which protects direct
       // allocation in this generation by mutators, a free object
       // will remain free throughout this iteration code.

@@ -1069,7 +1071,7 @@ size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
   NOT_PRODUCT(verify_objects_initialized());
   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
   FreeChunk* fc = (FreeChunk*)p;
-  if (fc->isFree()) {
+  if (fc->is_free()) {
     return fc->size();
   } else {
     // Ignore mark word because this may be a recently promoted

@@ -1160,7 +1162,7 @@ bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
   FreeChunk* fc = (FreeChunk*)p;
   assert(is_in_reserved(p), "Should be in space");
   assert(_bt.block_start(p) == p, "Should be a block boundary");
-  if (!fc->isFree()) {
+  if (!fc->is_free()) {
     // Ignore mark word because it may have been used to
     // chain together promoted objects (the last one
     // would have a null value).

@@ -1222,7 +1224,7 @@ HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
 
   FreeChunk* fc = (FreeChunk*)res;
   fc->markNotFree();
-  assert(!fc->isFree(), "shouldn't be marked free");
+  assert(!fc->is_free(), "shouldn't be marked free");
   assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
   // Verify that the block offset table shows this to
   // be a single block, but not one which is unallocated.
@@ -1331,10 +1333,10 @@ FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
   size_t currSize = numWords + MinChunkSize;
   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
-    FreeList* fl = &_indexedFreeList[i];
+    FreeList<FreeChunk>* fl = &_indexedFreeList[i];
     if (fl->head()) {
       ret = getFromListGreater(fl, numWords);
-      assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
+      assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
       return ret;
     }
   }

@@ -1345,7 +1347,7 @@ FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
   /* Try to get a chunk that satisfies request, while avoiding
      fragmentation that can't be handled. */
   {
-    ret = dictionary()->getChunk(currSize);
+    ret = dictionary()->get_chunk(currSize);
     if (ret != NULL) {
       assert(ret->size() - numWords >= MinChunkSize,
              "Chunk is too small");

@@ -1353,10 +1355,10 @@ FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
       /* Carve returned chunk. */
       (void) splitChunkAndReturnRemainder(ret, numWords);
       /* Label this as no longer a free chunk. */
-      assert(ret->isFree(), "This chunk should be free");
-      ret->linkPrev(NULL);
+      assert(ret->is_free(), "This chunk should be free");
+      ret->link_prev(NULL);
     }
-    assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
+    assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
     return ret;
   }
   ShouldNotReachHere();

@@ -1364,7 +1366,7 @@ FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
 
 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
-  return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
+  return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
 }
 
 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {

@@ -1378,13 +1380,13 @@ bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc)
 // Check if the purported free chunk is present either as a linear
 // allocation block, the size-indexed table of (smaller) free blocks,
 // or the larger free blocks kept in the binary tree dictionary.
-bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
+bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
   if (verify_chunk_is_linear_alloc_block(fc)) {
     return true;
   } else if (fc->size() < IndexSetSize) {
     return verifyChunkInIndexedFreeLists(fc);
   } else {
-    return dictionary()->verifyChunkInFreeLists(fc);
+    return dictionary()->verify_chunk_in_free_list(fc);
   }
 }
@@ -1412,7 +1414,7 @@ FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
   }
   if (fc != NULL) {
     fc->dontCoalesce();
-    assert(fc->isFree(), "Should be free, but not coalescable");
+    assert(fc->is_free(), "Should be free, but not coalescable");
     // Verify that the block offset table shows this to
     // be a single block, but not one which is unallocated.
     _bt.verify_single_block((HeapWord*)fc, fc->size());

@@ -1492,7 +1494,7 @@ CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
       }
       // Return the chunk that isn't big enough, and then refill below.
       addChunkToFreeLists(blk->_ptr, sz);
-      splitBirth(sz);
+      split_birth(sz);
       // Don't keep statistics on adding back chunk from a LinAB.
     } else {
       // A refilled block would not satisfy the request.

@@ -1504,14 +1506,14 @@ CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
          "block was replenished");
   if (res != NULL) {
-    splitBirth(size);
+    split_birth(size);
     repairLinearAllocBlock(blk);
   } else if (blk->_ptr != NULL) {
     res = blk->_ptr;
     size_t blk_size = blk->_word_size;
     blk->_word_size -= size;
     blk->_ptr += size;
-    splitBirth(size);
+    split_birth(size);
     repairLinearAllocBlock(blk);
     // Update BOT last so that other (parallel) GC threads see a consistent
     // view of the BOT and free blocks.

@@ -1540,7 +1542,7 @@ HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
   size_t blk_size = blk->_word_size;
   blk->_word_size -= size;
   blk->_ptr += size;
-  splitBirth(size);
+  split_birth(size);
   repairLinearAllocBlock(blk);
   // Update BOT last so that other (parallel) GC threads see a consistent
   // view of the BOT and free blocks.
@@ -1557,7 +1559,7 @@ CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
   assert_locked();
   assert(size < SmallForDictionary, "just checking");
   FreeChunk* res;
-  res = _indexedFreeList[size].getChunkAtHead();
+  res = _indexedFreeList[size].get_chunk_at_head();
   if (res == NULL) {
     res = getChunkFromIndexedFreeListHelper(size);
   }

@@ -1591,7 +1593,7 @@ CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
       // Do not replenish from an underpopulated size.
       if (_indexedFreeList[replenish_size].surplus() > 0 &&
          _indexedFreeList[replenish_size].head() != NULL) {
-        newFc = _indexedFreeList[replenish_size].getChunkAtHead();
+        newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
       } else if (bestFitFirst()) {
         newFc = bestFitSmall(replenish_size);
       }

@@ -1624,13 +1626,13 @@ CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
              i < (num_blk - 1);
              curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
              i++) {
-          curFc->setSize(size);
+          curFc->set_size(size);
           // Don't record this as a return in order to try and
           // determine the "returns" from a GC.
           _bt.verify_not_unallocated((HeapWord*) fc, size);
-          _indexedFreeList[size].returnChunkAtTail(curFc, false);
+          _indexedFreeList[size].return_chunk_at_tail(curFc, false);
           _bt.mark_block((HeapWord*)curFc, size);
-          splitBirth(size);
+          split_birth(size);
           // Don't record the initial population of the indexed list
           // as a split birth.
         }

@@ -1638,9 +1640,9 @@ CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
         // check that the arithmetic was OK above
         assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
                "inconsistency in carving newFc");
-        curFc->setSize(size);
+        curFc->set_size(size);
         _bt.mark_block((HeapWord*)curFc, size);
-        splitBirth(size);
+        split_birth(size);
         fc = curFc;
       } else {
         // Return entire block to caller

@@ -1653,14 +1655,14 @@ CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
     // replenish the indexed free list.
     fc = getChunkFromDictionaryExact(size);
   }
-  // assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
+  // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
   return fc;
 }
 
 FreeChunk*
 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
   assert_locked();
-  FreeChunk* fc = _dictionary->getChunk(size);
+  FreeChunk* fc = _dictionary->get_chunk(size);
   if (fc == NULL) {
     return NULL;
   }
@@ -1677,7 +1679,7 @@ CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
 FreeChunk*
 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
   assert_locked();
-  FreeChunk* fc = _dictionary->getChunk(size);
+  FreeChunk* fc = _dictionary->get_chunk(size);
   if (fc == NULL) {
     return fc;
   }

@@ -1686,11 +1688,11 @@ CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
     _bt.verify_single_block((HeapWord*)fc, size);
     return fc;
   }
-  assert(fc->size() > size, "getChunk() guarantee");
+  assert(fc->size() > size, "get_chunk() guarantee");
   if (fc->size() < size + MinChunkSize) {
     // Return the chunk to the dictionary and go get a bigger one.
     returnChunkToDictionary(fc);
-    fc = _dictionary->getChunk(size + MinChunkSize);
+    fc = _dictionary->get_chunk(size + MinChunkSize);
     if (fc == NULL) {
       return NULL;
     }

@@ -1711,10 +1713,10 @@ CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
   _bt.verify_single_block((HeapWord*)chunk, size);
   // adjust _unallocated_block downward, as necessary
   _bt.freed((HeapWord*)chunk, size);
-  _dictionary->returnChunk(chunk);
+  _dictionary->return_chunk(chunk);
 #ifndef PRODUCT
   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
-    TreeChunk::as_TreeChunk(chunk)->list()->verify_stats();
+    TreeChunk<FreeChunk>::as_TreeChunk(chunk)->list()->verify_stats();
   }
 #endif // PRODUCT
 }

@@ -1726,9 +1728,9 @@ CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
   _bt.verify_single_block((HeapWord*) fc, size);
   _bt.verify_not_unallocated((HeapWord*) fc, size);
   if (_adaptive_freelists) {
-    _indexedFreeList[size].returnChunkAtTail(fc);
+    _indexedFreeList[size].return_chunk_at_tail(fc);
   } else {
-    _indexedFreeList[size].returnChunkAtHead(fc);
+    _indexedFreeList[size].return_chunk_at_head(fc);
   }
 #ifndef PRODUCT
   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
@@ -1756,7 +1758,7 @@ CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
   FreeChunk* ec;
   {
     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
-    ec = dictionary()->findLargestDict(); // get largest block
+    ec = dictionary()->find_largest_dict(); // get largest block
     if (ec != NULL && ec->end() == chunk) {
       // It's a coterminal block - we can coalesce.
       size_t old_size = ec->size();

@@ -1767,7 +1769,7 @@ CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
       ec = (FreeChunk*)chunk;
     }
   }
-  ec->setSize(size);
+  ec->set_size(size);
   debug_only(ec->mangleFreed(size));
   if (size < SmallForDictionary) {
     lock = _indexedFreeListParLocks[size];

@@ -1790,7 +1792,7 @@ CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
   _bt.verify_single_block(chunk, size);
 
   FreeChunk* fc = (FreeChunk*) chunk;
-  fc->setSize(size);
+  fc->set_size(size);
   debug_only(fc->mangleFreed(size));
   if (size < SmallForDictionary) {
     returnChunkToFreeList(fc);

@@ -1833,7 +1835,7 @@ CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
   assert_locked();
   assert(fc != NULL, "null chunk");
   _bt.verify_single_block((HeapWord*)fc, size);
-  _dictionary->removeChunk(fc);
+  _dictionary->remove_chunk(fc);
   // adjust _unallocated_block upward, as necessary
   _bt.allocated((HeapWord*)fc, size);
 }

@@ -1848,7 +1850,7 @@ CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
       verifyIndexedFreeList(size);
     }
   )
-  _indexedFreeList[size].removeChunk(fc);
+  _indexedFreeList[size].remove_chunk(fc);
   NOT_PRODUCT(
     if (FLSVerifyIndexTable) {
       verifyIndexedFreeList(size);
@@ -1862,17 +1864,17 @@ FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
      the excess is >= MIN_CHUNK. */
   size_t start = align_object_size(numWords + MinChunkSize);
   if (start < IndexSetSize) {
-    FreeList* it = _indexedFreeList;
+    FreeList<FreeChunk>* it = _indexedFreeList;
     size_t hint = _indexedFreeList[start].hint();
     while (hint < IndexSetSize) {
       assert(hint % MinObjAlignment == 0, "hint should be aligned");
-      FreeList *fl = &_indexedFreeList[hint];
+      FreeList<FreeChunk> *fl = &_indexedFreeList[hint];
       if (fl->surplus() > 0 && fl->head() != NULL) {
         // Found a list with surplus, reset original hint
         // and split out a free chunk which is returned.
         _indexedFreeList[start].set_hint(hint);
         FreeChunk* res = getFromListGreater(fl, numWords);
-        assert(res == NULL || res->isFree(),
+        assert(res == NULL || res->is_free(),
                "Should be returning a free chunk");
         return res;
       }

@@ -1885,7 +1887,7 @@ FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
 }
 
 /* Requires fl->size >= numWords + MinChunkSize */
-FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl,
+FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList<FreeChunk>* fl,
                                                         size_t numWords) {
   FreeChunk *curr = fl->head();
   size_t oldNumWords = curr->size();

@@ -1894,13 +1896,13 @@ FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl,
   assert(oldNumWords >= numWords + MinChunkSize,
          "Size of chunks in the list is too small");
 
-  fl->removeChunk(curr);
+  fl->remove_chunk(curr);
   // recorded indirectly by splitChunkAndReturnRemainder -
   // smallSplit(oldNumWords, numWords);
   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
   // Does anything have to be done for the remainder in terms of
   // fixing the card table?
-  assert(new_chunk == NULL || new_chunk->isFree(),
+  assert(new_chunk == NULL || new_chunk->is_free(),
          "Should be returning a free chunk");
   return new_chunk;
 }
@@ -1918,13 +1920,13 @@ CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
   assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
   assert(is_aligned(ffc), "alignment problem");
-  ffc->setSize(rem_size);
-  ffc->linkNext(NULL);
-  ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
+  ffc->set_size(rem_size);
+  ffc->link_next(NULL);
+  ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
   // Above must occur before BOT is updated below.
   // adjust block offset table
   OrderAccess::storestore();
-  assert(chunk->isFree() && ffc->isFree(), "Error");
+  assert(chunk->is_free() && ffc->is_free(), "Error");
   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
   if (rem_size < SmallForDictionary) {
     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);

@@ -1939,7 +1941,7 @@ CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
     returnChunkToDictionary(ffc);
     split(size ,rem_size);
   }
-  chunk->setSize(new_size);
+  chunk->set_size(new_size);
   return chunk;
 }

@@ -2046,10 +2048,10 @@ void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
            "Minimum block size requirement");
     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
-    fc->setSize(blk->_word_size);
-    fc->linkPrev(NULL); // mark as free
+    fc->set_size(blk->_word_size);
+    fc->link_prev(NULL); // mark as free
     fc->dontCoalesce();
-    assert(fc->isFree(), "just marked it free");
+    assert(fc->is_free(), "just marked it free");
     assert(fc->cantCoalesce(), "just marked it uncoalescable");
   }
 }
@@ -2149,7 +2151,7 @@ double CompactibleFreeListSpace::flsFrag() const {
   }
 
   double totFree = itabFree +
-                   _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
+                   _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
   if (totFree > 0) {
     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
             (totFree * totFree));

@@ -2167,16 +2169,16 @@ void CompactibleFreeListSpace::beginSweepFLCensus(
   assert_locked();
   size_t i;
   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    FreeList* fl = &_indexedFreeList[i];
+    FreeList<FreeChunk>* fl = &_indexedFreeList[i];
     if (PrintFLSStatistics > 1) {
       gclog_or_tty->print("size[%d] : ", i);
     }
     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
-    fl->set_coalDesired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
-    fl->set_beforeSweep(fl->count());
-    fl->set_bfrSurp(fl->surplus());
+    fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
+    fl->set_before_sweep(fl->count());
+    fl->set_bfr_surp(fl->surplus());
   }
-  _dictionary->beginSweepDictCensus(CMSLargeCoalSurplusPercent,
+  _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
                                     inter_sweep_current,
                                     inter_sweep_estimate,
                                     intra_sweep_estimate);

@@ -2186,7 +2188,7 @@ void CompactibleFreeListSpace::setFLSurplus() {
   assert_locked();
   size_t i;
   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    FreeList *fl = &_indexedFreeList[i];
+    FreeList<FreeChunk> *fl = &_indexedFreeList[i];
     fl->set_surplus(fl->count() -
                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
   }

@@ -2197,7 +2199,7 @@ void CompactibleFreeListSpace::setFLHints() {
   size_t i;
   size_t h = IndexSetSize;
   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
-    FreeList *fl = &_indexedFreeList[i];
+    FreeList<FreeChunk> *fl = &_indexedFreeList[i];
     fl->set_hint(h);
     if (fl->surplus() > 0) {
       h = i;
@@ -2209,18 +2211,18 @@ void CompactibleFreeListSpace::clearFLCensus() {
   assert_locked();
   size_t i;
   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    FreeList *fl = &_indexedFreeList[i];
-    fl->set_prevSweep(fl->count());
-    fl->set_coalBirths(0);
-    fl->set_coalDeaths(0);
-    fl->set_splitBirths(0);
-    fl->set_splitDeaths(0);
+    FreeList<FreeChunk> *fl = &_indexedFreeList[i];
+    fl->set_prev_sweep(fl->count());
+    fl->set_coal_births(0);
+    fl->set_coal_deaths(0);
+    fl->set_split_births(0);
+    fl->set_split_deaths(0);
   }
 }
 
 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
   if (PrintFLSStatistics > 0) {
-    HeapWord* largestAddr = (HeapWord*) dictionary()->findLargestDict();
+    HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
                            largestAddr);
   }

@@ -2231,30 +2233,30 @@ void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
   }
   clearFLCensus();
   assert_locked();
-  _dictionary->endSweepDictCensus(CMSLargeSplitSurplusPercent);
+  _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
 }
 
 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
   if (size < SmallForDictionary) {
-    FreeList *fl = &_indexedFreeList[size];
-    return (fl->coalDesired() < 0) ||
-           ((int)fl->count() > fl->coalDesired());
+    FreeList<FreeChunk> *fl = &_indexedFreeList[size];
+    return (fl->coal_desired() < 0) ||
+           ((int)fl->count() > fl->coal_desired());
   } else {
-    return dictionary()->coalDictOverPopulated(size);
+    return dictionary()->coal_dict_over_populated(size);
   }
 }
 
 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
   assert(size < SmallForDictionary, "Size too large for indexed list");
-  FreeList *fl = &_indexedFreeList[size];
-  fl->increment_coalBirths();
+  FreeList<FreeChunk> *fl = &_indexedFreeList[size];
+  fl->increment_coal_births();
   fl->increment_surplus();
 }
 
 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
   assert(size < SmallForDictionary, "Size too large for indexed list");
-  FreeList *fl = &_indexedFreeList[size];
-  fl->increment_coalDeaths();
+  FreeList<FreeChunk> *fl = &_indexedFreeList[size];
+  fl->increment_coal_deaths();
   fl->decrement_surplus();
 }
@@ -2262,7 +2264,7 @@ void CompactibleFreeListSpace::coalBirth(size_t size) {
   if (size < SmallForDictionary) {
     smallCoalBirth(size);
   } else {
-    dictionary()->dictCensusUpdate(size,
+    dictionary()->dict_census_udpate(size,
                                    false /* split */,
                                    true /* birth */);
   }

@@ -2272,7 +2274,7 @@ void CompactibleFreeListSpace::coalDeath(size_t size) {
   if(size < SmallForDictionary) {
     smallCoalDeath(size);
   } else {
-    dictionary()->dictCensusUpdate(size,
+    dictionary()->dict_census_udpate(size,
                                    false /* split */,
                                    false /* birth */);
   }

@@ -2280,23 +2282,23 @@ void CompactibleFreeListSpace::coalDeath(size_t size) {
 
 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
   assert(size < SmallForDictionary, "Size too large for indexed list");
-  FreeList *fl = &_indexedFreeList[size];
-  fl->increment_splitBirths();
+  FreeList<FreeChunk> *fl = &_indexedFreeList[size];
+  fl->increment_split_births();
   fl->increment_surplus();
 }
 
 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
   assert(size < SmallForDictionary, "Size too large for indexed list");
-  FreeList *fl = &_indexedFreeList[size];
-  fl->increment_splitDeaths();
+  FreeList<FreeChunk> *fl = &_indexedFreeList[size];
+  fl->increment_split_deaths();
   fl->decrement_surplus();
 }
 
-void CompactibleFreeListSpace::splitBirth(size_t size) {
+void CompactibleFreeListSpace::split_birth(size_t size) {
   if (size < SmallForDictionary) {
     smallSplitBirth(size);
   } else {
-    dictionary()->dictCensusUpdate(size,
+    dictionary()->dict_census_udpate(size,
                                    true /* split */,
                                    true /* birth */);
   }

@@ -2306,7 +2308,7 @@ void CompactibleFreeListSpace::splitDeath(size_t size) {
   if (size < SmallForDictionary) {
     smallSplitDeath(size);
   } else {
-    dictionary()->dictCensusUpdate(size,
+    dictionary()->dict_census_udpate(size,
                                    true /* split */,
                                    false /* birth */);
  }
@@ -2315,8 +2317,8 @@ void CompactibleFreeListSpace::splitDeath(size_t size) {
 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
   size_t to2 = from - to1;
   splitDeath(from);
-  splitBirth(to1);
-  splitBirth(to2);
+  split_birth(to1);
+  split_birth(to2);
 }
 
 void CompactibleFreeListSpace::print() const {

@@ -2362,7 +2364,7 @@ class VerifyAllBlksClosure: public BlkClosure {
       FreeChunk* fc = (FreeChunk*)addr;
       res = fc->size();
       if (FLSVerifyLists && !fc->cantCoalesce()) {
-        guarantee(_sp->verifyChunkInFreeLists(fc),
+        guarantee(_sp->verify_chunk_in_free_list(fc),
                   "Chunk should be on a free list");
       }
     }

@@ -2518,7 +2520,7 @@ void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
             "Slot should have been empty");
   for (; fc != NULL; fc = fc->next(), n++) {
     guarantee(fc->size() == size, "Size inconsistency");
-    guarantee(fc->isFree(), "!free?");
+    guarantee(fc->is_free(), "!free?");
     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
   }

@@ -2527,10 +2529,10 @@ void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
 
 #ifndef PRODUCT
 void CompactibleFreeListSpace::check_free_list_consistency() const {
-  assert(_dictionary->minSize() <= IndexSetSize,
+  assert(_dictionary->min_size() <= IndexSetSize,
          "Some sizes can't be allocated without recourse to"
          " linear allocation buffers");
-  assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
+  assert(BinaryTreeDictionary<FreeChunk>::min_tree_chunk_size*HeapWordSize == sizeof(TreeChunk<FreeChunk>),
         "else MIN_TREE_CHUNK_SIZE is wrong");
   assert((IndexSetStride == 2 && IndexSetStart == 4) || // 32-bit
          (IndexSetStride == 1 && IndexSetStart == 3), "just checking"); // 64-bit
@ -2543,36 +2545,36 @@ void CompactibleFreeListSpace::check_free_list_consistency() const {
|
|||
|
||||
void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
|
||||
assert_lock_strong(&_freelistLock);
|
||||
FreeList total;
|
||||
FreeList<FreeChunk> total;
|
||||
gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
|
||||
FreeList::print_labels_on(gclog_or_tty, "size");
|
||||
size_t totalFree = 0;
|
||||
FreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
|
||||
size_t total_free = 0;
|
||||
for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
|
||||
const FreeList *fl = &_indexedFreeList[i];
|
||||
totalFree += fl->count() * fl->size();
|
||||
const FreeList<FreeChunk> *fl = &_indexedFreeList[i];
|
||||
total_free += fl->count() * fl->size();
|
||||
if (i % (40*IndexSetStride) == 0) {
|
||||
FreeList::print_labels_on(gclog_or_tty, "size");
|
||||
FreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
|
||||
}
|
||||
fl->print_on(gclog_or_tty);
|
||||
total.set_bfrSurp( total.bfrSurp() + fl->bfrSurp() );
|
||||
total.set_bfr_surp( total.bfr_surp() + fl->bfr_surp() );
|
||||
total.set_surplus( total.surplus() + fl->surplus() );
|
||||
total.set_desired( total.desired() + fl->desired() );
|
||||
total.set_prevSweep( total.prevSweep() + fl->prevSweep() );
|
||||
total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
|
||||
total.set_prev_sweep( total.prev_sweep() + fl->prev_sweep() );
|
||||
total.set_before_sweep(total.before_sweep() + fl->before_sweep());
|
||||
total.set_count( total.count() + fl->count() );
|
||||
total.set_coalBirths( total.coalBirths() + fl->coalBirths() );
|
||||
total.set_coalDeaths( total.coalDeaths() + fl->coalDeaths() );
|
||||
total.set_splitBirths(total.splitBirths() + fl->splitBirths());
|
||||
total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
|
||||
total.set_coal_births( total.coal_births() + fl->coal_births() );
|
||||
total.set_coal_deaths( total.coal_deaths() + fl->coal_deaths() );
|
||||
total.set_split_births(total.split_births() + fl->split_births());
|
||||
total.set_split_deaths(total.split_deaths() + fl->split_deaths());
|
||||
}
|
||||
total.print_on(gclog_or_tty, "TOTAL");
|
||||
gclog_or_tty->print_cr("Total free in indexed lists "
|
||||
SIZE_FORMAT " words", totalFree);
|
||||
SIZE_FORMAT " words", total_free);
|
||||
gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n",
|
||||
(double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
|
||||
(total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
|
||||
(double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
|
||||
(total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
|
||||
(double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
|
||||
_dictionary->printDictCensus();
|
||||
_dictionary->print_dict_census();
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
|
@ -2634,18 +2636,18 @@ HeapWord* CFLS_LAB::alloc(size_t word_sz) {
|
|||
res = _cfls->getChunkFromDictionaryExact(word_sz);
|
||||
if (res == NULL) return NULL;
|
||||
} else {
|
||||
FreeList* fl = &_indexedFreeList[word_sz];
|
||||
FreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
|
||||
if (fl->count() == 0) {
|
||||
// Attempt to refill this local free list.
|
||||
get_from_global_pool(word_sz, fl);
|
||||
// If it didn't work, give up.
|
||||
if (fl->count() == 0) return NULL;
|
||||
}
|
||||
res = fl->getChunkAtHead();
|
||||
res = fl->get_chunk_at_head();
|
||||
assert(res != NULL, "Why was count non-zero?");
|
||||
}
|
||||
res->markNotFree();
|
||||
assert(!res->isFree(), "shouldn't be marked free");
|
||||
assert(!res->is_free(), "shouldn't be marked free");
|
||||
assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
|
||||
// mangle a just allocated object with a distinct pattern.
|
||||
debug_only(res->mangleAllocated(word_sz));
|
||||
|
@ -2654,7 +2656,7 @@ HeapWord* CFLS_LAB::alloc(size_t word_sz) {
|
|||
|
||||
// Get a chunk of blocks of the right size and update related
|
||||
// book-keeping stats
|
||||
void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList* fl) {
|
||||
void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList<FreeChunk>* fl) {
|
||||
// Get the #blocks we want to claim
|
||||
size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
|
||||
assert(n_blks > 0, "Error");
|
||||
|
@ -2736,7 +2738,7 @@ void CFLS_LAB::retire(int tid) {
|
|||
if (num_retire > 0) {
|
||||
_cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
|
||||
// Reset this list.
|
||||
_indexedFreeList[i] = FreeList();
|
||||
_indexedFreeList[i] = FreeList<FreeChunk>();
|
||||
_indexedFreeList[i].set_size(i);
|
||||
}
|
||||
}
|
||||
|
@ -2750,7 +2752,7 @@ void CFLS_LAB::retire(int tid) {
|
|||
}
|
||||
}
|
||||
|
||||
void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
|
||||
void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList<FreeChunk>* fl) {
|
||||
assert(fl->count() == 0, "Precondition.");
|
||||
assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
|
||||
"Precondition");
|
||||
|
@ -2766,12 +2768,12 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
|
|||
(cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
|
||||
(CMSSplitIndexedFreeListBlocks || k <= 1);
|
||||
k++, cur_sz = k * word_sz) {
|
||||
FreeList fl_for_cur_sz; // Empty.
|
||||
FreeList<FreeChunk> fl_for_cur_sz; // Empty.
|
||||
fl_for_cur_sz.set_size(cur_sz);
|
||||
{
|
||||
MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
|
||||
Mutex::_no_safepoint_check_flag);
|
||||
FreeList* gfl = &_indexedFreeList[cur_sz];
|
||||
FreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
|
||||
if (gfl->count() != 0) {
|
||||
// nn is the number of chunks of size cur_sz that
|
||||
// we'd need to split k-ways each, in order to create
|
||||
|
@ -2784,9 +2786,9 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
|
|||
// we increment the split death count by the number of blocks
|
||||
// we just took from the cur_sz-size blocks list and which
|
||||
// we will be splitting below.
|
||||
ssize_t deaths = gfl->splitDeaths() +
|
||||
ssize_t deaths = gfl->split_deaths() +
|
||||
fl_for_cur_sz.count();
|
||||
gfl->set_splitDeaths(deaths);
|
||||
gfl->set_split_deaths(deaths);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2797,21 +2799,21 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
|
|||
} else {
|
||||
// Divide each block on fl_for_cur_sz up k ways.
|
||||
FreeChunk* fc;
|
||||
while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
|
||||
while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
|
||||
// Must do this in reverse order, so that anybody attempting to
|
||||
// access the main chunk sees it as a single free block until we
|
||||
// change it.
|
||||
size_t fc_size = fc->size();
|
||||
assert(fc->isFree(), "Error");
|
||||
assert(fc->is_free(), "Error");
|
||||
for (int i = k-1; i >= 0; i--) {
|
||||
FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
|
||||
assert((i != 0) ||
|
||||
((fc == ffc) && ffc->isFree() &&
|
||||
((fc == ffc) && ffc->is_free() &&
|
||||
(ffc->size() == k*word_sz) && (fc_size == word_sz)),
|
||||
"Counting error");
|
||||
ffc->setSize(word_sz);
|
||||
ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
|
||||
ffc->linkNext(NULL);
|
||||
ffc->set_size(word_sz);
|
||||
ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
|
||||
ffc->link_next(NULL);
|
||||
// Above must occur before BOT is updated below.
|
||||
OrderAccess::storestore();
|
||||
// splitting from the right, fc_size == i * word_sz
|
||||
|
@ -2822,7 +2824,7 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
|
|||
_bt.verify_single_block((HeapWord*)fc, fc_size);
|
||||
_bt.verify_single_block((HeapWord*)ffc, word_sz);
|
||||
// Push this on "fl".
|
||||
fl->returnChunkAtHead(ffc);
|
||||
fl->return_chunk_at_head(ffc);
|
||||
}
|
||||
// TRAP
|
||||
assert(fl->tail()->next() == NULL, "List invariant.");
|
||||
|
@ -2832,8 +2834,8 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
|
|||
size_t num = fl->count();
|
||||
MutexLockerEx x(_indexedFreeListParLocks[word_sz],
|
||||
Mutex::_no_safepoint_check_flag);
|
||||
ssize_t births = _indexedFreeList[word_sz].splitBirths() + num;
|
||||
_indexedFreeList[word_sz].set_splitBirths(births);
|
||||
ssize_t births = _indexedFreeList[word_sz].split_births() + num;
|
||||
_indexedFreeList[word_sz].set_split_births(births);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
@ -2846,12 +2848,12 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
|
|||
MutexLockerEx x(parDictionaryAllocLock(),
|
||||
Mutex::_no_safepoint_check_flag);
|
||||
while (n > 0) {
|
||||
fc = dictionary()->getChunk(MAX2(n * word_sz,
|
||||
_dictionary->minSize()),
|
||||
FreeBlockDictionary::atLeast);
|
||||
fc = dictionary()->get_chunk(MAX2(n * word_sz,
|
||||
_dictionary->min_size()),
|
||||
FreeBlockDictionary<FreeChunk>::atLeast);
|
||||
if (fc != NULL) {
|
||||
_bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */); // update _unallocated_blk
|
||||
dictionary()->dictCensusUpdate(fc->size(),
|
||||
dictionary()->dict_census_udpate(fc->size(),
|
||||
true /*split*/,
|
||||
false /*birth*/);
|
||||
break;
|
||||
|
@ -2862,7 +2864,7 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
|
|||
if (fc == NULL) return;
|
||||
// Otherwise, split up that block.
|
||||
assert((ssize_t)n >= 1, "Control point invariant");
|
||||
assert(fc->isFree(), "Error: should be a free block");
|
||||
assert(fc->is_free(), "Error: should be a free block");
|
||||
_bt.verify_single_block((HeapWord*)fc, fc->size());
|
||||
const size_t nn = fc->size() / word_sz;
|
||||
n = MIN2(nn, n);
|
||||
|
@ -2893,18 +2895,18 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
|
|||
if (rem > 0) {
|
||||
size_t prefix_size = n * word_sz;
|
||||
rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
|
||||
rem_fc->setSize(rem);
|
||||
rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
|
||||
rem_fc->linkNext(NULL);
|
||||
rem_fc->set_size(rem);
|
||||
rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
|
||||
rem_fc->link_next(NULL);
|
||||
// Above must occur before BOT is updated below.
|
||||
assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
|
||||
OrderAccess::storestore();
|
||||
_bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
|
||||
assert(fc->isFree(), "Error");
|
||||
fc->setSize(prefix_size);
|
||||
assert(fc->is_free(), "Error");
|
||||
fc->set_size(prefix_size);
|
||||
if (rem >= IndexSetSize) {
|
||||
returnChunkToDictionary(rem_fc);
|
||||
dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
|
||||
dictionary()->dict_census_udpate(rem, true /*split*/, true /*birth*/);
|
||||
rem_fc = NULL;
|
||||
}
|
||||
// Otherwise, return it to the small list below.
|
||||
|
@ -2914,7 +2916,7 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
|
|||
MutexLockerEx x(_indexedFreeListParLocks[rem],
|
||||
Mutex::_no_safepoint_check_flag);
|
||||
_bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
|
||||
_indexedFreeList[rem].returnChunkAtHead(rem_fc);
|
||||
_indexedFreeList[rem].return_chunk_at_head(rem_fc);
|
||||
smallSplitBirth(rem);
|
||||
}
|
||||
assert((ssize_t)n > 0 && fc != NULL, "Consistency");
|
||||
|
@ -2926,9 +2928,9 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
|
|||
// All but first chunk in this loop
|
||||
for (ssize_t i = n-1; i > 0; i--) {
|
||||
FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
|
||||
ffc->setSize(word_sz);
|
||||
ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
|
||||
ffc->linkNext(NULL);
|
||||
ffc->set_size(word_sz);
|
||||
ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
|
||||
ffc->link_next(NULL);
|
||||
// Above must occur before BOT is updated below.
|
||||
OrderAccess::storestore();
|
||||
// splitting from the right, fc_size == (n - i + 1) * wordsize
|
||||
|
@ -2938,25 +2940,25 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
|
|||
_bt.verify_single_block((HeapWord*)ffc, ffc->size());
|
||||
_bt.verify_single_block((HeapWord*)fc, fc_size);
|
||||
// Push this on "fl".
|
||||
fl->returnChunkAtHead(ffc);
|
||||
fl->return_chunk_at_head(ffc);
|
||||
}
|
||||
// First chunk
|
||||
assert(fc->isFree() && fc->size() == n*word_sz, "Error: should still be a free block");
|
||||
assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
|
||||
// The blocks above should show their new sizes before the first block below
|
||||
fc->setSize(word_sz);
|
||||
fc->linkPrev(NULL); // idempotent wrt free-ness, see assert above
|
||||
fc->linkNext(NULL);
|
||||
fc->set_size(word_sz);
|
||||
fc->link_prev(NULL); // idempotent wrt free-ness, see assert above
|
||||
fc->link_next(NULL);
|
||||
_bt.verify_not_unallocated((HeapWord*)fc, fc->size());
|
||||
_bt.verify_single_block((HeapWord*)fc, fc->size());
|
||||
fl->returnChunkAtHead(fc);
|
||||
fl->return_chunk_at_head(fc);
|
||||
|
||||
assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
|
||||
{
|
||||
// Update the stats for this block size.
|
||||
MutexLockerEx x(_indexedFreeListParLocks[word_sz],
|
||||
Mutex::_no_safepoint_check_flag);
|
||||
const ssize_t births = _indexedFreeList[word_sz].splitBirths() + n;
|
||||
_indexedFreeList[word_sz].set_splitBirths(births);
|
||||
const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
|
||||
_indexedFreeList[word_sz].set_split_births(births);
|
||||
// ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
|
||||
// _indexedFreeList[word_sz].set_surplus(new_surplus);
|
||||
}
|
||||
|
|
|
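A note on the splitting loops above: each sub-chunk is carved from the right end of the parent block and fully initialized (size set, prev-link tagged free) before the store-store fence and the block-offset-table update, so a concurrent GC thread walking the space always sees either the original free block or a completed smaller one. A minimal sketch of that publication order, with illustrative names rather than the HotSpot types:

#include <atomic>
#include <cstdint>
#include <cstddef>

struct Chunk {
  std::size_t   size;
  std::intptr_t prev; // low bit set == "this block is free"
  Chunk*        next;
};

// Carve n pieces of piece_bytes each out of one free block, right to left.
void carve_from_right(char* base, std::size_t n, std::size_t piece_bytes) {
  for (std::size_t i = n; i-- > 1; ) {
    Chunk* c = reinterpret_cast<Chunk*>(base + i * piece_bytes);
    c->size = piece_bytes;
    c->prev = 0x1;       // tagged-null prev marks the chunk free
    c->next = nullptr;
    // Publish the chunk before any side table describing it is updated,
    // mirroring OrderAccess::storestore() before the _bt update above.
    std::atomic_thread_fence(std::memory_order_release);
  }
  // The first (leftmost) piece is resized last, so until this point the
  // whole region still reads as one big free block starting at base.
  reinterpret_cast<Chunk*>(base)->size = piece_bytes;
}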
@@ -25,10 +25,10 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP
#include "gc_implementation/concurrentMarkSweep/binaryTreeDictionary.hpp"
#include "gc_implementation/concurrentMarkSweep/freeList.hpp"
#include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/freeList.hpp"
#include "memory/space.hpp"
// Classes in support of keeping track of promotions into a non-Contiguous

@@ -129,10 +129,10 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// Linear allocation blocks
LinearAllocBlock _smallLinearAllocBlock;
FreeBlockDictionary::DictionaryChoice _dictionaryChoice;
FreeBlockDictionary* _dictionary; // ptr to dictionary for large size blocks
FreeBlockDictionary<FreeChunk>::DictionaryChoice _dictionaryChoice;
FreeBlockDictionary<FreeChunk>* _dictionary; // ptr to dictionary for large size blocks
FreeList _indexedFreeList[IndexSetSize];
FreeList<FreeChunk> _indexedFreeList[IndexSetSize];
// indexed array for small size blocks
// allocation stategy
bool _fitStrategy; // Use best fit strategy.

@@ -169,7 +169,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// If the count of "fl" is negative, it's absolute value indicates a
// number of free chunks that had been previously "borrowed" from global
// list of size "word_sz", and must now be decremented.
void par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl);
void par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList<FreeChunk>* fl);
// Allocation helper functions
// Allocate using a strategy that takes from the indexed free lists

@@ -215,7 +215,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// and return it. The split off remainder is returned to
// the free lists. The old name for getFromListGreater
// was lookInListGreater.
FreeChunk* getFromListGreater(FreeList* fl, size_t numWords);
FreeChunk* getFromListGreater(FreeList<FreeChunk>* fl, size_t numWords);
// Get a chunk in the indexed free list or dictionary,
// by considering a larger chunk and splitting it.
FreeChunk* getChunkFromGreater(size_t numWords);

@@ -286,10 +286,10 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// Constructor...
CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
bool use_adaptive_freelists,
FreeBlockDictionary::DictionaryChoice);
FreeBlockDictionary<FreeChunk>::DictionaryChoice);
// accessors
bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
FreeBlockDictionary* dictionary() const { return _dictionary; }
FreeBlockDictionary<FreeChunk>* dictionary() const { return _dictionary; }
HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }

@@ -499,7 +499,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// Verify that the given chunk is in the free lists:
// i.e. either the binary tree dictionary, the indexed free lists
// or the linear allocation block.
bool verifyChunkInFreeLists(FreeChunk* fc) const;
bool verify_chunk_in_free_list(FreeChunk* fc) const;
// Verify that the given chunk is the linear allocation block
bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const;
// Do some basic checks on the the free lists.

@@ -608,7 +608,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
void coalDeath(size_t size);
void smallSplitBirth(size_t size);
void smallSplitDeath(size_t size);
void splitBirth(size_t size);
void split_birth(size_t size);
void splitDeath(size_t size);
void split(size_t from, size_t to1);

@@ -622,7 +622,7 @@ class CFLS_LAB : public CHeapObj {
CompactibleFreeListSpace* _cfls;
// Our local free lists.
FreeList _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];
FreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];
// Initialized from a command-line arg.

@@ -635,7 +635,7 @@ class CFLS_LAB : public CHeapObj {
size_t _num_blocks [CompactibleFreeListSpace::IndexSetSize];
// Internal work method
void get_from_global_pool(size_t word_sz, FreeList* fl);
void get_from_global_pool(size_t word_sz, FreeList<FreeChunk>* fl);
public:
CFLS_LAB(CompactibleFreeListSpace* cfls);
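The header changes above replace the CMS-private FreeList and FreeBlockDictionary types with templates parameterized over the chunk type (FreeList<FreeChunk>, FreeBlockDictionary<FreeChunk>), now included from memory/ rather than gc_implementation/concurrentMarkSweep/. As a rough, illustrative sketch of the shape such a templated size-class list has (not the JDK sources):

#include <cstddef>

template <class Chunk>
class SizeClassFreeList {
  Chunk*      _head;
  std::size_t _size;   // the uniform chunk size this list holds
  long        _count;  // number of chunks currently linked
 public:
  SizeClassFreeList() : _head(nullptr), _size(0), _count(0) {}
  void set_size(std::size_t sz) { _size = sz; }
  long count() const            { return _count; }

  Chunk* get_chunk_at_head() {           // pop, as in fl->get_chunk_at_head()
    Chunk* res = _head;
    if (res != nullptr) { _head = res->next(); --_count; }
    return res;
  }
  void return_chunk_at_head(Chunk* c) {  // push, as in fl->return_chunk_at_head()
    c->link_next(_head);
    _head = c;
    ++_count;
  }
};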
@@ -188,7 +188,7 @@ class CMSParGCThreadState: public CHeapObj {
ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
ReservedSpace rs, size_t initial_byte_size, int level,
CardTableRS* ct, bool use_adaptive_freelists,
FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
CardGeneration(rs, initial_byte_size, level, ct),
_dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
_debug_collection_type(Concurrent_collection_type)

@@ -1026,7 +1026,7 @@ HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
// its mark-bit or P-bits not yet set. Such objects need
// to be safely navigable by block_start().
assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
assert(!((FreeChunk*)res)->isFree(), "Error, block will look free but show wrong size");
assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
collector()->direct_allocated(res, adjustedSize);
_direct_allocated_words += adjustedSize;
// allocation counters

@@ -1391,7 +1391,7 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
oop obj = oop(obj_ptr);
OrderAccess::storestore();
assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
assert(!((FreeChunk*)obj_ptr)->isFree(), "Error, block will look free but show wrong size");
assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
// IMPORTANT: See note on object initialization for CMS above.
// Otherwise, copy the object. Here we must be careful to insert the
// klass pointer last, since this marks the block as an allocated object.

@@ -1400,7 +1400,7 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
// Restore the mark word copied above.
obj->set_mark(m);
assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
assert(!((FreeChunk*)obj_ptr)->isFree(), "Error, block will look free but show wrong size");
assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
OrderAccess::storestore();
if (UseCompressedOops) {

@@ -1421,7 +1421,7 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
promoInfo->track((PromotedObject*)obj, old->klass());
}
assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
assert(!((FreeChunk*)obj_ptr)->isFree(), "Error, block will look free but show wrong size");
assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
assert(old->is_oop(), "Will use and dereference old klass ptr below");
// Finally, install the klass pointer (this should be volatile).

@@ -2034,7 +2034,7 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
pointer_delta(cms_space->end(), cms_space->compaction_top())
* HeapWordSize,
"All the free space should be compacted into one chunk at top");
assert(cms_space->dictionary()->totalChunkSize(
assert(cms_space->dictionary()->total_chunk_size(
debug_only(cms_space->freelistLock())) == 0 ||
cms_space->totalSizeInIndexedFreeLists() == 0,
"All the free space should be in a single chunk");

@@ -6131,7 +6131,7 @@ void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
double nearLargestPercent = FLSLargestBlockCoalesceProximity;
HeapWord* minAddr = _cmsSpace->bottom();
HeapWord* largestAddr =
(HeapWord*) _cmsSpace->dictionary()->findLargestDict();
(HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
if (largestAddr == NULL) {
// The dictionary appears to be empty. In this case
// try to coalesce at the end of the heap.

@@ -7906,7 +7906,7 @@ SweepClosure::SweepClosure(CMSCollector* collector,
_last_fc = NULL;
_sp->initializeIndexedFreeListArrayReturnedBytes();
_sp->dictionary()->initializeDictReturnedBytes();
_sp->dictionary()->initialize_dict_returned_bytes();
)
assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
"sweep _limit out of bounds");

@@ -7954,13 +7954,13 @@ SweepClosure::~SweepClosure() {
if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returned_bytes);
gclog_or_tty->print(" Indexed List Returned "SIZE_FORMAT" bytes",
indexListReturnedBytes);
gclog_or_tty->print_cr(" Dictionary Returned "SIZE_FORMAT" bytes",
dictReturnedBytes);
dict_returned_bytes);
}
}
if (CMSTraceSweeper) {

@@ -7985,9 +7985,9 @@ void SweepClosure::initialize_free_range(HeapWord* freeFinger,
if (CMSTestInFreeList) {
if (freeRangeInFreeLists) {
FreeChunk* fc = (FreeChunk*) freeFinger;
assert(fc->isFree(), "A chunk on the free list should be free.");
assert(fc->is_free(), "A chunk on the free list should be free.");
assert(fc->size() > 0, "Free range should have a size");
assert(_sp->verifyChunkInFreeLists(fc), "Chunk is not in free lists");
assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
}
}
}

@@ -8057,7 +8057,7 @@ size_t SweepClosure::do_blk_careful(HeapWord* addr) {
assert(addr < _limit, "sweep invariant");
// check if we should yield
do_yield_check(addr);
if (fc->isFree()) {
if (fc->is_free()) {
// Chunk that is already free
res = fc->size();
do_already_free_chunk(fc);

@@ -8145,7 +8145,7 @@ void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
// Chunks that cannot be coalesced are not in the
// free lists.
if (CMSTestInFreeList && !fc->cantCoalesce()) {
assert(_sp->verifyChunkInFreeLists(fc),
assert(_sp->verify_chunk_in_free_list(fc),
"free chunk should be in free lists");
}
// a chunk that is already free, should not have been

@@ -8171,7 +8171,7 @@ void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
FreeChunk* nextChunk = (FreeChunk*)(addr + size);
assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
if ((HeapWord*)nextChunk < _sp->end() && // There is another free chunk to the right ...
nextChunk->isFree() && // ... which is free...
nextChunk->is_free() && // ... which is free...
nextChunk->cantCoalesce()) { // ... but can't be coalesced
// nothing to do
} else {

@@ -8203,7 +8203,7 @@ void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
assert(ffc->size() == pointer_delta(addr, freeFinger()),
"Size of free range is inconsistent with chunk size.");
if (CMSTestInFreeList) {
assert(_sp->verifyChunkInFreeLists(ffc),
assert(_sp->verify_chunk_in_free_list(ffc),
"free range is not in free lists");
}
_sp->removeFreeChunkFromFreeLists(ffc);

@@ -8262,7 +8262,7 @@ size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
assert(ffc->size() == pointer_delta(addr, freeFinger()),
"Size of free range is inconsistent with chunk size.");
if (CMSTestInFreeList) {
assert(_sp->verifyChunkInFreeLists(ffc),
assert(_sp->verify_chunk_in_free_list(ffc),
"free range is not in free lists");
}
_sp->removeFreeChunkFromFreeLists(ffc);

@@ -8351,11 +8351,11 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
size_t chunkSize) {
// do_post_free_or_garbage_chunk() should only be called in the case
// of the adaptive free list allocator.
const bool fcInFreeLists = fc->isFree();
const bool fcInFreeLists = fc->is_free();
assert(_sp->adaptive_freelists(), "Should only be used in this case.");
assert((HeapWord*)fc <= _limit, "sweep invariant");
if (CMSTestInFreeList && fcInFreeLists) {
assert(_sp->verifyChunkInFreeLists(fc), "free chunk is not in free lists");
assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
}
if (CMSTraceSweeper) {

@@ -8410,7 +8410,7 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
"Size of free range is inconsistent with chunk size.");
if (CMSTestInFreeList) {
assert(_sp->verifyChunkInFreeLists(ffc),
assert(_sp->verify_chunk_in_free_list(ffc),
"Chunk is not in free lists");
}
_sp->coalDeath(ffc->size());

@@ -8459,7 +8459,7 @@ void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
" when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
_limit, _sp->bottom(), _sp->end(), fc, chunk_size));
if (eob >= _limit) {
assert(eob == _limit || fc->isFree(), "Only a free chunk should allow us to cross over the limit");
assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
if (CMSTraceSweeper) {
gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
"[" PTR_FORMAT "," PTR_FORMAT ") in space "

@@ -8482,8 +8482,8 @@ void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
if (!freeRangeInFreeLists()) {
if (CMSTestInFreeList) {
FreeChunk* fc = (FreeChunk*) chunk;
fc->setSize(size);
assert(!_sp->verifyChunkInFreeLists(fc),
fc->set_size(size);
assert(!_sp->verify_chunk_in_free_list(fc),
"chunk should not be in free lists yet");
}
if (CMSTraceSweeper) {

@@ -8557,8 +8557,8 @@ void SweepClosure::do_yield_work(HeapWord* addr) {
// This is actually very useful in a product build if it can
// be called from the debugger. Compile it into the product
// as needed.
bool debug_verifyChunkInFreeLists(FreeChunk* fc) {
return debug_cms_space->verifyChunkInFreeLists(fc);
bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
return debug_cms_space->verify_chunk_in_free_list(fc);
}
#endif

@@ -9255,7 +9255,7 @@ void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
size_t chunk_at_end_old_size = chunk_at_end->size();
assert(chunk_at_end_old_size >= word_size_change,
"Shrink is too large");
chunk_at_end->setSize(chunk_at_end_old_size -
chunk_at_end->set_size(chunk_at_end_old_size -
word_size_change);
_cmsSpace->freed((HeapWord*) chunk_at_end->end(),
word_size_change);
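The repeated assert(!((FreeChunk*)obj_ptr)->is_free(), ...) checks above guard a publication protocol in par_promote(): the mark word and object body are written first, a store-store fence follows, and the klass pointer is installed last, since a non-null klass word is what makes the block look like an allocated object to concurrent readers. A hedged sketch of the same ordering with standard atomics (names illustrative, not the HotSpot API):

#include <atomic>
#include <cstring>
#include <cstddef>

struct Header {
  std::atomic<const void*> klass; // null while the block is uninitialized
};

// Copy src into the body behind hdr, then publish by writing klass last.
void promote_copy(Header* hdr, const void* src, std::size_t body_bytes,
                  const void* klass) {
  std::memcpy(reinterpret_cast<char*>(hdr + 1), src, body_bytes);
  // The release store plays the role of storestore(): the body becomes
  // visible before the word that flips the block from free-looking to object.
  hdr->klass.store(klass, std::memory_order_release);
}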
@@ -25,10 +25,10 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
#include "gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp"
#include "gc_implementation/shared/gSpaceCounters.hpp"
#include "gc_implementation/shared/gcStats.hpp"
#include "gc_implementation/shared/generationCounters.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/generation.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"

@@ -1106,7 +1106,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
int level, CardTableRS* ct,
bool use_adaptive_freelists,
FreeBlockDictionary::DictionaryChoice);
FreeBlockDictionary<FreeChunk>::DictionaryChoice);
// Accessors
CMSCollector* collector() const { return _collector; }

@@ -1328,7 +1328,7 @@ class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {
ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
int level, CardTableRS* ct,
bool use_adaptive_freelists,
FreeBlockDictionary::DictionaryChoice
FreeBlockDictionary<FreeChunk>::DictionaryChoice
dictionaryChoice) :
ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
use_adaptive_freelists, dictionaryChoice) {}
@@ -23,7 +23,8 @@
*/
#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp"
#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "utilities/copy.hpp"
#ifndef PRODUCT
@@ -75,20 +75,20 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
// calls. We really want the read of _mark and _prev from this pointer
// to be volatile but making the fields volatile causes all sorts of
// compilation errors.
return ((volatile FreeChunk*)addr)->isFree();
return ((volatile FreeChunk*)addr)->is_free();
}

bool isFree() const volatile {
bool is_free() const volatile {
LP64_ONLY(if (UseCompressedOops) return mark()->is_cms_free_chunk(); else)
return (((intptr_t)_prev) & 0x1) == 0x1;
}
bool cantCoalesce() const {
assert(isFree(), "can't get coalesce bit on not free");
assert(is_free(), "can't get coalesce bit on not free");
return (((intptr_t)_prev) & 0x2) == 0x2;
}
void dontCoalesce() {
// the block should be free
assert(isFree(), "Should look like a free block");
assert(is_free(), "Should look like a free block");
_prev = (FreeChunk*)(((intptr_t)_prev) | 0x2);
}
FreeChunk* prev() const {

@@ -103,23 +103,23 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
LP64_ONLY(if (UseCompressedOops) return mark()->get_size(); else )
return _size;
}
void setSize(size_t sz) {
void set_size(size_t sz) {
LP64_ONLY(if (UseCompressedOops) set_mark(markOopDesc::set_size_and_free(sz)); else )
_size = sz;
}

FreeChunk* next() const { return _next; }

void linkAfter(FreeChunk* ptr) {
linkNext(ptr);
if (ptr != NULL) ptr->linkPrev(this);
void link_after(FreeChunk* ptr) {
link_next(ptr);
if (ptr != NULL) ptr->link_prev(this);
}
void linkNext(FreeChunk* ptr) { _next = ptr; }
void linkPrev(FreeChunk* ptr) {
void link_next(FreeChunk* ptr) { _next = ptr; }
void link_prev(FreeChunk* ptr) {
LP64_ONLY(if (UseCompressedOops) _prev = ptr; else)
_prev = (FreeChunk*)((intptr_t)ptr | 0x1);
}
void clearNext() { _next = NULL; }
void clear_next() { _next = NULL; }
void markNotFree() {
// Set _prev (klass) to null before (if) clearing the mark word below
_prev = NULL;

@@ -129,7 +129,7 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
set_mark(markOopDesc::prototype());
}
#endif
assert(!isFree(), "Error");
assert(!is_free(), "Error");
}

// Return the address past the end of this chunk
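As the hunks above show, FreeChunk keeps its status in the low bits of _prev: word alignment leaves bit 0 free to mark the chunk as free (link_prev ORs in 0x1) and bit 1 to mark it non-coalescable (dontCoalesce ORs in 0x2). An illustrative stand-alone version of that low-bit pointer tagging:

#include <cstdint>

class TaggedLink {
  std::intptr_t _prev; // pointer | flag bits (pointers are >= 4-byte aligned)
 public:
  bool is_free() const       { return (_prev & 0x1) == 0x1; }
  bool cant_coalesce() const { return (_prev & 0x2) == 0x2; }
  void link_prev(void* p) {
    _prev = reinterpret_cast<std::intptr_t>(p) | 0x1; // stay marked free
  }
  void dont_coalesce()       { _prev |= 0x2; }
  void* prev() const {                      // strip both flag bits
    return reinterpret_cast<void*>(_prev & ~std::intptr_t(0x3));
  }
};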
@@ -121,7 +121,7 @@ void PromotionInfo::track(PromotedObject* trackOop) {
void PromotionInfo::track(PromotedObject* trackOop, klassOop klassOfOop) {
// make a copy of header as it may need to be spooled
markOop mark = oop(trackOop)->mark();
trackOop->clearNext();
trackOop->clear_next();
if (mark->must_be_preserved_for_cms_scavenge(klassOfOop)) {
// save non-prototypical header, and mark oop
saveDisplacedHeader(mark);
@@ -43,7 +43,7 @@ class PromotedObject VALUE_OBJ_CLASS_SPEC {
// whose position will depend on endian-ness of the platform.
// This is so that there is no interference with the
// cms_free_bit occupying bit position 7 (lsb == 0)
// when we are using compressed oops; see FreeChunk::isFree().
// when we are using compressed oops; see FreeChunk::is_free().
// We cannot move the cms_free_bit down because currently
// biased locking code assumes that age bits are contiguous
// with the lock bits. Even if that assumption were relaxed,

@@ -65,7 +65,7 @@ class PromotedObject VALUE_OBJ_CLASS_SPEC {
};
public:
inline PromotedObject* next() const {
assert(!((FreeChunk*)this)->isFree(), "Error");
assert(!((FreeChunk*)this)->is_free(), "Error");
PromotedObject* res;
if (UseCompressedOops) {
// The next pointer is a compressed oop stored in the top 32 bits

@@ -85,27 +85,27 @@ class PromotedObject VALUE_OBJ_CLASS_SPEC {
} else {
_next |= (intptr_t)x;
}
assert(!((FreeChunk*)this)->isFree(), "Error");
assert(!((FreeChunk*)this)->is_free(), "Error");
}
inline void setPromotedMark() {
_next |= promoted_mask;
assert(!((FreeChunk*)this)->isFree(), "Error");
assert(!((FreeChunk*)this)->is_free(), "Error");
}
inline bool hasPromotedMark() const {
assert(!((FreeChunk*)this)->isFree(), "Error");
assert(!((FreeChunk*)this)->is_free(), "Error");
return (_next & promoted_mask) == promoted_mask;
}
inline void setDisplacedMark() {
_next |= displaced_mark;
assert(!((FreeChunk*)this)->isFree(), "Error");
assert(!((FreeChunk*)this)->is_free(), "Error");
}
inline bool hasDisplacedMark() const {
assert(!((FreeChunk*)this)->isFree(), "Error");
assert(!((FreeChunk*)this)->is_free(), "Error");
return (_next & displaced_mark) != 0;
}
inline void clearNext() {
inline void clear_next() {
_next = 0;
assert(!((FreeChunk*)this)->isFree(), "Error");
assert(!((FreeChunk*)this)->is_free(), "Error");
}
debug_only(void *next_addr() { return (void *) &_next; })
};
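PromotedObject, above, packs its next pointer and two status flags into the single word _next: the setters OR mask bits in, and a reader masks them back off before following the pointer. A simplified, illustrative rendering of that encoding (the mask values here are examples, not the real bit layout):

#include <cstdint>

class PromotedLink {
  std::intptr_t _next;
  static constexpr std::intptr_t promoted_mask  = 0x1;
  static constexpr std::intptr_t displaced_mark = 0x2;
  static constexpr std::intptr_t flag_mask      = promoted_mask | displaced_mark;
 public:
  PromotedLink* next() const {
    return reinterpret_cast<PromotedLink*>(_next & ~flag_mask); // drop flags
  }
  void set_next(PromotedLink* x) {
    _next = (_next & flag_mask) | reinterpret_cast<std::intptr_t>(x);
  }
  void set_promoted_mark()        { _next |= promoted_mask; }
  bool has_promoted_mark() const  { return (_next & promoted_mask) != 0; }
  void set_displaced_mark()       { _next |= displaced_mark; }
  bool has_displaced_mark() const { return (_next & displaced_mark) != 0; }
  void clear_next()               { _next = 0; }
};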
@@ -44,11 +44,11 @@
nonstatic_field(FreeChunk, _next, FreeChunk*) \
nonstatic_field(FreeChunk, _prev, FreeChunk*) \
nonstatic_field(LinearAllocBlock, _word_size, size_t) \
nonstatic_field(FreeList, _size, size_t) \
nonstatic_field(FreeList, _count, ssize_t) \
nonstatic_field(BinaryTreeDictionary, _totalSize, size_t) \
nonstatic_field(CompactibleFreeListSpace, _dictionary, FreeBlockDictionary*) \
nonstatic_field(CompactibleFreeListSpace, _indexedFreeList[0], FreeList) \
nonstatic_field(FreeList<FreeChunk>, _size, size_t) \
nonstatic_field(FreeList<FreeChunk>, _count, ssize_t) \
nonstatic_field(BinaryTreeDictionary<FreeChunk>,_total_size, size_t) \
nonstatic_field(CompactibleFreeListSpace, _dictionary, FreeBlockDictionary<FreeChunk>*) \
nonstatic_field(CompactibleFreeListSpace, _indexedFreeList[0], FreeList<FreeChunk>) \
nonstatic_field(CompactibleFreeListSpace, _smallLinearAllocBlock, LinearAllocBlock)

@@ -70,13 +70,13 @@
declare_toplevel_type(CompactibleFreeListSpace*) \
declare_toplevel_type(CMSCollector*) \
declare_toplevel_type(FreeChunk*) \
declare_toplevel_type(BinaryTreeDictionary*) \
declare_toplevel_type(FreeBlockDictionary*) \
declare_toplevel_type(FreeList*) \
declare_toplevel_type(FreeList) \
declare_toplevel_type(BinaryTreeDictionary<FreeChunk>*) \
declare_toplevel_type(FreeBlockDictionary<FreeChunk>*) \
declare_toplevel_type(FreeList<FreeChunk>*) \
declare_toplevel_type(FreeList<FreeChunk>) \
declare_toplevel_type(LinearAllocBlock) \
declare_toplevel_type(FreeBlockDictionary) \
declare_type(BinaryTreeDictionary, FreeBlockDictionary)
declare_toplevel_type(FreeBlockDictionary<FreeChunk>) \
declare_type(BinaryTreeDictionary<FreeChunk>, FreeBlockDictionary<FreeChunk>)

#define VM_INT_CONSTANTS_CMS(declare_constant) \
declare_constant(Generation::ConcurrentMarkSweep) \
@@ -1183,35 +1183,31 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
g1p->record_concurrent_mark_remark_end();
}

// Used to calculate the # live objects per region
// for verification purposes
class CalcLiveObjectsClosure: public HeapRegionClosure {
CMBitMapRO* _bm;
// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
protected:
ConcurrentMark* _cm;
BitMap* _region_bm;
BitMap* _card_bm;
size_t _region_marked_bytes;
intptr_t _bottom_card_num;
void mark_card_num_range(intptr_t start_card_num, intptr_t last_card_num) {
assert(start_card_num <= last_card_num, "sanity");
BitMap::idx_t start_idx = start_card_num - _bottom_card_num;
BitMap::idx_t last_idx = last_card_num - _bottom_card_num;
void set_card_bitmap_range(BitMap::idx_t start_idx, BitMap::idx_t last_idx) {
assert(start_idx <= last_idx, "sanity");
// Set the inclusive bit range [start_idx, last_idx].
// For small ranges (up to 8 cards) use a simple loop; otherwise
// use par_at_put_range.
if ((last_idx - start_idx) < 8) {
for (BitMap::idx_t i = start_idx; i <= last_idx; i += 1) {
_card_bm->par_at_put(i, 1);
_card_bm->par_set_bit(i);
}
} else {
assert(last_idx < _card_bm->size(), "sanity");
// Note BitMap::par_at_put_range() is exclusive.
_card_bm->par_at_put_range(start_idx, last_idx+1, true);
}
}

public:
CalcLiveObjectsClosure(CMBitMapRO *bm, ConcurrentMark *cm,
BitMap* region_bm, BitMap* card_bm) :
_bm(bm), _cm(cm), _region_bm(region_bm), _card_bm(card_bm),
_region_marked_bytes(0), _bottom_card_num(cm->heap_bottom_card_num()) { }

// It takes a region that's not empty (i.e., it has at least one
// live object in it and sets its corresponding bit on the region
// bitmap to 1. If the region is "starts humongous" it will also set

@@ -1234,6 +1230,24 @@ public:
}
}

public:
CMCountDataClosureBase(ConcurrentMark *cm,
BitMap* region_bm, BitMap* card_bm):
_cm(cm), _region_bm(region_bm), _card_bm(card_bm) { }
};

// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
class CalcLiveObjectsClosure: public CMCountDataClosureBase {
CMBitMapRO* _bm;
size_t _region_marked_bytes;

public:
CalcLiveObjectsClosure(CMBitMapRO *bm, ConcurrentMark *cm,
BitMap* region_bm, BitMap* card_bm) :
CMCountDataClosureBase(cm, region_bm, card_bm),
_bm(bm), _region_marked_bytes(0) { }

bool doHeapRegion(HeapRegion* hr) {

if (hr->continuesHumongous()) {

@@ -1260,65 +1274,31 @@ public:
size_t marked_bytes = 0;

// Below, the term "card num" means the result of shifting an address
// by the card shift -- address 0 corresponds to card number 0. One
// must subtract the card num of the bottom of the heap to obtain a
// card table index.

// The first card num of the sequence of live cards currently being
// constructed. -1 ==> no sequence.
intptr_t start_card_num = -1;

// The last card num of the sequence of live cards currently being
// constructed. -1 ==> no sequence.
intptr_t last_card_num = -1;

while (start < nextTop) {
oop obj = oop(start);
int obj_sz = obj->size();

// The card num of the start of the current object.
intptr_t obj_card_num =
intptr_t(uintptr_t(start) >> CardTableModRefBS::card_shift);
HeapWord* obj_last = start + obj_sz - 1;
intptr_t obj_last_card_num =
intptr_t(uintptr_t(obj_last) >> CardTableModRefBS::card_shift);

if (obj_card_num != last_card_num) {
if (start_card_num == -1) {
assert(last_card_num == -1, "Both or neither.");
start_card_num = obj_card_num;
} else {
assert(last_card_num != -1, "Both or neither.");
assert(obj_card_num >= last_card_num, "Inv");
if ((obj_card_num - last_card_num) > 1) {
// Mark the last run, and start a new one.
mark_card_num_range(start_card_num, last_card_num);
start_card_num = obj_card_num;
}
}
}
// In any case, we set the last card num.
last_card_num = obj_last_card_num;
BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
BitMap::idx_t last_idx = _cm->card_bitmap_index_for(obj_last);

// Set the bits in the card BM for this object (inclusive).
set_card_bitmap_range(start_idx, last_idx);

// Add the size of this object to the number of marked bytes.
marked_bytes += (size_t)obj_sz * HeapWordSize;

// Find the next marked object after this one.
start = _bm->getNextMarkedWordAddress(start + 1, nextTop);
}

// Handle the last range, if any.
if (start_card_num != -1) {
mark_card_num_range(start_card_num, last_card_num);
start = _bm->getNextMarkedWordAddress(obj_last + 1, nextTop);
}

// Mark the allocated-since-marking portion...
HeapWord* top = hr->top();
if (nextTop < top) {
start_card_num = intptr_t(uintptr_t(nextTop) >> CardTableModRefBS::card_shift);
last_card_num = intptr_t(uintptr_t(top) >> CardTableModRefBS::card_shift);
BitMap::idx_t start_idx = _cm->card_bitmap_index_for(nextTop);
BitMap::idx_t last_idx = _cm->card_bitmap_index_for(top - 1);

mark_card_num_range(start_card_num, last_card_num);
set_card_bitmap_range(start_idx, last_idx);

// This definitely means the region has live objects.
set_bit_for_region(hr);

@@ -1394,17 +1374,6 @@ public:
MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
Mutex::_no_safepoint_check_flag);

// Verify that _top_at_conc_count == ntams
if (hr->top_at_conc_mark_count() != hr->next_top_at_mark_start()) {
if (_verbose) {
gclog_or_tty->print_cr("Region %u: top at conc count incorrect: "
"expected " PTR_FORMAT ", actual: " PTR_FORMAT,
hr->hrs_index(), hr->next_top_at_mark_start(),
hr->top_at_conc_mark_count());
}
failures += 1;
}

// Verify the marked bytes for this region.
size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
size_t act_marked_bytes = hr->next_marked_bytes();

@@ -1470,7 +1439,7 @@ public:
_failures += failures;

// We could stop iteration over the heap when we
// find the first voilating region by returning true.
// find the first violating region by returning true.
return false;
}
};

@@ -1543,62 +1512,19 @@ public:
int failures() const { return _failures; }
};

// Final update of count data (during cleanup).
// Adds [top_at_count, NTAMS) to the marked bytes for each
// region. Sets the bits in the card bitmap corresponding
// to the interval [top_at_count, top], and sets the
// liveness bit for each region containing live data
// in the region bitmap.

class FinalCountDataUpdateClosure: public HeapRegionClosure {
ConcurrentMark* _cm;
BitMap* _region_bm;
BitMap* _card_bm;

void set_card_bitmap_range(BitMap::idx_t start_idx, BitMap::idx_t last_idx) {
assert(start_idx <= last_idx, "sanity");

// Set the inclusive bit range [start_idx, last_idx].
// For small ranges (up to 8 cards) use a simple loop; otherwise
// use par_at_put_range.
if ((last_idx - start_idx) <= 8) {
for (BitMap::idx_t i = start_idx; i <= last_idx; i += 1) {
_card_bm->par_set_bit(i);
}
} else {
assert(last_idx < _card_bm->size(), "sanity");
// Note BitMap::par_at_put_range() is exclusive.
_card_bm->par_at_put_range(start_idx, last_idx+1, true);
}
}

// It takes a region that's not empty (i.e., it has at least one
// live object in it and sets its corresponding bit on the region
// bitmap to 1. If the region is "starts humongous" it will also set
// to 1 the bits on the region bitmap that correspond to its
// associated "continues humongous" regions.
void set_bit_for_region(HeapRegion* hr) {
assert(!hr->continuesHumongous(), "should have filtered those out");

BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
if (!hr->startsHumongous()) {
// Normal (non-humongous) case: just set the bit.
_region_bm->par_set_bit(index);
} else {
// Starts humongous case: calculate how many regions are part of
// this humongous region and then set the bit range.
G1CollectedHeap* g1h = G1CollectedHeap::heap();
HeapRegion *last_hr = g1h->heap_region_containing_raw(hr->end() - 1);
BitMap::idx_t end_index = (BitMap::idx_t) last_hr->hrs_index() + 1;
_region_bm->par_at_put_range(index, end_index, true);
}
}
// Closure that finalizes the liveness counting data.
// Used during the cleanup pause.
// Sets the bits corresponding to the interval [NTAMS, top]
// (which contains the implicitly live objects) in the
// card liveness bitmap. Also sets the bit for each region,
// containing live data, in the region liveness bitmap.

class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
public:
FinalCountDataUpdateClosure(ConcurrentMark* cm,
BitMap* region_bm,
BitMap* card_bm) :
_cm(cm), _region_bm(region_bm), _card_bm(card_bm) { }
CMCountDataClosureBase(cm, region_bm, card_bm) { }

bool doHeapRegion(HeapRegion* hr) {

@@ -1613,26 +1539,10 @@ class FinalCountDataUpdateClosure: public HeapRegionClosure {
return false;
}

HeapWord* start = hr->top_at_conc_mark_count();
HeapWord* ntams = hr->next_top_at_mark_start();
HeapWord* top = hr->top();

assert(hr->bottom() <= start && start <= hr->end() &&
hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");

if (start < ntams) {
// Region was changed between remark and cleanup pauses
// We need to add (ntams - start) to the marked bytes
// for this region, and set bits for the range
// [ card_idx(start), card_idx(ntams) ) in the card bitmap.
size_t live_bytes = (ntams - start) * HeapWordSize;
hr->add_to_marked_bytes(live_bytes);

// Record the new top at conc count
hr->set_top_at_conc_mark_count(ntams);

// The setting of the bits in the card bitmap takes place below
}
assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");

// Mark the allocated-since-marking portion...
if (ntams < top) {

@@ -1640,8 +1550,8 @@ class FinalCountDataUpdateClosure: public HeapRegionClosure {
set_bit_for_region(hr);
}

// Now set the bits for [start, top]
BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
// Now set the bits for [ntams, top]
BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
BitMap::idx_t last_idx = _cm->card_bitmap_index_for(top);
set_card_bitmap_range(start_idx, last_idx);

@@ -3072,9 +2982,6 @@ class AggregateCountDataHRClosure: public HeapRegionClosure {
// Update the marked bytes for this region.
hr->add_to_marked_bytes(marked_bytes);

// Now set the top at count to NTAMS.
hr->set_top_at_conc_mark_count(limit);

// Next heap region
return false;
}
|
|||
if (curr == NULL)
|
||||
gclog_or_tty->print_cr(" empty");
|
||||
while (curr != NULL) {
|
||||
gclog_or_tty->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
|
||||
"age: %4d, y: %d, surv: %d",
|
||||
curr->bottom(), curr->end(),
|
||||
curr->top(),
|
||||
gclog_or_tty->print_cr(" "HR_FORMAT", P: "PTR_FORMAT "N: "PTR_FORMAT", age: %4d",
|
||||
HR_FORMAT_PARAMS(curr),
|
||||
curr->prev_top_at_mark_start(),
|
||||
curr->next_top_at_mark_start(),
|
||||
curr->top_at_conc_mark_count(),
|
||||
curr->age_in_surv_rate_group_cond(),
|
||||
curr->is_young(),
|
||||
curr->is_survivor());
|
||||
curr->age_in_surv_rate_group_cond());
|
||||
curr = curr->get_next_young_region();
|
||||
}
|
||||
}
|
||||
|
@ -1253,12 +1248,13 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
|
|||
IsGCActiveMark x;
|
||||
|
||||
// Timing
|
||||
bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc);
|
||||
assert(!system_gc || explicit_gc, "invariant");
|
||||
assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
|
||||
gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
|
||||
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
|
||||
TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC",
|
||||
G1Log::fine(), true, gclog_or_tty);
|
||||
|
||||
char verbose_str[128];
|
||||
sprintf(verbose_str, "Full GC (%s)", GCCause::to_string(gc_cause()));
|
||||
TraceTime t(verbose_str, G1Log::fine(), true, gclog_or_tty);
|
||||
|
||||
TraceCollectorStats tcs(g1mm()->full_collection_counters());
|
||||
TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
|
||||
|
@ -3593,25 +3589,22 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
|
|||
|
||||
// Inner scope for scope based logging, timers, and stats collection
|
||||
{
|
||||
char verbose_str[128];
|
||||
sprintf(verbose_str, "GC pause ");
|
||||
if (g1_policy()->gcs_are_young()) {
|
||||
strcat(verbose_str, "(young)");
|
||||
} else {
|
||||
strcat(verbose_str, "(mixed)");
|
||||
}
|
||||
if (g1_policy()->during_initial_mark_pause()) {
|
||||
strcat(verbose_str, " (initial-mark)");
|
||||
// We are about to start a marking cycle, so we increment the
|
||||
// full collection counter.
|
||||
increment_total_full_collections();
|
||||
}
|
||||
|
||||
// if the log level is "finer" is on, we'll print long statistics information
|
||||
// in the collector policy code, so let's not print this as the output
|
||||
// is messy if we do.
|
||||
gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
|
||||
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
|
||||
|
||||
char verbose_str[128];
|
||||
sprintf(verbose_str, "GC pause (%s) (%s)%s",
|
||||
GCCause::to_string(gc_cause()),
|
||||
g1_policy()->gcs_are_young() ? "young" : "mixed",
|
||||
g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");
|
||||
TraceTime t(verbose_str, G1Log::fine() && !G1Log::finer(), true, gclog_or_tty);
|
||||
|
||||
TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
|
||||
|
|
|
@ -886,8 +886,9 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
|
|||
size_t start_used) {
|
||||
if (G1Log::finer()) {
|
||||
gclog_or_tty->stamp(PrintGCTimeStamps);
|
||||
gclog_or_tty->print("[GC pause");
|
||||
gclog_or_tty->print(" (%s)", gcs_are_young() ? "young" : "mixed");
|
||||
gclog_or_tty->print("[GC pause (%s) (%s)",
|
||||
GCCause::to_string(_g1->gc_cause()),
|
||||
gcs_are_young() ? "young" : "mixed");
|
||||
}
|
||||
|
||||
// We only need to do this here as the policy will only be applied
|
||||
|
@@ -2459,16 +2460,10 @@ void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
   while (csr != NULL) {
     HeapRegion* next = csr->next_in_collection_set();
     assert(csr->in_collection_set(), "bad CS");
-    st->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
-                 "age: %4d, y: %d, surv: %d",
-                 csr->bottom(), csr->end(),
-                 csr->top(),
-                 csr->prev_top_at_mark_start(),
-                 csr->next_top_at_mark_start(),
-                 csr->top_at_conc_mark_count(),
-                 csr->age_in_surv_rate_group_cond(),
-                 csr->is_young(),
-                 csr->is_survivor());
+    st->print_cr("  "HR_FORMAT", P: "PTR_FORMAT "N: "PTR_FORMAT", age: %4d",
+                 HR_FORMAT_PARAMS(csr),
+                 csr->prev_top_at_mark_start(), csr->next_top_at_mark_start(),
+                 csr->age_in_surv_rate_group_cond());
     csr = next;
   }
 }
@@ -510,9 +510,6 @@ HeapRegion::HeapRegion(uint hrs_index,
   _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);

   assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
-  // In case the region is allocated during a pause, note the top.
-  // We haven't done any counting on a brand new region.
-  _top_at_conc_mark_count = bottom();
 }

 class NextCompactionHeapRegionClosure: public HeapRegionClosure {

@@ -585,14 +582,12 @@ void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
     // we find to be self-forwarded on the next bitmap. So all
     // objects need to be below NTAMS.
     _next_top_at_mark_start = top();
-    set_top_at_conc_mark_count(bottom());
     _next_marked_bytes = 0;
   } else if (during_conc_mark) {
     // During concurrent mark, all objects in the CSet (including
     // the ones we find to be self-forwarded) are implicitly live.
     // So all objects need to be above NTAMS.
     _next_top_at_mark_start = bottom();
-    set_top_at_conc_mark_count(bottom());
     _next_marked_bytes = 0;
   }
 }
@@ -306,9 +306,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
   // If a collection pause is in progress, this is the top at the start
   // of that pause.

-  // We've counted the marked bytes of objects below here.
-  HeapWord* _top_at_conc_mark_count;
-
   void init_top_at_mark_start() {
     assert(_prev_marked_bytes == 0 &&
            _next_marked_bytes == 0,

@@ -316,7 +313,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
     HeapWord* bot = bottom();
     _prev_top_at_mark_start = bot;
     _next_top_at_mark_start = bot;
-    _top_at_conc_mark_count = bot;
   }

   void set_young_type(YoungType new_type) {

@@ -625,19 +621,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
   // last mark phase ended.
   bool is_marked() { return _prev_top_at_mark_start != bottom(); }

-  void init_top_at_conc_mark_count() {
-    _top_at_conc_mark_count = bottom();
-  }
-
-  void set_top_at_conc_mark_count(HeapWord *cur) {
-    assert(bottom() <= cur && cur <= end(), "Sanity.");
-    _top_at_conc_mark_count = cur;
-  }
-
-  HeapWord* top_at_conc_mark_count() {
-    return _top_at_conc_mark_count;
-  }
-
   void reset_during_compaction() {
     guarantee( isHumongous() && startsHumongous(),
                "should only be called for humongous regions");

@@ -733,7 +716,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
     _evacuation_failed = b;

     if (b) {
-      init_top_at_conc_mark_count();
       _next_marked_bytes = 0;
     }
   }
@@ -56,7 +56,6 @@ G1OffsetTableContigSpace::block_start_const(const void* p) const {
 }

 inline void HeapRegion::note_start_of_marking() {
-  init_top_at_conc_mark_count();
   _next_marked_bytes = 0;
   _next_top_at_mark_start = top();
 }
@@ -39,7 +39,7 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
   // We measure the demand between the end of the previous sweep and
   // beginning of this sweep:
   //   Count(end_last_sweep) - Count(start_this_sweep)
-  //     + splitBirths(between) - splitDeaths(between)
+  //     + split_births(between) - split_deaths(between)
   // The above number divided by the time since the end of the
   // previous sweep gives us a time rate of demand for blocks
   // of this size. We compute a padded average of this rate as
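The comment above is the demand formula these renames preserve. A standalone sketch of that bookkeeping follows, with a plain exponentially weighted average standing in for AdaptivePaddedAverage; the types, field subset, and weight are invented for illustration:

// Sketch only: mirrors the renamed split_* / prev_sweep fields, but the
// class, the EMA weight, and the missing coal_* terms are simplifications.
struct DemandEstimator {
  long prev_sweep;     // count at the end of the previous sweep
  long split_births;   // chunks created by splitting since then
  long split_deaths;   // chunks consumed by splitting since then
  double padded_avg;   // running average of the demand rate

  // Demand over the inter-sweep interval, per the formula above:
  //   Count(end_last_sweep) - Count(start_this_sweep)
  //     + split_births(between) - split_deaths(between)
  long demand(long count_now) const {
    return prev_sweep - count_now + split_births - split_deaths;
  }

  // Fold the rate (demand per second) into a simple weighted average;
  // HotSpot additionally pads the average toward recent deviations.
  void sample(long count_now, double seconds, double weight = 0.25) {
    double rate = demand(count_now) / seconds;
    padded_avg = (1.0 - weight) * padded_avg + weight * rate;
  }
};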
@@ -51,34 +51,34 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
   AdaptivePaddedAverage _demand_rate_estimate;

   ssize_t     _desired;         // Demand estimate computed as described above
-  ssize_t     _coalDesired;     // desired +/- small-percent for tuning coalescing
+  ssize_t     _coal_desired;    // desired +/- small-percent for tuning coalescing

   ssize_t     _surplus;         // count - (desired +/- small-percent),
                                 // used to tune splitting in best fit
-  ssize_t     _bfrSurp;         // surplus at start of current sweep
-  ssize_t     _prevSweep;       // count from end of previous sweep
-  ssize_t     _beforeSweep;     // count from before current sweep
-  ssize_t     _coalBirths;      // additional chunks from coalescing
-  ssize_t     _coalDeaths;      // loss from coalescing
-  ssize_t     _splitBirths;     // additional chunks from splitting
-  ssize_t     _splitDeaths;     // loss from splitting
-  size_t      _returnedBytes;   // number of bytes returned to list.
+  ssize_t     _bfr_surp;        // surplus at start of current sweep
+  ssize_t     _prev_sweep;      // count from end of previous sweep
+  ssize_t     _before_sweep;    // count from before current sweep
+  ssize_t     _coal_births;     // additional chunks from coalescing
+  ssize_t     _coal_deaths;     // loss from coalescing
+  ssize_t     _split_births;    // additional chunks from splitting
+  ssize_t     _split_deaths;    // loss from splitting
+  size_t      _returned_bytes;  // number of bytes returned to list.
 public:
   void initialize(bool split_birth = false) {
     AdaptivePaddedAverage* dummy =
       new (&_demand_rate_estimate) AdaptivePaddedAverage(CMS_FLSWeight,
                                                          CMS_FLSPadding);
     _desired = 0;
-    _coalDesired = 0;
+    _coal_desired = 0;
     _surplus = 0;
-    _bfrSurp = 0;
-    _prevSweep = 0;
-    _beforeSweep = 0;
-    _coalBirths = 0;
-    _coalDeaths = 0;
-    _splitBirths = (split_birth ? 1 : 0);
-    _splitDeaths = 0;
-    _returnedBytes = 0;
+    _bfr_surp = 0;
+    _prev_sweep = 0;
+    _before_sweep = 0;
+    _coal_births = 0;
+    _coal_deaths = 0;
+    _split_births = (split_birth ? 1 : 0);
+    _split_deaths = 0;
+    _returned_bytes = 0;
   }

   AllocationStats() {
@@ -99,12 +99,12 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
     // vulnerable to noisy glitches. In such cases, we
     // ignore the current sample and use currently available
     // historical estimates.
-    assert(prevSweep() + splitBirths() + coalBirths()        // "Total Production Stock"
-           >= splitDeaths() + coalDeaths() + (ssize_t)count, // "Current stock + depletion"
+    assert(prev_sweep() + split_births() + coal_births()         // "Total Production Stock"
+           >= split_deaths() + coal_deaths() + (ssize_t)count,   // "Current stock + depletion"
            "Conservation Principle");
     if (inter_sweep_current > _threshold) {
-      ssize_t demand = prevSweep() - (ssize_t)count + splitBirths() + coalBirths()
-                       - splitDeaths() - coalDeaths();
+      ssize_t demand = prev_sweep() - (ssize_t)count + split_births() + coal_births()
+                       - split_deaths() - coal_deaths();
       assert(demand >= 0,
              err_msg("Demand (" SSIZE_FORMAT ") should be non-negative for "
                      PTR_FORMAT " (size=" SIZE_FORMAT ")",
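A worked instance of the "Conservation Principle" being asserted, with made-up counts (the numbers are not from the source):

// Suppose a size class ended the last sweep with prev_sweep = 10 chunks,
// then saw split_births = 4, coal_births = 2, split_deaths = 3,
// coal_deaths = 1, and currently holds count = 11 chunks.
constexpr long prev_sweep = 10, split_births = 4, coal_births = 2;
constexpr long split_deaths = 3, coal_deaths = 1, count = 11;
static_assert(prev_sweep + split_births + coal_births   // production: 16
              >= split_deaths + coal_deaths + count,    // stock + depletion: 15
              "Conservation Principle");
// The inter-sweep demand is then 10 - 11 + 4 + 2 - 3 - 1 = 1 chunk.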
@@ -130,40 +130,40 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
   ssize_t desired() const { return _desired; }
   void set_desired(ssize_t v) { _desired = v; }

-  ssize_t coalDesired() const { return _coalDesired; }
-  void set_coalDesired(ssize_t v) { _coalDesired = v; }
+  ssize_t coal_desired() const { return _coal_desired; }
+  void set_coal_desired(ssize_t v) { _coal_desired = v; }

   ssize_t surplus() const { return _surplus; }
   void set_surplus(ssize_t v) { _surplus = v; }
   void increment_surplus() { _surplus++; }
   void decrement_surplus() { _surplus--; }

-  ssize_t bfrSurp() const { return _bfrSurp; }
-  void set_bfrSurp(ssize_t v) { _bfrSurp = v; }
-  ssize_t prevSweep() const { return _prevSweep; }
-  void set_prevSweep(ssize_t v) { _prevSweep = v; }
-  ssize_t beforeSweep() const { return _beforeSweep; }
-  void set_beforeSweep(ssize_t v) { _beforeSweep = v; }
+  ssize_t bfr_surp() const { return _bfr_surp; }
+  void set_bfr_surp(ssize_t v) { _bfr_surp = v; }
+  ssize_t prev_sweep() const { return _prev_sweep; }
+  void set_prev_sweep(ssize_t v) { _prev_sweep = v; }
+  ssize_t before_sweep() const { return _before_sweep; }
+  void set_before_sweep(ssize_t v) { _before_sweep = v; }

-  ssize_t coalBirths() const { return _coalBirths; }
-  void set_coalBirths(ssize_t v) { _coalBirths = v; }
-  void increment_coalBirths() { _coalBirths++; }
+  ssize_t coal_births() const { return _coal_births; }
+  void set_coal_births(ssize_t v) { _coal_births = v; }
+  void increment_coal_births() { _coal_births++; }

-  ssize_t coalDeaths() const { return _coalDeaths; }
-  void set_coalDeaths(ssize_t v) { _coalDeaths = v; }
-  void increment_coalDeaths() { _coalDeaths++; }
+  ssize_t coal_deaths() const { return _coal_deaths; }
+  void set_coal_deaths(ssize_t v) { _coal_deaths = v; }
+  void increment_coal_deaths() { _coal_deaths++; }

-  ssize_t splitBirths() const { return _splitBirths; }
-  void set_splitBirths(ssize_t v) { _splitBirths = v; }
-  void increment_splitBirths() { _splitBirths++; }
+  ssize_t split_births() const { return _split_births; }
+  void set_split_births(ssize_t v) { _split_births = v; }
+  void increment_split_births() { _split_births++; }

-  ssize_t splitDeaths() const { return _splitDeaths; }
-  void set_splitDeaths(ssize_t v) { _splitDeaths = v; }
-  void increment_splitDeaths() { _splitDeaths++; }
+  ssize_t split_deaths() const { return _split_deaths; }
+  void set_split_deaths(ssize_t v) { _split_deaths = v; }
+  void increment_split_deaths() { _split_deaths++; }

   NOT_PRODUCT(
-    size_t returnedBytes() const { return _returnedBytes; }
-    void set_returnedBytes(size_t v) { _returnedBytes = v; }
+    size_t returned_bytes() const { return _returned_bytes; }
+    void set_returned_bytes(size_t v) { _returned_bytes = v; }
   )
 };
(File diff suppressed because it is too large.)

hotspot/src/share/vm/memory/binaryTreeDictionary.hpp (new file, 329 lines)
@@ -0,0 +1,329 @@
+/*
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_MEMORY_BINARYTREEDICTIONARY_HPP
+#define SHARE_VM_MEMORY_BINARYTREEDICTIONARY_HPP
+
+#include "memory/freeBlockDictionary.hpp"
+#include "memory/freeList.hpp"
+
+/*
+ * A binary tree based search structure for free blocks.
+ * This is currently used in the Concurrent Mark&Sweep implementation, but
+ * will be used for free block management for metadata.
+ */
+
+// A TreeList is a FreeList which can be used to maintain a
+// binary tree of free lists.
+
+template <class Chunk> class TreeChunk;
+template <class Chunk> class BinaryTreeDictionary;
+template <class Chunk> class AscendTreeCensusClosure;
+template <class Chunk> class DescendTreeCensusClosure;
+template <class Chunk> class DescendTreeSearchClosure;
+
+template <class Chunk>
+class TreeList: public FreeList<Chunk> {
+  friend class TreeChunk<Chunk>;
+  friend class BinaryTreeDictionary<Chunk>;
+  friend class AscendTreeCensusClosure<Chunk>;
+  friend class DescendTreeCensusClosure<Chunk>;
+  friend class DescendTreeSearchClosure<Chunk>;
+
+  TreeList<Chunk>* _parent;
+  TreeList<Chunk>* _left;
+  TreeList<Chunk>* _right;
+
+ protected:
+  TreeList<Chunk>* parent() const { return _parent; }
+  TreeList<Chunk>* left()   const { return _left;   }
+  TreeList<Chunk>* right()  const { return _right;  }
+
+  // Wrapper on call to base class, to get the template to compile.
+  Chunk* head() const { return FreeList<Chunk>::head(); }
+  Chunk* tail() const { return FreeList<Chunk>::tail(); }
+  void set_head(Chunk* head) { FreeList<Chunk>::set_head(head); }
+  void set_tail(Chunk* tail) { FreeList<Chunk>::set_tail(tail); }
+
+  size_t size() const { return FreeList<Chunk>::size(); }
+
+  // Accessors for links in tree.
+
+  void set_left(TreeList<Chunk>* tl) {
+    _left = tl;
+    if (tl != NULL)
+      tl->set_parent(this);
+  }
+  void set_right(TreeList<Chunk>* tl) {
+    _right = tl;
+    if (tl != NULL)
+      tl->set_parent(this);
+  }
+  void set_parent(TreeList<Chunk>* tl) { _parent = tl; }
+
+  void clearLeft()    { _left = NULL;   }
+  void clear_right()  { _right = NULL;  }
+  void clear_parent() { _parent = NULL; }
+  void initialize()   { clearLeft(); clear_right(), clear_parent(); }
+
+  // For constructing a TreeList from a Tree chunk or
+  // address and size.
+  static TreeList<Chunk>* as_TreeList(TreeChunk<Chunk>* tc);
+  static TreeList<Chunk>* as_TreeList(HeapWord* addr, size_t size);
+
+  // Returns the head of the free list as a pointer to a TreeChunk.
+  TreeChunk<Chunk>* head_as_TreeChunk();
+
+  // Returns the first available chunk in the free list as a pointer
+  // to a TreeChunk.
+  TreeChunk<Chunk>* first_available();
+
+  // Returns the block with the largest heap address amongst
+  // those in the list for this size; potentially slow and expensive,
+  // use with caution!
+  TreeChunk<Chunk>* largest_address();
+
+  // remove_chunk_replace_if_needed() removes the given "tc" from the TreeList.
+  // If "tc" is the first chunk in the list, it is also the
+  // TreeList that is the node in the tree.  remove_chunk_replace_if_needed()
+  // returns the possibly replaced TreeList* for the node in
+  // the tree.  It also updates the parent of the original
+  // node to point to the new node.
+  TreeList<Chunk>* remove_chunk_replace_if_needed(TreeChunk<Chunk>* tc);
+  // See FreeList.
+  void return_chunk_at_head(TreeChunk<Chunk>* tc);
+  void return_chunk_at_tail(TreeChunk<Chunk>* tc);
+};
+
+// A TreeChunk is a subclass of a Chunk that additionally
+// maintains a pointer to the free list on which it is currently
+// linked.
+// A TreeChunk is also used as a node in the binary tree.  This
+// allows the binary tree to be maintained without any additional
+// storage (the free chunks are used).  In a binary tree the first
+// chunk in the free list is also the tree node.  Note that the
+// TreeChunk has an embedded TreeList for this purpose.  Because
+// the first chunk in the list is distinguished in this fashion
+// (also is the node in the tree), it is the last chunk to be found
+// on the free list for a node in the tree and is only removed if
+// it is the last chunk on the free list.
+
+template <class Chunk>
+class TreeChunk : public Chunk {
+  friend class TreeList<Chunk>;
+  TreeList<Chunk>* _list;
+  TreeList<Chunk>  _embedded_list;  // if non-null, this chunk is on _list
+ protected:
+  TreeList<Chunk>* embedded_list() const { return (TreeList<Chunk>*) &_embedded_list; }
+  void set_embedded_list(TreeList<Chunk>* v) { _embedded_list = *v; }
+ public:
+  TreeList<Chunk>* list() { return _list; }
+  void set_list(TreeList<Chunk>* v) { _list = v; }
+  static TreeChunk<Chunk>* as_TreeChunk(Chunk* fc);
+  // Initialize fields in a TreeChunk that should be
+  // initialized when the TreeChunk is being added to
+  // a free list in the tree.
+  void initialize() { embedded_list()->initialize(); }
+
+  Chunk* next() const { return Chunk::next(); }
+  Chunk* prev() const { return Chunk::prev(); }
+  size_t size() const volatile { return Chunk::size(); }
+
+  // debugging
+  void verify_tree_chunk_list() const;
+};
+
+
+template <class Chunk>
+class BinaryTreeDictionary: public FreeBlockDictionary<Chunk> {
+  friend class VMStructs;
+  bool       _splay;
+  size_t     _total_size;
+  size_t     _total_free_blocks;
+  TreeList<Chunk>* _root;
+  bool       _adaptive_freelists;
+
+  // private accessors
+  bool splay() const { return _splay; }
+  void set_splay(bool v) { _splay = v; }
+  void set_total_size(size_t v) { _total_size = v; }
+  virtual void inc_total_size(size_t v);
+  virtual void dec_total_size(size_t v);
+  size_t total_free_blocks() const { return _total_free_blocks; }
+  void set_total_free_blocks(size_t v) { _total_free_blocks = v; }
+  TreeList<Chunk>* root() const { return _root; }
+  void set_root(TreeList<Chunk>* v) { _root = v; }
+  bool adaptive_freelists() { return _adaptive_freelists; }
+
+  // This field is added and can be set to point to the
+  // the Mutex used to synchronize access to the
+  // dictionary so that assertion checking can be done.
+  // For example it is set to point to _parDictionaryAllocLock.
+  NOT_PRODUCT(Mutex* _lock;)
+
+  // Remove a chunk of size "size" or larger from the tree and
+  // return it.  If the chunk
+  // is the last chunk of that size, remove the node for that size
+  // from the tree.
+  TreeChunk<Chunk>* get_chunk_from_tree(size_t size, enum FreeBlockDictionary<Chunk>::Dither dither, bool splay);
+  // Return a list of the specified size or NULL from the tree.
+  // The list is not removed from the tree.
+  TreeList<Chunk>* find_list (size_t size) const;
+  // Remove this chunk from the tree.  If the removal results
+  // in an empty list in the tree, remove the empty list.
+  TreeChunk<Chunk>* remove_chunk_from_tree(TreeChunk<Chunk>* tc);
+  // Remove the node in the trees starting at tl that has the
+  // minimum value and return it.  Repair the tree as needed.
+  TreeList<Chunk>* remove_tree_minimum(TreeList<Chunk>* tl);
+  void semi_splay_step(TreeList<Chunk>* tl);
+  // Add this free chunk to the tree.
+  void insert_chunk_in_tree(Chunk* freeChunk);
+ public:
+
+  static const size_t min_tree_chunk_size = sizeof(TreeChunk<Chunk>)/HeapWordSize;
+
+  void verify_tree() const;
+  // verify that the given chunk is in the tree.
+  bool verify_chunk_in_free_list(Chunk* tc) const;
+ private:
+  void          verify_tree_helper(TreeList<Chunk>* tl) const;
+  static size_t verify_prev_free_ptrs(TreeList<Chunk>* tl);
+
+  // Returns the total number of chunks in the list.
+  size_t total_list_length(TreeList<Chunk>* tl) const;
+  // Returns the total number of words in the chunks in the tree
+  // starting at "tl".
+  size_t total_size_in_tree(TreeList<Chunk>* tl) const;
+  // Returns the sum of the square of the size of each block
+  // in the tree starting at "tl".
+  double sum_of_squared_block_sizes(TreeList<Chunk>* const tl) const;
+  // Returns the total number of free blocks in the tree starting
+  // at "tl".
+  size_t total_free_blocks_in_tree(TreeList<Chunk>* tl) const;
+  size_t num_free_blocks() const;
+  size_t treeHeight() const;
+  size_t tree_height_helper(TreeList<Chunk>* tl) const;
+  size_t total_nodes_in_tree(TreeList<Chunk>* tl) const;
+  size_t total_nodes_helper(TreeList<Chunk>* tl) const;
+
+ public:
+  // Constructor
+  BinaryTreeDictionary(bool adaptive_freelists, bool splay = false);
+  BinaryTreeDictionary(MemRegion mr, bool adaptive_freelists, bool splay = false);
+
+  // Public accessors
+  size_t total_size() const { return _total_size; }
+
+  // Reset the dictionary to the initial conditions with
+  // a single free chunk.
+  void reset(MemRegion mr);
+  void reset(HeapWord* addr, size_t size);
+  // Reset the dictionary to be empty.
+  void reset();
+
+  // Return a chunk of size "size" or greater from
+  // the tree.
+  // want a better dynamic splay strategy for the future.
+  Chunk* get_chunk(size_t size, enum FreeBlockDictionary<Chunk>::Dither dither) {
+    FreeBlockDictionary<Chunk>::verify_par_locked();
+    Chunk* res = get_chunk_from_tree(size, dither, splay());
+    assert(res == NULL || res->is_free(),
+           "Should be returning a free chunk");
+    return res;
+  }
+
+  void return_chunk(Chunk* chunk) {
+    FreeBlockDictionary<Chunk>::verify_par_locked();
+    insert_chunk_in_tree(chunk);
+  }
+
+  void remove_chunk(Chunk* chunk) {
+    FreeBlockDictionary<Chunk>::verify_par_locked();
+    remove_chunk_from_tree((TreeChunk<Chunk>*)chunk);
+    assert(chunk->is_free(), "Should still be a free chunk");
+  }
+
+  size_t max_chunk_size() const;
+  size_t total_chunk_size(debug_only(const Mutex* lock)) const {
+    debug_only(
+      if (lock != NULL && lock->owned_by_self()) {
+        assert(total_size_in_tree(root()) == total_size(),
+               "_total_size inconsistency");
+      }
+    )
+    return total_size();
+  }
+
+  size_t min_size() const {
+    return min_tree_chunk_size;
+  }
+
+  double sum_of_squared_block_sizes() const {
+    return sum_of_squared_block_sizes(root());
+  }
+
+  Chunk* find_chunk_ends_at(HeapWord* target) const;
+
+  // Find the list with size "size" in the binary tree and update
+  // the statistics in the list according to "split" (chunk was
+  // split or coalesce) and "birth" (chunk was added or removed).
+  void dict_census_udpate(size_t size, bool split, bool birth);
+  // Return true if the dictionary is overpopulated (more chunks of
+  // this size than desired) for size "size".
+  bool coal_dict_over_populated(size_t size);
+  // Methods called at the beginning of a sweep to prepare the
+  // statistics for the sweep.
+  void begin_sweep_dict_census(double coalSurplusPercent,
+                               float inter_sweep_current,
+                               float inter_sweep_estimate,
+                               float intra_sweep_estimate);
+  // Methods called after the end of a sweep to modify the
+  // statistics for the sweep.
+  void end_sweep_dict_census(double splitSurplusPercent);
+  // Return the largest free chunk in the tree.
+  Chunk* find_largest_dict() const;
+  // Accessors for statistics
+  void set_tree_surplus(double splitSurplusPercent);
+  void set_tree_hints(void);
+  // Reset statistics for all the lists in the tree.
+  void clear_tree_census(void);
+  // Print the statistics for all the lists in the tree.  Also may
+  // print out summaries.
+  void print_dict_census(void) const;
+  void print_free_lists(outputStream* st) const;
+
+  // For debugging.  Returns the sum of the _returned_bytes for
+  // all lists in the tree.
+  size_t sum_dict_returned_bytes() PRODUCT_RETURN0;
+  // Sets the _returned_bytes for all the lists in the tree to zero.
+  void initialize_dict_returned_bytes() PRODUCT_RETURN;
+  // For debugging.  Return the total number of chunks in the dictionary.
+  size_t total_count() PRODUCT_RETURN0;
+
+  void report_statistics() const;
+
+  void verify() const;
+};
+
+#endif // SHARE_VM_MEMORY_BINARYTREEDICTIONARY_HPP
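The header above keys every free list by chunk size and threads the tree through the chunks themselves. As a rough feel for the API shape it exposes, here is a toy size-indexed dictionary with the same get/return verbs; DemoChunk and the std::multimap store are invented stand-ins for FreeChunk and the tree of TreeLists, not HotSpot code:

#include <cstddef>
#include <map>

struct DemoChunk {
  std::size_t size;
};

// Size-keyed free-block store mirroring BinaryTreeDictionary<Chunk>'s
// get_chunk/return_chunk verbs; a multimap stands in for the tree.
class ToyDictionary {
  std::multimap<std::size_t, DemoChunk*> _by_size;
 public:
  void return_chunk(DemoChunk* c) { _by_size.insert({c->size, c}); }

  // "atLeast" semantics: first size class >= the request.
  DemoChunk* get_chunk(std::size_t size) {
    auto it = _by_size.lower_bound(size);
    if (it == _by_size.end()) return nullptr;
    DemoChunk* c = it->second;
    _by_size.erase(it);
    return c;
  }
};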
@@ -23,7 +23,10 @@
  */

 #include "precompiled.hpp"
-#include "gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp"
+#ifndef SERIALGC
+#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
+#endif // SERIALGC
+#include "memory/freeBlockDictionary.hpp"
 #ifdef TARGET_OS_FAMILY_linux
 # include "thread_linux.inline.hpp"
 #endif

@@ -38,19 +41,19 @@
 #endif

 #ifndef PRODUCT
-Mutex* FreeBlockDictionary::par_lock() const {
+template <class Chunk> Mutex* FreeBlockDictionary<Chunk>::par_lock() const {
   return _lock;
 }

-void FreeBlockDictionary::set_par_lock(Mutex* lock) {
+template <class Chunk> void FreeBlockDictionary<Chunk>::set_par_lock(Mutex* lock) {
   _lock = lock;
 }

-void FreeBlockDictionary::verify_par_locked() const {
+template <class Chunk> void FreeBlockDictionary<Chunk>::verify_par_locked() const {
 #ifdef ASSERT
   if (ParallelGCThreads > 0) {
-    Thread* myThread = Thread::current();
-    if (myThread->is_GC_task_thread()) {
+    Thread* my_thread = Thread::current();
+    if (my_thread->is_GC_task_thread()) {
       assert(par_lock() != NULL, "Should be using locking?");
       assert_lock_strong(par_lock());
     }

@@ -58,3 +61,8 @@ void FreeBlockDictionary::verify_par_locked() const {
 #endif // ASSERT
 }
 #endif
+
+#ifndef SERIALGC
+// Explicitly instantiate for FreeChunk
+template class FreeBlockDictionary<FreeChunk>;
+#endif // SERIALGC
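The explicit instantiation added at the end of that hunk is what lets the template member definitions live in this .cpp rather than in the header: the compiler emits FreeBlockDictionary<FreeChunk>'s members here, and other translation units only need the declarations. A minimal standalone illustration of the same technique (Counter and its files are invented for the example):

// counter.hpp: declarations only; definitions stay in one .cpp.
template <class T>
class Counter {
 public:
  void add(T v);
 private:
  T _sum = T();
};

// counter.cpp: definitions plus an explicit instantiation, mirroring
// "template class FreeBlockDictionary<FreeChunk>;" above.
template <class T>
void Counter<T>::add(T v) { _sum += v; }

template class Counter<long>;  // emits Counter<long>::add in this object file

// Any other translation unit can now link against Counter<long>
// without ever seeing the definition of add().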
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,12 +22,10 @@
  *
  */

-#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_FREEBLOCKDICTIONARY_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_FREEBLOCKDICTIONARY_HPP
-
-#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
+#ifndef SHARE_VM_MEMORY_FREEBLOCKDICTIONARY_HPP
+#define SHARE_VM_MEMORY_FREEBLOCKDICTIONARY_HPP
 #include "memory/allocation.hpp"
 #include "memory/memRegion.hpp"
 #include "runtime/mutex.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"

@@ -35,6 +33,7 @@

 // A FreeBlockDictionary is an abstract superclass that will allow
 // a number of alternative implementations in the future.
+template <class Chunk>
 class FreeBlockDictionary: public CHeapObj {
  public:
   enum Dither {
@@ -52,45 +51,45 @@ class FreeBlockDictionary: public CHeapObj {
   NOT_PRODUCT(Mutex* _lock;)

  public:
-  virtual void removeChunk(FreeChunk* fc) = 0;
-  virtual FreeChunk* getChunk(size_t size, Dither dither = atLeast) = 0;
-  virtual void returnChunk(FreeChunk* chunk) = 0;
-  virtual size_t totalChunkSize(debug_only(const Mutex* lock)) const = 0;
-  virtual size_t maxChunkSize() const = 0;
-  virtual size_t minSize() const = 0;
+  virtual void remove_chunk(Chunk* fc) = 0;
+  virtual Chunk* get_chunk(size_t size, Dither dither = atLeast) = 0;
+  virtual void return_chunk(Chunk* chunk) = 0;
+  virtual size_t total_chunk_size(debug_only(const Mutex* lock)) const = 0;
+  virtual size_t max_chunk_size() const = 0;
+  virtual size_t min_size() const = 0;
   // Reset the dictionary to the initial conditions for a single
   // block.
   virtual void reset(HeapWord* addr, size_t size) = 0;
   virtual void reset() = 0;

-  virtual void dictCensusUpdate(size_t size, bool split, bool birth) = 0;
-  virtual bool coalDictOverPopulated(size_t size) = 0;
-  virtual void beginSweepDictCensus(double coalSurplusPercent,
+  virtual void dict_census_udpate(size_t size, bool split, bool birth) = 0;
+  virtual bool coal_dict_over_populated(size_t size) = 0;
+  virtual void begin_sweep_dict_census(double coalSurplusPercent,
                        float inter_sweep_current, float inter_sweep_estimate,
                        float intra__sweep_current) = 0;
-  virtual void endSweepDictCensus(double splitSurplusPercent) = 0;
-  virtual FreeChunk* findLargestDict() const = 0;
+  virtual void end_sweep_dict_census(double splitSurplusPercent) = 0;
+  virtual Chunk* find_largest_dict() const = 0;
   // verify that the given chunk is in the dictionary.
-  virtual bool verifyChunkInFreeLists(FreeChunk* tc) const = 0;
+  virtual bool verify_chunk_in_free_list(Chunk* tc) const = 0;

   // Sigma_{all_free_blocks} (block_size^2)
   virtual double sum_of_squared_block_sizes() const = 0;

-  virtual FreeChunk* find_chunk_ends_at(HeapWord* target) const = 0;
-  virtual void inc_totalSize(size_t v) = 0;
-  virtual void dec_totalSize(size_t v) = 0;
+  virtual Chunk* find_chunk_ends_at(HeapWord* target) const = 0;
+  virtual void inc_total_size(size_t v) = 0;
+  virtual void dec_total_size(size_t v) = 0;

   NOT_PRODUCT (
-    virtual size_t sumDictReturnedBytes() = 0;
-    virtual void initializeDictReturnedBytes() = 0;
-    virtual size_t totalCount() = 0;
+    virtual size_t sum_dict_returned_bytes() = 0;
+    virtual void initialize_dict_returned_bytes() = 0;
+    virtual size_t total_count() = 0;
   )

-  virtual void reportStatistics() const {
+  virtual void report_statistics() const {
     gclog_or_tty->print("No statistics available");
   }

-  virtual void printDictCensus() const = 0;
+  virtual void print_dict_census() const = 0;
   virtual void print_free_lists(outputStream* st) const = 0;

   virtual void verify() const = 0;

@@ -100,4 +99,4 @@ class FreeBlockDictionary: public CHeapObj {
   void verify_par_locked() const PRODUCT_RETURN;
 };

-#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_FREEBLOCKDICTIONARY_HPP
+#endif // SHARE_VM_MEMORY_FREEBLOCKDICTIONARY_HPP
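The interface above leans on HotSpot's product-build macros: NOT_PRODUCT(...) compiles members away in product builds, and PRODUCT_RETURN turns a declaration into an empty inline stub. Their rough shape, paraphrased from memory of HotSpot's macro headers rather than quoted, is:

#ifdef PRODUCT
#define NOT_PRODUCT(code)             // debug-only members vanish
#define PRODUCT_RETURN  {}            // the "body" becomes an empty stub
#define PRODUCT_RETURN0 { return 0; } // likewise, returning zero
#else
#define NOT_PRODUCT(code) code
#define PRODUCT_RETURN    // declaration ends with ; at the use site
#define PRODUCT_RETURN0   // real body lives in the .cpp
#endif

// So "void verify_par_locked() const PRODUCT_RETURN;" is an empty inline
// function in product builds and an ordinary declaration otherwise.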
@@ -23,20 +23,25 @@
  */

 #include "precompiled.hpp"
-#include "gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp"
-#include "gc_implementation/concurrentMarkSweep/freeList.hpp"
+#include "memory/freeBlockDictionary.hpp"
+#include "memory/freeList.hpp"
 #include "memory/sharedHeap.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/mutex.hpp"
 #include "runtime/vmThread.hpp"

+#ifndef SERIALGC
+#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
+#endif // SERIALGC
+
 // Free list.  A FreeList is used to access a linked list of chunks
 // of space in the heap.  The head and tail are maintained so that
 // items can be (as in the current implementation) added at the
 // at the tail of the list and removed from the head of the list to
 // maintain a FIFO queue.

-FreeList::FreeList() :
+template <class Chunk>
+FreeList<Chunk>::FreeList() :
   _head(NULL), _tail(NULL)
 #ifdef ASSERT
   , _protecting_lock(NULL)
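The comment above states the list discipline that the rest of this file implements: add at the tail, remove at the head. A minimal standalone sketch of that FIFO discipline, with an invented Node type standing in for FreeChunk's intrusive next/prev links:

// Sketch only: Node and FifoList are illustrative, not HotSpot types.
struct Node {
  Node* next = nullptr;
  Node* prev = nullptr;
};

struct FifoList {
  Node* head = nullptr;
  Node* tail = nullptr;

  // Mirrors return_chunk_at_tail(): link after the old tail.
  void add_at_tail(Node* n) {
    n->prev = tail;
    n->next = nullptr;
    if (tail != nullptr) tail->next = n; else head = n;
    tail = n;
  }

  // Mirrors get_chunk_at_head(): unlink and return the old head.
  Node* remove_at_head() {
    Node* n = head;
    if (n == nullptr) return nullptr;
    head = n->next;
    if (head != nullptr) head->prev = nullptr; else tail = nullptr;
    n->next = n->prev = nullptr;
    return n;
  }
};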
@@ -48,7 +53,8 @@ FreeList::FreeList() :
   init_statistics();
 }

-FreeList::FreeList(FreeChunk* fc) :
+template <class Chunk>
+FreeList<Chunk>::FreeList(Chunk* fc) :
   _head(fc), _tail(fc)
 #ifdef ASSERT
   , _protecting_lock(NULL)
@@ -59,48 +65,35 @@ FreeList::FreeList(FreeChunk* fc) :
   _hint = 0;
   init_statistics();
 #ifndef PRODUCT
-  _allocation_stats.set_returnedBytes(size() * HeapWordSize);
+  _allocation_stats.set_returned_bytes(size() * HeapWordSize);
 #endif
 }

-FreeList::FreeList(HeapWord* addr, size_t size) :
-  _head((FreeChunk*) addr), _tail((FreeChunk*) addr)
-#ifdef ASSERT
-  , _protecting_lock(NULL)
-#endif
-{
-  assert(size > sizeof(FreeChunk), "size is too small");
-  head()->setSize(size);
-  _size = size;
-  _count = 1;
-  init_statistics();
-#ifndef PRODUCT
-  _allocation_stats.set_returnedBytes(_size * HeapWordSize);
-#endif
-}
-
-void FreeList::reset(size_t hint) {
+template <class Chunk>
+void FreeList<Chunk>::reset(size_t hint) {
   set_count(0);
   set_head(NULL);
   set_tail(NULL);
   set_hint(hint);
 }

-void FreeList::init_statistics(bool split_birth) {
+template <class Chunk>
+void FreeList<Chunk>::init_statistics(bool split_birth) {
   _allocation_stats.initialize(split_birth);
 }

-FreeChunk* FreeList::getChunkAtHead() {
+template <class Chunk>
+Chunk* FreeList<Chunk>::get_chunk_at_head() {
   assert_proper_lock_protection();
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
-  FreeChunk* fc = head();
+  Chunk* fc = head();
   if (fc != NULL) {
-    FreeChunk* nextFC = fc->next();
+    Chunk* nextFC = fc->next();
     if (nextFC != NULL) {
       // The chunk fc being removed has a "next".  Set the "next" to the
       // "prev" of fc.
-      nextFC->linkPrev(NULL);
+      nextFC->link_prev(NULL);
     } else { // removed tail of list
       link_tail(NULL);
     }
@@ -113,29 +106,30 @@ FreeChunk* FreeList::getChunkAtHead() {
 }


-void FreeList::getFirstNChunksFromList(size_t n, FreeList* fl) {
+template <class Chunk>
+void FreeList<Chunk>::getFirstNChunksFromList(size_t n, FreeList<Chunk>* fl) {
   assert_proper_lock_protection();
   assert(fl->count() == 0, "Precondition");
   if (count() > 0) {
     int k = 1;
     fl->set_head(head()); n--;
-    FreeChunk* tl = head();
+    Chunk* tl = head();
     while (tl->next() != NULL && n > 0) {
       tl = tl->next(); n--; k++;
     }
     assert(tl != NULL, "Loop Inv.");

     // First, fix up the list we took from.
-    FreeChunk* new_head = tl->next();
+    Chunk* new_head = tl->next();
     set_head(new_head);
     set_count(count() - k);
     if (new_head == NULL) {
       set_tail(NULL);
     } else {
-      new_head->linkPrev(NULL);
+      new_head->link_prev(NULL);
     }
     // Now we can fix up the tail.
-    tl->linkNext(NULL);
+    tl->link_next(NULL);
     // And return the result.
     fl->set_tail(tl);
     fl->set_count(k);
@@ -143,7 +137,8 @@ void FreeList::getFirstNChunksFromList(size_t n, FreeList* fl) {
 }

 // Remove this chunk from the list
-void FreeList::removeChunk(FreeChunk*fc) {
+template <class Chunk>
+void FreeList<Chunk>::remove_chunk(Chunk*fc) {
   assert_proper_lock_protection();
   assert(head() != NULL, "Remove from empty list");
   assert(fc != NULL, "Remove a NULL chunk");

@@ -151,12 +146,12 @@ void FreeList::removeChunk(FreeChunk*fc) {
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");

-  FreeChunk* prevFC = fc->prev();
-  FreeChunk* nextFC = fc->next();
+  Chunk* prevFC = fc->prev();
+  Chunk* nextFC = fc->next();
   if (nextFC != NULL) {
     // The chunk fc being removed has a "next".  Set the "next" to the
     // "prev" of fc.
-    nextFC->linkPrev(prevFC);
+    nextFC->link_prev(prevFC);
   } else { // removed tail of list
     link_tail(prevFC);
   }

@@ -165,7 +160,7 @@ void FreeList::removeChunk(FreeChunk*fc) {
     assert(nextFC == NULL || nextFC->prev() == NULL,
            "Prev of head should be NULL");
   } else {
-    prevFC->linkNext(nextFC);
+    prevFC->link_next(nextFC);
     assert(tail() != prevFC || prevFC->next() == NULL,
            "Next of tail should be NULL");
   }

@@ -174,10 +169,10 @@ void FreeList::removeChunk(FreeChunk*fc) {
          "H/T/C Inconsistency");
   // clear next and prev fields of fc, debug only
   NOT_PRODUCT(
-    fc->linkPrev(NULL);
-    fc->linkNext(NULL);
+    fc->link_prev(NULL);
+    fc->link_next(NULL);
   )
-  assert(fc->isFree(), "Should still be a free chunk");
+  assert(fc->is_free(), "Should still be a free chunk");
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
   assert(head() == NULL || head()->size() == size(), "wrong item on list");
@@ -185,16 +180,17 @@ void FreeList::removeChunk(FreeChunk*fc) {
 }

 // Add this chunk at the head of the list.
-void FreeList::returnChunkAtHead(FreeChunk* chunk, bool record_return) {
+template <class Chunk>
+void FreeList<Chunk>::return_chunk_at_head(Chunk* chunk, bool record_return) {
   assert_proper_lock_protection();
   assert(chunk != NULL, "insert a NULL chunk");
   assert(size() == chunk->size(), "Wrong size");
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");

-  FreeChunk* oldHead = head();
+  Chunk* oldHead = head();
   assert(chunk != oldHead, "double insertion");
-  chunk->linkAfter(oldHead);
+  chunk->link_after(oldHead);
   link_head(chunk);
   if (oldHead == NULL) { // only chunk in list
     assert(tail() == NULL, "inconsistent FreeList");

@@ -203,7 +199,7 @@ void FreeList::returnChunkAtHead(FreeChunk* chunk, bool record_return) {
   increment_count(); // of # of chunks in list
   DEBUG_ONLY(
     if (record_return) {
-      increment_returnedBytes_by(size()*HeapWordSize);
+      increment_returned_bytes_by(size()*HeapWordSize);
     }
   )
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
@@ -212,23 +208,25 @@ void FreeList::returnChunkAtHead(FreeChunk* chunk, bool record_return) {
   assert(tail() == NULL || tail()->size() == size(), "wrong item on list");
 }

-void FreeList::returnChunkAtHead(FreeChunk* chunk) {
+template <class Chunk>
+void FreeList<Chunk>::return_chunk_at_head(Chunk* chunk) {
   assert_proper_lock_protection();
-  returnChunkAtHead(chunk, true);
+  return_chunk_at_head(chunk, true);
 }

 // Add this chunk at the tail of the list.
-void FreeList::returnChunkAtTail(FreeChunk* chunk, bool record_return) {
+template <class Chunk>
+void FreeList<Chunk>::return_chunk_at_tail(Chunk* chunk, bool record_return) {
   assert_proper_lock_protection();
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
   assert(chunk != NULL, "insert a NULL chunk");
   assert(size() == chunk->size(), "wrong size");

-  FreeChunk* oldTail = tail();
+  Chunk* oldTail = tail();
   assert(chunk != oldTail, "double insertion");
   if (oldTail != NULL) {
-    oldTail->linkAfter(chunk);
+    oldTail->link_after(chunk);
   } else { // only chunk in list
     assert(head() == NULL, "inconsistent FreeList");
     link_head(chunk);

@@ -237,7 +235,7 @@ void FreeList::returnChunkAtTail(FreeChunk* chunk, bool record_return) {
   increment_count(); // of # of chunks in list
   DEBUG_ONLY(
     if (record_return) {
-      increment_returnedBytes_by(size()*HeapWordSize);
+      increment_returned_bytes_by(size()*HeapWordSize);
     }
   )
   assert(head() == NULL || head()->prev() == NULL, "list invariant");

@@ -246,11 +244,13 @@ void FreeList::returnChunkAtTail(FreeChunk* chunk) {
   assert(tail() == NULL || tail()->size() == size(), "wrong item on list");
 }

-void FreeList::returnChunkAtTail(FreeChunk* chunk) {
-  returnChunkAtTail(chunk, true);
+template <class Chunk>
+void FreeList<Chunk>::return_chunk_at_tail(Chunk* chunk) {
+  return_chunk_at_tail(chunk, true);
 }

-void FreeList::prepend(FreeList* fl) {
+template <class Chunk>
+void FreeList<Chunk>::prepend(FreeList<Chunk>* fl) {
   assert_proper_lock_protection();
   if (fl->count() > 0) {
     if (count() == 0) {
@@ -259,11 +259,11 @@ void FreeList::prepend(FreeList* fl) {
       set_count(fl->count());
     } else {
       // Both are non-empty.
-      FreeChunk* fl_tail = fl->tail();
-      FreeChunk* this_head = head();
+      Chunk* fl_tail = fl->tail();
+      Chunk* this_head = head();
       assert(fl_tail->next() == NULL, "Well-formedness of fl");
-      fl_tail->linkNext(this_head);
-      this_head->linkPrev(fl_tail);
+      fl_tail->link_next(this_head);
+      this_head->link_prev(fl_tail);
       set_head(fl->head());
       set_count(count() + fl->count());
     }

@@ -273,13 +273,14 @@ void FreeList::prepend(FreeList* fl) {
   }
 }

-// verifyChunkInFreeLists() is used to verify that an item is in this free list.
+// verify_chunk_in_free_list() is used to verify that an item is in this free list.
 // It is used as a debugging aid.
-bool FreeList::verifyChunkInFreeLists(FreeChunk* fc) const {
+template <class Chunk>
+bool FreeList<Chunk>::verify_chunk_in_free_list(Chunk* fc) const {
   // This is an internal consistency check, not part of the check that the
   // chunk is in the free lists.
   guarantee(fc->size() == size(), "Wrong list is being searched");
-  FreeChunk* curFC = head();
+  Chunk* curFC = head();
   while (curFC) {
     // This is an internal consistency check.
     guarantee(size() == curFC->size(), "Chunk is in wrong list.");
@@ -292,7 +293,8 @@ bool FreeList::verifyChunkInFreeLists(FreeChunk* fc) const {
 }

 #ifndef PRODUCT
-void FreeList::verify_stats() const {
+template <class Chunk>
+void FreeList<Chunk>::verify_stats() const {
   // The +1 of the LH comparand is to allow some "looseness" in
   // checking: we usually call this interface when adding a block
   // and we'll subsequently update the stats; we cannot update the

@@ -300,24 +302,25 @@ void FreeList::verify_stats() const {
   // dictionary for example, this might be the first block and
   // in that case there would be no place that we could record
   // the stats (which are kept in the block itself).
-  assert((_allocation_stats.prevSweep() + _allocation_stats.splitBirths()
-          + _allocation_stats.coalBirths() + 1)   // Total Production Stock + 1
-         >= (_allocation_stats.splitDeaths() + _allocation_stats.coalDeaths()
+  assert((_allocation_stats.prev_sweep() + _allocation_stats.split_births()
+          + _allocation_stats.coal_births() + 1)   // Total Production Stock + 1
+         >= (_allocation_stats.split_deaths() + _allocation_stats.coal_deaths()
              + (ssize_t)count()),                  // Total Current Stock + depletion
          err_msg("FreeList " PTR_FORMAT " of size " SIZE_FORMAT
                  " violates Conservation Principle: "
-                 "prevSweep(" SIZE_FORMAT ")"
-                 " + splitBirths(" SIZE_FORMAT ")"
-                 " + coalBirths(" SIZE_FORMAT ") + 1 >= "
-                 " splitDeaths(" SIZE_FORMAT ")"
-                 " coalDeaths(" SIZE_FORMAT ")"
+                 "prev_sweep(" SIZE_FORMAT ")"
+                 " + split_births(" SIZE_FORMAT ")"
+                 " + coal_births(" SIZE_FORMAT ") + 1 >= "
+                 " split_deaths(" SIZE_FORMAT ")"
+                 " coal_deaths(" SIZE_FORMAT ")"
                  " + count(" SSIZE_FORMAT ")",
-                 this, _size, _allocation_stats.prevSweep(), _allocation_stats.splitBirths(),
-                 _allocation_stats.splitBirths(), _allocation_stats.splitDeaths(),
-                 _allocation_stats.coalDeaths(), count()));
+                 this, _size, _allocation_stats.prev_sweep(), _allocation_stats.split_births(),
+                 _allocation_stats.split_births(), _allocation_stats.split_deaths(),
+                 _allocation_stats.coal_deaths(), count()));
 }

-void FreeList::assert_proper_lock_protection_work() const {
+template <class Chunk>
+void FreeList<Chunk>::assert_proper_lock_protection_work() const {
   assert(_protecting_lock != NULL, "Don't call this directly");
   assert(ParallelGCThreads > 0, "Don't call this directly");
   Thread* thr = Thread::current();

@@ -334,7 +337,8 @@ void FreeList::assert_proper_lock_protection_work() const {
 #endif

 // Print the "label line" for free list stats.
-void FreeList::print_labels_on(outputStream* st, const char* c) {
+template <class Chunk>
+void FreeList<Chunk>::print_labels_on(outputStream* st, const char* c) {
   st->print("%16s\t", c);
   st->print("%14s\t" "%14s\t" "%14s\t" "%14s\t" "%14s\t"
             "%14s\t" "%14s\t" "%14s\t" "%14s\t" "%14s\t" "\n",
@@ -346,7 +350,8 @@ void FreeList::print_labels_on(outputStream* st, const char* c) {
 // to the call is a non-null string, it is printed in the first column;
 // otherwise, if the argument is null (the default), then the size of the
 // (free list) block is printed in the first column.
-void FreeList::print_on(outputStream* st, const char* c) const {
+template <class Chunk>
+void FreeList<Chunk>::print_on(outputStream* st, const char* c) const {
   if (c != NULL) {
     st->print("%16s", c);
   } else {

@@ -355,6 +360,11 @@ void FreeList::print_on(outputStream* st, const char* c) const {
   st->print("\t"
             SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t"
             SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\n",
-            bfrSurp(), surplus(), desired(), prevSweep(), beforeSweep(),
-            count(), coalBirths(), coalDeaths(), splitBirths(), splitDeaths());
+            bfr_surp(), surplus(), desired(), prev_sweep(), before_sweep(),
+            count(), coal_births(), coal_deaths(), split_births(), split_deaths());
 }
+
+#ifndef SERIALGC
+// Needs to be after the definitions have been seen.
+template class FreeList<FreeChunk>;
+#endif // SERIALGC
@@ -22,39 +22,36 @@
  *
  */

-#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_FREELIST_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_FREELIST_HPP
+#ifndef SHARE_VM_MEMORY_FREELIST_HPP
+#define SHARE_VM_MEMORY_FREELIST_HPP

 #include "gc_implementation/shared/allocationStats.hpp"

 class CompactibleFreeListSpace;

-// A class for maintaining a free list of FreeChunk's.  The FreeList
+// A class for maintaining a free list of Chunk's.  The FreeList
 // maintains the structure of the list (head, tail, etc.) plus
 // statistics for allocations from the list.  The links between items
 // are not part of FreeList.  The statistics are
-// used to make decisions about coalescing FreeChunk's when they
+// used to make decisions about coalescing Chunk's when they
 // are swept during collection.
 //
 // See the corresponding .cpp file for a description of the specifics
 // for that implementation.

 class Mutex;
-class TreeList;
+template <class Chunk> class TreeList;
+template <class Chunk> class PrintTreeCensusClosure;

+template <class Chunk>
 class FreeList VALUE_OBJ_CLASS_SPEC {
   friend class CompactibleFreeListSpace;
   friend class VMStructs;
-  friend class PrintTreeCensusClosure;
-
- protected:
-  TreeList* _parent;
-  TreeList* _left;
-  TreeList* _right;
+  friend class PrintTreeCensusClosure<Chunk>;

  private:
-  FreeChunk* _head;          // Head of list of free chunks
-  FreeChunk* _tail;          // Tail of list of free chunks
+  Chunk*     _head;          // Head of list of free chunks
+  Chunk*     _tail;          // Tail of list of free chunks
   size_t     _size;          // Size in Heap words of each chunk
   ssize_t    _count;         // Number of entries in list
   size_t     _hint;          // next larger size list with a positive surplus
@@ -92,10 +89,7 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
   // Construct a list without any entries.
   FreeList();
   // Construct a list with "fc" as the first (and lone) entry in the list.
-  FreeList(FreeChunk* fc);
-  // Construct a list which will have a FreeChunk at address "addr" and
-  // of size "size" as the first (and lone) entry in the list.
-  FreeList(HeapWord* addr, size_t size);
+  FreeList(Chunk* fc);

   // Reset the head, tail, hint, and count of a free list.
   void reset(size_t hint);
@@ -108,43 +102,43 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
 #endif

   // Accessors.
-  FreeChunk* head() const {
+  Chunk* head() const {
     assert_proper_lock_protection();
     return _head;
   }
-  void set_head(FreeChunk* v) {
+  void set_head(Chunk* v) {
     assert_proper_lock_protection();
     _head = v;
     assert(!_head || _head->size() == _size, "bad chunk size");
   }
   // Set the head of the list and set the prev field of non-null
   // values to NULL.
-  void link_head(FreeChunk* v) {
+  void link_head(Chunk* v) {
     assert_proper_lock_protection();
     set_head(v);
     // If this method is not used (just set the head instead),
     // this check can be avoided.
     if (v != NULL) {
-      v->linkPrev(NULL);
+      v->link_prev(NULL);
     }
   }

-  FreeChunk* tail() const {
+  Chunk* tail() const {
     assert_proper_lock_protection();
     return _tail;
   }
-  void set_tail(FreeChunk* v) {
+  void set_tail(Chunk* v) {
     assert_proper_lock_protection();
     _tail = v;
     assert(!_tail || _tail->size() == _size, "bad chunk size");
   }
   // Set the tail of the list and set the next field of non-null
   // values to NULL.
-  void link_tail(FreeChunk* v) {
+  void link_tail(Chunk* v) {
     assert_proper_lock_protection();
     set_tail(v);
     if (v != NULL) {
-      v->clearNext();
+      v->clear_next();
     }
   }

@@ -191,12 +185,12 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
                        inter_sweep_estimate,
                        intra_sweep_estimate);
   }
-  ssize_t coalDesired() const {
-    return _allocation_stats.coalDesired();
+  ssize_t coal_desired() const {
+    return _allocation_stats.coal_desired();
   }
-  void set_coalDesired(ssize_t v) {
+  void set_coal_desired(ssize_t v) {
     assert_proper_lock_protection();
-    _allocation_stats.set_coalDesired(v);
+    _allocation_stats.set_coal_desired(v);
   }

   ssize_t surplus() const {
@ -215,114 +209,114 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
|
|||
_allocation_stats.decrement_surplus();
|
||||
}
|
||||
|
||||
ssize_t bfrSurp() const {
|
||||
return _allocation_stats.bfrSurp();
|
||||
ssize_t bfr_surp() const {
|
||||
return _allocation_stats.bfr_surp();
|
||||
}
|
||||
void set_bfrSurp(ssize_t v) {
|
||||
+  void set_bfr_surp(ssize_t v) {
     assert_proper_lock_protection();
-    _allocation_stats.set_bfrSurp(v);
+    _allocation_stats.set_bfr_surp(v);
   }
-  ssize_t prevSweep() const {
-    return _allocation_stats.prevSweep();
+  ssize_t prev_sweep() const {
+    return _allocation_stats.prev_sweep();
   }
-  void set_prevSweep(ssize_t v) {
+  void set_prev_sweep(ssize_t v) {
     assert_proper_lock_protection();
-    _allocation_stats.set_prevSweep(v);
+    _allocation_stats.set_prev_sweep(v);
   }
-  ssize_t beforeSweep() const {
-    return _allocation_stats.beforeSweep();
+  ssize_t before_sweep() const {
+    return _allocation_stats.before_sweep();
   }
-  void set_beforeSweep(ssize_t v) {
+  void set_before_sweep(ssize_t v) {
     assert_proper_lock_protection();
-    _allocation_stats.set_beforeSweep(v);
+    _allocation_stats.set_before_sweep(v);
   }

-  ssize_t coalBirths() const {
-    return _allocation_stats.coalBirths();
+  ssize_t coal_births() const {
+    return _allocation_stats.coal_births();
   }
-  void set_coalBirths(ssize_t v) {
+  void set_coal_births(ssize_t v) {
     assert_proper_lock_protection();
-    _allocation_stats.set_coalBirths(v);
+    _allocation_stats.set_coal_births(v);
   }
-  void increment_coalBirths() {
+  void increment_coal_births() {
     assert_proper_lock_protection();
-    _allocation_stats.increment_coalBirths();
+    _allocation_stats.increment_coal_births();
   }

-  ssize_t coalDeaths() const {
-    return _allocation_stats.coalDeaths();
+  ssize_t coal_deaths() const {
+    return _allocation_stats.coal_deaths();
   }
-  void set_coalDeaths(ssize_t v) {
+  void set_coal_deaths(ssize_t v) {
     assert_proper_lock_protection();
-    _allocation_stats.set_coalDeaths(v);
+    _allocation_stats.set_coal_deaths(v);
   }
-  void increment_coalDeaths() {
+  void increment_coal_deaths() {
     assert_proper_lock_protection();
-    _allocation_stats.increment_coalDeaths();
+    _allocation_stats.increment_coal_deaths();
   }

-  ssize_t splitBirths() const {
-    return _allocation_stats.splitBirths();
+  ssize_t split_births() const {
+    return _allocation_stats.split_births();
   }
-  void set_splitBirths(ssize_t v) {
+  void set_split_births(ssize_t v) {
     assert_proper_lock_protection();
-    _allocation_stats.set_splitBirths(v);
+    _allocation_stats.set_split_births(v);
   }
-  void increment_splitBirths() {
+  void increment_split_births() {
     assert_proper_lock_protection();
-    _allocation_stats.increment_splitBirths();
+    _allocation_stats.increment_split_births();
   }

-  ssize_t splitDeaths() const {
-    return _allocation_stats.splitDeaths();
+  ssize_t split_deaths() const {
+    return _allocation_stats.split_deaths();
   }
-  void set_splitDeaths(ssize_t v) {
+  void set_split_deaths(ssize_t v) {
     assert_proper_lock_protection();
-    _allocation_stats.set_splitDeaths(v);
+    _allocation_stats.set_split_deaths(v);
   }
-  void increment_splitDeaths() {
+  void increment_split_deaths() {
     assert_proper_lock_protection();
-    _allocation_stats.increment_splitDeaths();
+    _allocation_stats.increment_split_deaths();
   }

   NOT_PRODUCT(
-  // For debugging. The "_returnedBytes" in all the lists are summed
+  // For debugging. The "_returned_bytes" in all the lists are summed
   // and compared with the total number of bytes swept during a
   // collection.
-  size_t returnedBytes() const { return _allocation_stats.returnedBytes(); }
-  void set_returnedBytes(size_t v) { _allocation_stats.set_returnedBytes(v); }
-  void increment_returnedBytes_by(size_t v) {
-    _allocation_stats.set_returnedBytes(_allocation_stats.returnedBytes() + v);
+  size_t returned_bytes() const { return _allocation_stats.returned_bytes(); }
+  void set_returned_bytes(size_t v) { _allocation_stats.set_returned_bytes(v); }
+  void increment_returned_bytes_by(size_t v) {
+    _allocation_stats.set_returned_bytes(_allocation_stats.returned_bytes() + v);
   }
   )

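The NOT_PRODUCT block above supports a debug-only cross-check: each free list counts the bytes it has returned, and a sweep can total those counters against the bytes it actually swept. A minimal sketch of that check, where walk_free_lists()/next_list() and total_bytes_swept are illustrative stand-ins, not HotSpot API:

    // Debug-only accounting sketch; the iterator and the swept-bytes
    // total are hypothetical names, only returned_bytes() is real.
    size_t sum = 0;
    for (FreeList<FreeChunk>* fl = walk_free_lists(); fl != NULL; fl = fl->next_list()) {
      sum += fl->returned_bytes();
    }
    assert(sum == total_bytes_swept, "sweep accounting mismatch");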
   // Unlink head of list and return it. Returns NULL if
   // the list is empty.
-  FreeChunk* getChunkAtHead();
+  Chunk* get_chunk_at_head();

   // Remove the first "n" or "count", whichever is smaller, chunks from the
   // list, setting "fl", which is required to be empty, to point to them.
-  void getFirstNChunksFromList(size_t n, FreeList* fl);
+  void getFirstNChunksFromList(size_t n, FreeList<Chunk>* fl);

   // Unlink this chunk from its free list.
-  void removeChunk(FreeChunk* fc);
+  void remove_chunk(Chunk* fc);

   // Add this chunk to this free list.
-  void returnChunkAtHead(FreeChunk* fc);
-  void returnChunkAtTail(FreeChunk* fc);
+  void return_chunk_at_head(Chunk* fc);
+  void return_chunk_at_tail(Chunk* fc);

   // Similar to returnChunk* but also records some diagnostic
   // information.
-  void returnChunkAtHead(FreeChunk* fc, bool record_return);
-  void returnChunkAtTail(FreeChunk* fc, bool record_return);
+  void return_chunk_at_head(Chunk* fc, bool record_return);
+  void return_chunk_at_tail(Chunk* fc, bool record_return);

   // Prepend "fl" (whose size is required to be the same as that of "this")
   // to the front of "this" list.
-  void prepend(FreeList* fl);
+  void prepend(FreeList<Chunk>* fl);

   // Verify that the chunk is in the list; returns false if "fc" is not found.
-  bool verifyChunkInFreeLists(FreeChunk* fc) const;
+  bool verify_chunk_in_free_list(Chunk* fc) const;

   // Stats verification
   void verify_stats() const PRODUCT_RETURN;
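For orientation, the head operations declared above amount to a singly linked push/pop on the chunk list. A stripped-down sketch, assuming only that the chunk type exposes next()/set_next() (as FreeChunk does); the real HotSpot versions also maintain counts, hints and the statistics renamed above:

    // Illustrative only: get_chunk_at_head / return_chunk_at_head in essence.
    template <class Chunk>
    Chunk* pop_head(Chunk*& head) {
      Chunk* fc = head;                  // NULL when the list is empty
      if (fc != NULL) head = fc->next();
      return fc;
    }

    template <class Chunk>
    void push_head(Chunk*& head, Chunk* fc) {
      fc->set_next(head);
      head = fc;
    }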
@@ -332,4 +326,4 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
   void print_on(outputStream* st, const char* c = NULL) const;
 };

-#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_FREELIST_HPP
+#endif // SHARE_VM_MEMORY_FREELIST_HPP
@@ -68,7 +68,7 @@ Generation* GenerationSpec::init(ReservedSpace rs, int level,
       ConcurrentMarkSweepGeneration* g = NULL;
       g = new ConcurrentMarkSweepGeneration(rs,
                 init_size(), level, ctrs, UseCMSAdaptiveFreeLists,
-                (FreeBlockDictionary::DictionaryChoice)CMSDictionaryChoice);
+                (FreeBlockDictionary<FreeChunk>::DictionaryChoice)CMSDictionaryChoice);

       g->initialize_performance_counters();

@@ -88,7 +88,7 @@ Generation* GenerationSpec::init(ReservedSpace rs, int level,
       ASConcurrentMarkSweepGeneration* g = NULL;
       g = new ASConcurrentMarkSweepGeneration(rs,
                 init_size(), level, ctrs, UseCMSAdaptiveFreeLists,
-                (FreeBlockDictionary::DictionaryChoice)CMSDictionaryChoice);
+                (FreeBlockDictionary<FreeChunk>::DictionaryChoice)CMSDictionaryChoice);

       g->initialize_performance_counters();

@@ -175,7 +175,7 @@ PermGen* PermanentGenerationSpec::init(ReservedSpace rs,
       }
       // XXXPERM
       return new CMSPermGen(perm_rs, init_size, ctrs,
-                (FreeBlockDictionary::DictionaryChoice)CMSDictionaryChoice);
+                (FreeBlockDictionary<FreeChunk>::DictionaryChoice)CMSDictionaryChoice);
     }
 #endif // SERIALGC
     default:
@@ -128,11 +128,12 @@ klassOop oopFactory::new_instanceKlass(Symbol* name, int vtable_len, int itable_
                                        int static_field_size,
                                        unsigned int nonstatic_oop_map_count,
                                        AccessFlags access_flags,
-                                       ReferenceType rt, TRAPS) {
+                                       ReferenceType rt,
+                                       KlassHandle host_klass, TRAPS) {
   instanceKlassKlass* ikk = instanceKlassKlass::cast(Universe::instanceKlassKlassObj());
   return ikk->allocate_instance_klass(name, vtable_len, itable_len,
                                       static_field_size, nonstatic_oop_map_count,
-                                      access_flags, rt, CHECK_NULL);
+                                      access_flags, rt, host_klass, CHECK_NULL);
 }

@@ -78,7 +78,8 @@ class oopFactory: AllStatic {
                                     int static_field_size,
                                     unsigned int nonstatic_oop_map_count,
                                     AccessFlags access_flags,
-                                    ReferenceType rt, TRAPS);
+                                    ReferenceType rt,
+                                    KlassHandle host_klass, TRAPS);

   // Methods
  private:
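The factory now threads the host class down to allocation. A hedged call-site sketch (the local names are illustrative, not from this changeset): an anonymous class passes its host's KlassHandle, everything else passes an empty handle so no embedded slot is reserved:

    // Illustrative only; mirrors the new oopFactory::new_instanceKlass signature.
    KlassHandle host;  // empty handle => not anonymous, no embedded host slot
    klassOop k = oopFactory::new_instanceKlass(name, vtable_len, itable_len,
                                               static_field_size,
                                               nonstatic_oop_map_count,
                                               access_flags, rt, host, CHECK_NULL);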
@@ -1862,7 +1862,7 @@ void instanceKlass::follow_weak_klass_links(
     if (impl != NULL) {
       if (!is_alive->do_object_b(impl)) {
         // remove this guy
-        *start_of_implementor() = NULL;
+        *adr_implementor() = NULL;
       }
     }
   } else {
@@ -78,6 +78,7 @@
 //    The embedded nonstatic oop-map blocks are short pairs (offset, length)
 //    indicating where oops are located in instances of this klass.
 //    [EMBEDDED implementor of the interface] only exists for interfaces
+//    [EMBEDDED host klass        ] only exists for an anonymous class (JSR 292 enabled)

 // forward declaration for class -- see below for definition

@@ -176,10 +177,6 @@ class instanceKlass: public Klass {
   oop             _class_loader;
   // Protection domain.
   oop             _protection_domain;
-  // Host class, which grants its access privileges to this class also.
-  // This is only non-null for an anonymous class (JSR 292 enabled).
-  // The host class is either named, or a previously loaded anonymous class.
-  klassOop        _host_klass;
   // Class signers.
   objArrayOop     _signers;
   // The InnerClasses attribute and EnclosingMethod attribute. The
@@ -234,9 +231,13 @@ class instanceKlass: public Klass {
   int             _nonstatic_oop_map_size; // size in words of nonstatic oop map blocks

   bool            _is_marked_dependent;    // used for marking during flushing and deoptimization
-  bool            _rewritten;              // methods rewritten.
-  bool            _has_nonstatic_fields;   // for sizing with UseCompressedOops
-  bool            _should_verify_class;    // allow caching of preverification
+  enum {
+    _misc_rewritten            = 1 << 0,   // methods rewritten.
+    _misc_has_nonstatic_fields = 1 << 1,   // for sizing with UseCompressedOops
+    _misc_should_verify_class  = 1 << 2,   // allow caching of preverification
+    _misc_is_anonymous         = 1 << 3    // has embedded _inner_classes field
+  };
+  u2              _misc_flags;
   u2              _minor_version;          // minor version number of class file
   u2              _major_version;          // major version number of class file
   Thread*         _init_thread;            // Pointer to current thread doing initialization (to handle recursive initialization)
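The change above packs the three former bools, plus the new anonymous bit, into a single u2 bitfield; every accessor in the later hunks reads its bit with a mask and writes it with |= / &= ~. A self-contained sketch of the pattern outside HotSpot:

    #include <cstdint>

    // Minimal illustration of the _misc_flags pattern: booleans packed
    // into one 16-bit field, read with a mask, written with |= / &= ~.
    struct MiscFlags {
      enum {
        rewritten            = 1 << 0,
        has_nonstatic_fields = 1 << 1,
        should_verify_class  = 1 << 2,
        is_anonymous         = 1 << 3
      };
      uint16_t bits;

      bool get(uint16_t flag) const { return (bits & flag) != 0; }
      void set(uint16_t flag, bool v) {
        if (v) bits |= flag; else bits &= (uint16_t)~flag;
      }
    };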
@@ -276,13 +277,29 @@ class instanceKlass: public Klass {
   //     NULL: no implementor.
   //     A klassOop that's not itself: one implementor.
   //     Itself: more than one implementor.
+  // embedded host klass follows here
+  //   The embedded host klass only exists in an anonymous class for
+  //   dynamic language support (JSR 292 enabled). The host class grants
+  //   its access privileges to this class also. The host class is either
+  //   named, or a previously loaded anonymous class. A non-anonymous class
+  //   or an anonymous class loaded through normal classloading does not
+  //   have this embedded field.
+  //

   friend class instanceKlassKlass;
   friend class SystemDictionary;

  public:
-  bool has_nonstatic_fields() const        { return _has_nonstatic_fields; }
-  void set_has_nonstatic_fields(bool b)    { _has_nonstatic_fields = b; }
+  bool has_nonstatic_fields() const        {
+    return (_misc_flags & _misc_has_nonstatic_fields) != 0;
+  }
+  void set_has_nonstatic_fields(bool b)    {
+    if (b) {
+      _misc_flags |= _misc_has_nonstatic_fields;
+    } else {
+      _misc_flags &= ~_misc_has_nonstatic_fields;
+    }
+  }

   // field sizes
   int nonstatic_field_size() const         { return _nonstatic_field_size; }
@@ -335,7 +352,7 @@ class instanceKlass: public Klass {
   int java_fields_count() const            { return (int)_java_fields_count; }

   // Number of fields including any injected fields
-  int all_fields_count() const { return _fields->length() / sizeof(FieldInfo::field_slots); }
+  int all_fields_count() const { return _fields->length() / FieldInfo::field_slots; }

   typeArrayOop fields() const              { return _fields; }

@@ -396,11 +413,19 @@ class instanceKlass: public Klass {
   bool is_in_error_state() const           { return _init_state == initialization_error; }
   bool is_reentrant_initialization(Thread *thread)  { return thread == _init_thread; }
   ClassState  init_state()                 { return (ClassState)_init_state; }
-  bool is_rewritten() const                { return _rewritten; }
+  bool is_rewritten() const                { return (_misc_flags & _misc_rewritten) != 0; }

   // defineClass specified verification
-  bool should_verify_class() const         { return _should_verify_class; }
-  void set_should_verify_class(bool value) { _should_verify_class = value; }
+  bool should_verify_class() const         {
+    return (_misc_flags & _misc_should_verify_class) != 0;
+  }
+  void set_should_verify_class(bool value) {
+    if (value) {
+      _misc_flags |= _misc_should_verify_class;
+    } else {
+      _misc_flags &= ~_misc_should_verify_class;
+    }
+  }

   // marking
   bool is_marked_dependent() const         { return _is_marked_dependent; }
@@ -469,9 +494,30 @@ class instanceKlass: public Klass {
   void set_protection_domain(oop pd)       { oop_store((oop*) &_protection_domain, pd); }

   // host class
-  oop host_klass() const                   { return _host_klass; }
-  void set_host_klass(oop host)            { oop_store((oop*) &_host_klass, host); }
-  bool is_anonymous() const                { return _host_klass != NULL; }
+  oop host_klass() const                   {
+    oop* hk = adr_host_klass();
+    if (hk == NULL) {
+      return NULL;
+    } else {
+      return *hk;
+    }
+  }
+  void set_host_klass(oop host)            {
+    assert(is_anonymous(), "not anonymous");
+    oop* addr = adr_host_klass();
+    assert(addr != NULL, "no reserved space");
+    oop_store(addr, host);
+  }
+  bool is_anonymous() const                {
+    return (_misc_flags & _misc_is_anonymous) != 0;
+  }
+  void set_is_anonymous(bool value)        {
+    if (value) {
+      _misc_flags |= _misc_is_anonymous;
+    } else {
+      _misc_flags &= ~_misc_is_anonymous;
+    }
+  }

   // signers
   objArrayOop signers() const              { return _signers; }
@@ -651,7 +697,7 @@ class instanceKlass: public Klass {
   // Access to the implementor of an interface.
   klassOop implementor() const
   {
-    klassOop* k = start_of_implementor();
+    klassOop* k = (klassOop*)adr_implementor();
     if (k == NULL) {
       return NULL;
     } else {

@@ -661,7 +707,7 @@ class instanceKlass: public Klass {

   void set_implementor(klassOop k) {
     assert(is_interface(), "not interface");
-    oop* addr = (oop*)start_of_implementor();
+    oop* addr = adr_implementor();
     oop_store_without_check(addr, k);
   }

@@ -717,9 +763,11 @@ class instanceKlass: public Klass {
   {
     return object_size(align_object_offset(vtable_length()) +
                        align_object_offset(itable_length()) +
-                       (is_interface() ?
-                         (align_object_offset(nonstatic_oop_map_size()) + (int)sizeof(klassOop)/HeapWordSize) :
-                         nonstatic_oop_map_size()));
+                       ((is_interface() || is_anonymous()) ?
+                         align_object_offset(nonstatic_oop_map_size()) :
+                         nonstatic_oop_map_size()) +
+                       (is_interface() ? (int)sizeof(klassOop)/HeapWordSize : 0) +
+                       (is_anonymous() ? (int)sizeof(klassOop)/HeapWordSize : 0));
   }
   static int vtable_start_offset()    { return header_size(); }
   static int vtable_length_offset()   { return oopDesc::header_size() + offset_of(instanceKlass, _vtable_len) / HeapWordSize; }
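Unpacking the new arithmetic: the oop-map block is aligned whenever any word-sized slot follows it, then one word is added for an interface's embedded implementor and one for an anonymous class's embedded host klass. Schematically (a hedged reading, writing align for align_object_offset and "1 word" for sizeof(klassOop)/HeapWordSize):

    // body = align(vtable) + align(itable)
    //      + (interface || anonymous ? align(oop_maps) : oop_maps)
    //      + (interface ? 1 word : 0)   // embedded implementor slot
    //      + (anonymous ? 1 word : 0)   // embedded host klass slot
    // size = object_size(body)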
@@ -737,15 +785,29 @@ class instanceKlass: public Klass {
     return (OopMapBlock*)(start_of_itable() + align_object_offset(itable_length()));
   }

-  klassOop* start_of_implementor() const {
+  oop* adr_implementor() const {
     if (is_interface()) {
-      return (klassOop*)(start_of_nonstatic_oop_maps() +
+      return (oop*)(start_of_nonstatic_oop_maps() +
                     nonstatic_oop_map_count());
     } else {
       return NULL;
     }
   };

+  oop* adr_host_klass() const {
+    if (is_anonymous()) {
+      oop* adr_impl = adr_implementor();
+      if (adr_impl != NULL) {
+        return adr_impl + 1;
+      } else {
+        return (oop*)(start_of_nonstatic_oop_maps() +
+                      nonstatic_oop_map_count());
+      }
+    } else {
+      return NULL;
+    }
+  }

   // Allocation profiling support
   juint alloc_size() const              { return _alloc_count * size_helper(); }
   void set_alloc_size(juint n)          {}
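The pattern here is worth spelling out: instead of a named field, the host klass lives at a computed address past the last oop-map block, and past the implementor slot when one exists. A stripped-down sketch of trailing embedded slots, assuming a plain struct rather than a real klass:

    #include <cstddef>

    // Sketch of "embedded trailing field" addressing: optional slots laid
    // out after a fixed body and located by arithmetic, not named fields.
    struct FakeKlass {
      bool  has_implementor;   // stands in for is_interface()
      bool  has_host;          // stands in for is_anonymous()
      void* body[4];           // fixed part (vtable/itable/oop maps here)

      void** end_of_body()     { return &body[4]; }
      void** adr_implementor() { return has_implementor ? end_of_body() : NULL; }
      void** adr_host() {
        if (!has_host) return NULL;
        void** impl = adr_implementor();
        return impl != NULL ? impl + 1 : end_of_body();  // host follows implementor
      }
    };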
@@ -819,7 +881,7 @@ private:
 #else
   void set_init_state(ClassState state) { _init_state = (u1)state; }
 #endif
-  void set_rewritten()                  { _rewritten = true; }
+  void set_rewritten()                  { _misc_flags |= _misc_rewritten; }
   void set_init_thread(Thread *thread)  { _init_thread = thread; }

   u2 idnum_allocated_count() const      { return _idnum_allocated_count; }
@@ -852,10 +914,8 @@ private:
   oop* adr_constants() const         { return (oop*)&this->_constants;}
   oop* adr_class_loader() const      { return (oop*)&this->_class_loader;}
   oop* adr_protection_domain() const { return (oop*)&this->_protection_domain;}
-  oop* adr_host_klass() const        { return (oop*)&this->_host_klass;}
   oop* adr_signers() const           { return (oop*)&this->_signers;}
   oop* adr_inner_classes() const     { return (oop*)&this->_inner_classes;}
-  oop* adr_implementor() const       { return (oop*)start_of_implementor(); }
   oop* adr_methods_jmethod_ids() const             { return (oop*)&this->_methods_jmethod_ids;}
   oop* adr_methods_cached_itable_indices() const   { return (oop*)&this->_methods_cached_itable_indices;}
   oop* adr_class_annotations() const { return (oop*)&this->_class_annotations;}
@@ -103,7 +103,9 @@ void instanceKlassKlass::oop_follow_contents(oop obj) {
   MarkSweep::mark_and_push(ik->adr_class_loader());
   MarkSweep::mark_and_push(ik->adr_inner_classes());
   MarkSweep::mark_and_push(ik->adr_protection_domain());
+  if (ik->adr_host_klass() != NULL) {
     MarkSweep::mark_and_push(ik->adr_host_klass());
+  }
   MarkSweep::mark_and_push(ik->adr_signers());
   MarkSweep::mark_and_push(ik->adr_class_annotations());
   MarkSweep::mark_and_push(ik->adr_fields_annotations());

@@ -139,7 +141,9 @@ void instanceKlassKlass::oop_follow_contents(ParCompactionManager* cm,
   PSParallelCompact::mark_and_push(cm, ik->adr_class_loader());
   PSParallelCompact::mark_and_push(cm, ik->adr_inner_classes());
   PSParallelCompact::mark_and_push(cm, ik->adr_protection_domain());
+  if (ik->adr_host_klass() != NULL) {
     PSParallelCompact::mark_and_push(cm, ik->adr_host_klass());
+  }
   PSParallelCompact::mark_and_push(cm, ik->adr_signers());
   PSParallelCompact::mark_and_push(cm, ik->adr_class_annotations());
   PSParallelCompact::mark_and_push(cm, ik->adr_fields_annotations());

@@ -177,10 +181,12 @@ int instanceKlassKlass::oop_oop_iterate(oop obj, OopClosure* blk) {
   blk->do_oop(ik->adr_constants());
   blk->do_oop(ik->adr_class_loader());
   blk->do_oop(ik->adr_protection_domain());
+  if (ik->adr_host_klass() != NULL) {
     blk->do_oop(ik->adr_host_klass());
+  }
   blk->do_oop(ik->adr_signers());
   blk->do_oop(ik->adr_inner_classes());
-  if (ik->is_interface()) {
+  if (ik->adr_implementor() != NULL) {
     blk->do_oop(ik->adr_implementor());
   }
   blk->do_oop(ik->adr_class_annotations());
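Every GC visitor in this file now guards the embedded slots the same way: compute the slot address and visit it only when it is non-NULL, i.e. only when the klass actually reserved that slot. A generic hedged sketch of the pattern, with a stand-in type since the real calls are the mark_and_push/do_oop/adjust_pointer variants shown in these hunks:

    #include <cstddef>

    // KlassLike stands in for instanceKlass; only the NULL-guard
    // pattern is the point here, not the GC machinery.
    struct KlassLike {
      void** adr_implementor();  // NULL unless an interface
      void** adr_host_klass();   // NULL unless an anonymous class
    };

    template <typename Visitor>
    void visit_embedded_slots(KlassLike* ik, Visitor visit) {
      void** impl = ik->adr_implementor();
      if (impl != NULL) visit(impl);   // slot exists only for interfaces
      void** host = ik->adr_host_klass();
      if (host != NULL) visit(host);   // slot exists only for anonymous classes
    }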
@@ -227,15 +233,13 @@ int instanceKlassKlass::oop_oop_iterate_m(oop obj, OopClosure* blk,
   adr = ik->adr_protection_domain();
   if (mr.contains(adr)) blk->do_oop(adr);
   adr = ik->adr_host_klass();
-  if (mr.contains(adr)) blk->do_oop(adr);
+  if (adr != NULL && mr.contains(adr)) blk->do_oop(adr);
   adr = ik->adr_signers();
   if (mr.contains(adr)) blk->do_oop(adr);
   adr = ik->adr_inner_classes();
   if (mr.contains(adr)) blk->do_oop(adr);
-  if (ik->is_interface()) {
-    adr = ik->adr_implementor();
-    if (mr.contains(adr)) blk->do_oop(adr);
-  }
+  adr = ik->adr_implementor();
+  if (adr != NULL && mr.contains(adr)) blk->do_oop(adr);
   adr = ik->adr_class_annotations();
   if (mr.contains(adr)) blk->do_oop(adr);
   adr = ik->adr_fields_annotations();
@@ -270,10 +274,12 @@ int instanceKlassKlass::oop_adjust_pointers(oop obj) {
   MarkSweep::adjust_pointer(ik->adr_constants());
   MarkSweep::adjust_pointer(ik->adr_class_loader());
   MarkSweep::adjust_pointer(ik->adr_protection_domain());
+  if (ik->adr_host_klass() != NULL) {
     MarkSweep::adjust_pointer(ik->adr_host_klass());
+  }
   MarkSweep::adjust_pointer(ik->adr_signers());
   MarkSweep::adjust_pointer(ik->adr_inner_classes());
-  if (ik->is_interface()) {
+  if (ik->adr_implementor() != NULL) {
     MarkSweep::adjust_pointer(ik->adr_implementor());
   }
   MarkSweep::adjust_pointer(ik->adr_class_annotations());

@@ -302,7 +308,7 @@ void instanceKlassKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
   }

   oop* hk_addr = ik->adr_host_klass();
-  if (PSScavenge::should_scavenge(hk_addr)) {
+  if (hk_addr != NULL && PSScavenge::should_scavenge(hk_addr)) {
     pm->claim_or_forward_depth(hk_addr);
   }

@@ -328,9 +334,13 @@ int instanceKlassKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
   for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
     PSParallelCompact::adjust_pointer(cur_oop);
   }
-  if (ik->is_interface()) {
+  // embedded oops
+  if (ik->adr_implementor() != NULL) {
     PSParallelCompact::adjust_pointer(ik->adr_implementor());
   }
+  if (ik->adr_host_klass() != NULL) {
+    PSParallelCompact::adjust_pointer(ik->adr_host_klass());
+  }

   OopClosure* closure = PSParallelCompact::adjust_root_pointer_closure();
   iterate_c_heap_oops(ik, closure);
@@ -346,16 +356,23 @@ instanceKlassKlass::allocate_instance_klass(Symbol* name, int vtable_len, int it
                                             int static_field_size,
                                             unsigned nonstatic_oop_map_count,
                                             AccessFlags access_flags,
-                                            ReferenceType rt, TRAPS) {
+                                            ReferenceType rt,
+                                            KlassHandle host_klass, TRAPS) {

   const int nonstatic_oop_map_size =
     instanceKlass::nonstatic_oop_map_size(nonstatic_oop_map_count);
   int size = align_object_offset(vtable_len) + align_object_offset(itable_len);
-  if (access_flags.is_interface()) {
-    size += align_object_offset(nonstatic_oop_map_size) + (int)sizeof(klassOop)/HeapWordSize;
+  if (access_flags.is_interface() || !host_klass.is_null()) {
+    size += align_object_offset(nonstatic_oop_map_size);
   } else {
     size += nonstatic_oop_map_size;
   }
+  if (access_flags.is_interface()) {
+    size += (int)sizeof(klassOop)/HeapWordSize;
+  }
+  if (!host_klass.is_null()) {
+    size += (int)sizeof(klassOop)/HeapWordSize;
+  }
   size = instanceKlass::object_size(size);

   // Allocation
@@ -389,6 +406,7 @@ instanceKlassKlass::allocate_instance_klass(Symbol* name, int vtable_len, int it
     ik->set_static_field_size(static_field_size);
     ik->set_nonstatic_oop_map_size(nonstatic_oop_map_size);
     ik->set_access_flags(access_flags);
+    ik->set_is_anonymous(!host_klass.is_null());
     assert(k()->size() == size, "wrong size for object");

     ik->set_array_klasses(NULL);

@@ -401,7 +419,6 @@ instanceKlassKlass::allocate_instance_klass(Symbol* name, int vtable_len, int it
     ik->set_constants(NULL);
     ik->set_class_loader(NULL);
     ik->set_protection_domain(NULL);
-    ik->set_host_klass(NULL);
     ik->set_signers(NULL);
     ik->set_source_file_name(NULL);
     ik->set_source_debug_extension(NULL);
@@ -503,7 +520,9 @@ void instanceKlassKlass::oop_print_on(oop obj, outputStream* st) {
   st->print(BULLET"constants: ");         ik->constants()->print_value_on(st);         st->cr();
   st->print(BULLET"class loader: ");      ik->class_loader()->print_value_on(st);      st->cr();
   st->print(BULLET"protection domain: "); ik->protection_domain()->print_value_on(st); st->cr();
+  if (ik->host_klass() != NULL) {
     st->print(BULLET"host class: ");      ik->host_klass()->print_value_on(st);        st->cr();
+  }
   st->print(BULLET"signers: ");           ik->signers()->print_value_on(st);           st->cr();
   if (ik->source_file_name() != NULL) {
     st->print(BULLET"source file: ");
@@ -48,6 +48,7 @@ class instanceKlassKlass : public klassKlass {
                                       unsigned int nonstatic_oop_map_count,
                                       AccessFlags access_flags,
                                       ReferenceType rt,
+                                      KlassHandle host_klass,
                                       TRAPS);

   // Casting from klassOop
@@ -137,6 +137,7 @@ JVMState* DirectCallGenerator::generate(JVMState* jvms) {
   }

   CallStaticJavaNode *call = new (kit.C, tf()->domain()->cnt()) CallStaticJavaNode(tf(), target, method(), kit.bci());
+  _call_node = call;  // Save the call node in case we need it later
   if (!is_static) {
     // Make an explicit receiver null_check as part of this call.
     // Since we share a map with the caller, his JVMS gets adjusted.

@@ -155,7 +156,6 @@ JVMState* DirectCallGenerator::generate(JVMState* jvms) {
   kit.set_edges_for_java_call(call, false, _separate_io_proj);
   Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
   kit.push_node(method()->return_type()->basic_type(), ret);
-  _call_node = call;  // Save the call node in case we need it later
   return kit.transfer_exceptions_into_jvms();
 }

@@ -897,8 +897,8 @@ bool StringConcat::validate_control_flow() {
 }

 Node* PhaseStringOpts::fetch_static_field(GraphKit& kit, ciField* field) {
-  const TypeKlassPtr* klass_type = TypeKlassPtr::make(field->holder());
-  Node* klass_node = __ makecon(klass_type);
+  const TypeInstPtr* mirror_type = TypeInstPtr::make(field->holder()->java_mirror());
+  Node* klass_node = __ makecon(mirror_type);
   BasicType bt = field->layout_type();
   ciType* field_klass = field->type();

@@ -913,6 +913,7 @@ Node* PhaseStringOpts::fetch_static_field(GraphKit& kit, ciField* field) {
       // and may yield a vacuous result if the field is of interface type.
       type = TypeOopPtr::make_from_constant(con, true)->isa_oopptr();
       assert(type != NULL, "field singleton type must be consistent");
+      return __ makecon(type);
     } else {
       type = TypeOopPtr::make_from_klass(field_klass->as_klass());
     }

@@ -922,7 +923,7 @@ Node* PhaseStringOpts::fetch_static_field(GraphKit& kit, ciField* field) {

   return kit.make_load(NULL, kit.basic_plus_adr(klass_node, field->offset_in_bytes()),
                        type, T_OBJECT,
-                       C->get_alias_index(klass_type->add_offset(field->offset_in_bytes())));
+                       C->get_alias_index(mirror_type->add_offset(field->offset_in_bytes())));
 }

 Node* PhaseStringOpts::int_stringSize(GraphKit& kit, Node* arg) {
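The substance of this change, as the hunks themselves show: the base of a static-field load switches from the klassOop to the holder's java.lang.Class mirror, and the alias index switches with it. In address terms (a hedged sketch, not C2 API):

    // Before: base = klassOop of the holder
    //   addr = (address)holder_klass + field->offset_in_bytes()
    // After:  base = java.lang.Class mirror of the holder
    //   addr = (address)holder->java_mirror() + field->offset_in_bytes()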
|
@ -1221,12 +1221,11 @@ Node* SuperWord::vector_opd(Node_List* p, int opd_idx) {
|
|||
return opd; // input is matching vector
|
||||
}
|
||||
assert(!opd->is_VectorStore(), "such vector is not expected here");
|
||||
// Convert scalar input to vector. Use p0's type because it's container
|
||||
// maybe smaller than the operand's container.
|
||||
const Type* opd_t = velt_type(!in_bb(opd) ? p0 : opd);
|
||||
// Convert scalar input to vector with the same number of elements as
|
||||
// p0's vector. Use p0's type because size of operand's container in
|
||||
// vector should match p0's size regardless operand's size.
|
||||
const Type* p0_t = velt_type(p0);
|
||||
if (p0_t->higher_equal(opd_t)) opd_t = p0_t;
|
||||
VectorNode* vn = VectorNode::scalar2vector(_phase->C, opd, vlen, opd_t);
|
||||
VectorNode* vn = VectorNode::scalar2vector(_phase->C, opd, vlen, p0_t);
|
||||
|
||||
_phase->_igvn.register_new_node_with_optimizer(vn);
|
||||
_phase->set_ctrl(vn, _phase->get_ctrl(opd));
|
||||
|
@ -1234,14 +1233,15 @@ Node* SuperWord::vector_opd(Node_List* p, int opd_idx) {
|
|||
}
|
||||
|
||||
// Insert pack operation
|
||||
const Type* opd_t = velt_type(!in_bb(opd) ? p0 : opd);
|
||||
PackNode* pk = PackNode::make(_phase->C, opd, opd_t);
|
||||
const Type* p0_t = velt_type(p0);
|
||||
PackNode* pk = PackNode::make(_phase->C, opd, p0_t);
|
||||
DEBUG_ONLY( const BasicType opd_bt = opd->bottom_type()->basic_type(); )
|
||||
|
||||
for (uint i = 1; i < vlen; i++) {
|
||||
Node* pi = p->at(i);
|
||||
Node* in = pi->in(opd_idx);
|
||||
assert(my_pack(in) == NULL, "Should already have been unpacked");
|
||||
assert(opd_t == velt_type(!in_bb(in) ? pi : in), "all same type");
|
||||
assert(opd_bt == in->bottom_type()->basic_type(), "all same type");
|
||||
pk->add_opd(in);
|
||||
}
|
||||
_phase->_igvn.register_new_node_with_optimizer(pk);
|
||||
|
|
|
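The fix pins both the splat and the pack to p0's element type, so every lane of the resulting vector has the same width as the pack leader's. As a plain-C++ analogy (not the C2 IR), replicating a scalar across vlen lanes of the leader's type:

    #include <cstddef>

    // Analogy only: "scalar2vector" replicates one scalar into every lane,
    // using the pack leader's element type T rather than the operand's own.
    template <typename T, size_t VLEN>
    struct Vec { T lane[VLEN]; };

    template <typename T, size_t VLEN, typename S>
    Vec<T, VLEN> scalar2vector(S scalar) {
      Vec<T, VLEN> v;
      for (size_t i = 0; i < VLEN; i++) {
        v.lane[i] = (T)scalar;  // same width-converted value in every lane
      }
      return v;
    }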
@@ -293,13 +293,10 @@
 # include "c1/c1_globals.hpp"
 #endif // COMPILER1
 #ifndef SERIALGC
-# include "gc_implementation/concurrentMarkSweep/binaryTreeDictionary.hpp"
 # include "gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp"
 # include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
 # include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
-# include "gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp"
 # include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
-# include "gc_implementation/concurrentMarkSweep/freeList.hpp"
 # include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp"
 # include "gc_implementation/g1/dirtyCardQueue.hpp"
 # include "gc_implementation/g1/g1BlockOffsetTable.hpp"
|
@ -44,7 +44,6 @@
|
|||
#include "code/vmreg.hpp"
|
||||
#include "compiler/oopMap.hpp"
|
||||
#include "compiler/compileBroker.hpp"
|
||||
#include "gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp"
|
||||
#include "gc_implementation/shared/immutableSpace.hpp"
|
||||
#include "gc_implementation/shared/markSweep.hpp"
|
||||
#include "gc_implementation/shared/mutableSpace.hpp"
|
||||
|
@ -55,6 +54,7 @@
|
|||
#include "memory/cardTableRS.hpp"
|
||||
#include "memory/compactPermGen.hpp"
|
||||
#include "memory/defNewGeneration.hpp"
|
||||
#include "memory/freeBlockDictionary.hpp"
|
||||
#include "memory/genCollectedHeap.hpp"
|
||||
#include "memory/generation.hpp"
|
||||
#include "memory/generationSpec.hpp"
|
||||
|
|
|
@@ -13,7 +13,18 @@
    Porter, 1980, An algorithm for suffix stripping, Program, Vol. 14,
    no. 3, pp 130-137,

-   See also http://www.tartarus.org/~martin/PorterStemmer
+   http://www.tartarus.org/~martin/PorterStemmer
+
+   The software is completely free for any purpose, unless notes at the head
+   of the program text indicates otherwise (which is rare). In any case,
+   the notes about licensing are never more restrictive than the BSD License.
+
+   In every case where the software is not written by me (Martin Porter),
+   this licensing arrangement has been endorsed by the contributor, and it is
+   therefore unnecessary to ask the contributor again to confirm it.
+
+   I have not asked any contributors (or their employers, if they have them)
+   for proofs that they have the right to distribute their software in this way.

    History:

hotspot/test/compiler/7160610/Test7160610.java (new file, 71 lines)
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 7160610
+ * @summary Unknown Native Code compilation issue.
+ *
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-OptimizeFill Test7160610
+ */
+
+public class Test7160610 {
+  private static final byte[] BYTE_ARRAY = new byte[7];
+  private static int[] anIntArray1190 = new int[32768];
+  private static int[] anIntArray1191 = new int[32768];
+
+  public static void main(String arg[]) {
+    int i = 256;
+    for(int j = BYTE_ARRAY[2]; j < anIntArray1190.length; j++) {
+      anIntArray1190[j] = BYTE_ARRAY[2];
+    }
+
+    for(int k = BYTE_ARRAY[2]; (k ^ BYTE_ARRAY[1]) > -5001; k++) {
+      int i1 = (int)(Math.random() * 128D * (double)i);
+      anIntArray1190[i1] = (int)(Math.random() * 256D);
+    }
+
+    for(int l = BYTE_ARRAY[2]; (l ^ BYTE_ARRAY[1]) > -21; l++) {
+      for(int j1 = BYTE_ARRAY[0]; j1 < i + -BYTE_ARRAY[0]; j1++) {
+        for(int k1 = BYTE_ARRAY[0]; (k1 ^ BYTE_ARRAY[1]) > -128; k1++) {
+          int l1 = k1 - -(j1 << 0x26cb6487);
+          anIntArray1191[l1] = (anIntArray1190[l1 + -BYTE_ARRAY[0]] - -anIntArray1190[l1 - -BYTE_ARRAY[0]] - -anIntArray1190[-128 + l1] - -anIntArray1190[128 + l1]) / BYTE_ARRAY[6];
+        }
+      }
+      int ai[] = anIntArray1190;
+      anIntArray1190 = anIntArray1191;
+      anIntArray1191 = ai;
+    }
+  }
+
+  static {
+    BYTE_ARRAY[6] = 4;
+    BYTE_ARRAY[5] = 5;
+    BYTE_ARRAY[4] = 3;
+    BYTE_ARRAY[3] = 2;
+    BYTE_ARRAY[2] = 0;
+    BYTE_ARRAY[1] = -1;
+    BYTE_ARRAY[0] = 1;
+  }
+}
@@ -157,3 +157,4 @@ f3244c1f04864d35c41fa8d13669faf4f65b81e2 jdk8-b28
 9bcab2b8b8ea578e594916a3d3df6dbec7984bcb jdk8-b33
 8b91a897a04486cf901af0de7f684a3eb31f121f jdk8-b34
 e187f3ede64965dc2979df9a211107cd3d38eacb jdk8-b35
+cfd288fe1d3e2b700838342e70d71d44ac991af5 jdk8-b36
@@ -47,7 +47,7 @@ $(eval $(call SetupJavaCompiler,GENERATE_NEWBYTECODE_DEBUG,\

 $(eval $(call SetupJavaCompilation,BUILD_JAXP,\
                 SETUP:=GENERATE_NEWBYTECODE_DEBUG,\
-                SRC:=$(JAXP_TOPDIR)/src/share/classes,\
+                SRC:=$(JAXP_TOPDIR)/src,\
                 CLEAN:=.properties,\
                 BIN:=$(JAXP_OUTPUTDIR)/classes,\
                 SRCZIP:=$(JAXP_OUTPUTDIR)/dist/lib/src.zip))
@@ -157,3 +157,4 @@ b376d901e006cd9e0c59733c84e190aace23eec6 jdk8-b25
 ea80b2388dce711fbde8e4fd6e07c2c64ad16743 jdk8-b33
 f1d020a49c8c33667fb10c8caa255206a78a3675 jdk8-b34
 e8afc16522e190cb93c66bcb15d6fba0fe9e6833 jdk8-b35
+89b36c658e39f0a2957be55453a3a3befd9c8a6b jdk8-b36

@@ -157,3 +157,4 @@ ddfe5562f61f54ed2121ac0c73b688b94f3e66b5 jdk8-b32
 78cea258caaba3980ba186c426da82c8fe41bfd7 jdk8-b33
 29b680393f33bf953688c17d93aca7a870ca4024 jdk8-b34
 2e3e1356ffbddb2ae95c08da72830ba9ab8b3181 jdk8-b35
+45da9cb055ee258dc09e69c1718e27eadea38e45 jdk8-b36

@@ -157,3 +157,4 @@ be069d72dde2bfe6f996c46325a320961ca854c2 jdk8-b32
 46831c72b7f6c69fef2cc2935001863643a65f94 jdk8-b33
 6b105afbb77ca9600a99eade31f686d070c70581 jdk8-b34
 defd666a786334465496c8901fa302b779c7e045 jdk8-b35
+94bbaa67686f44a124cd16fd9f1e8a6a3f684d2d jdk8-b36