/*
 * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/binaryTreeDictionary.hpp"
27
#include "memory/freeList.hpp"
28 29 30
#include "memory/collectorPolicy.hpp"
#include "memory/filemap.hpp"
#include "memory/freeList.hpp"
31 32
#include "memory/metablock.hpp"
#include "memory/metachunk.hpp"
33 34 35 36 37
#include "memory/metaspace.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/globals.hpp"
38
#include "runtime/java.hpp"
39
#include "runtime/mutex.hpp"
40
#include "runtime/orderAccess.hpp"
41 42 43 44
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"

typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
// Define this macro to enable slow integrity checking of
// the free chunk lists
const bool metaspace_slow_verify = false;

// Parameters for stress mode testing
const uint metadata_deallocate_a_lot_block = 10;
const uint metadata_deallocate_a_lot_chunk = 3;
size_t const allocation_from_dictionary_limit = 4 * K;

MetaWord* last_allocated = 0;

size_t Metaspace::_class_metaspace_size;

// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};

enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
  MediumChunk = 8 * K,
  HumongousChunkGranularity = 8
};
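
// Illustrative note (not in the original source): the chunk sizes above
// are in words, so on a 64-bit VM (BytesPerWord == 8) they work out to:
//   SpecializedChunk:    128 words =  1 KB
//   ClassSmallChunk:     256 words =  2 KB
//   SmallChunk:          512 words =  4 KB
//   ClassMediumChunk:  4 * K words = 32 KB
//   MediumChunk:       8 * K words = 64 KB
// Anything larger than MediumChunk is treated as humongous and is
// rounded up to a multiple of HumongousChunkGranularity (8 words).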

static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bound");
  return (ChunkIndex) (i+1);
}

// Originally _capacity_until_GC was set to MetaspaceSize here but
// the default MetaspaceSize before argument processing was being
// used which was not the desired value.  See the code
// in should_expand() to see how the initialization is handled
// now.
size_t MetaspaceGC::_capacity_until_GC = 0;
bool MetaspaceGC::_expand_after_GC = false;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

// Blocks of space for metadata are allocated out of Metachunks.
//
// Metachunks are allocated out of MetadataVirtualspaces, and once
// allocated there is no explicit link between a Metachunk and
// the MetadataVirtualspace from which it was allocated.
//
// Each SpaceManager maintains a
// list of the chunks it is using and the current chunk.  The current
// chunk is the chunk from which allocations are done.  Space freed in
// a chunk is placed on the free list of blocks (BlockFreelist) and
// reused from there.
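//
// A rough sketch of the allocation path described above (illustrative
// only, not code from this file; locking and error handling omitted,
// and allocate_sketch is a hypothetical name):
//
//   MetaWord* SpaceManager::allocate_sketch(size_t word_size) {
//     // 1. Try the per-SpaceManager freelist of deallocated blocks.
//     MetaWord* p = block_freelists()->get_block(word_size);
//     if (p != NULL) return p;
//     // 2. Bump-allocate from the current chunk.
//     p = current_chunk()->allocate(word_size);
//     if (p != NULL) return p;
//     // 3. Get a new chunk (possibly a new virtual space) and retry.
//     return grow_and_allocate(word_size);
//   }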

typedef class FreeList<Metachunk> ChunkList;

// Manages the global free lists of chunks.
// Has three lists of free chunks, and a total size and
// count that includes all three

class ChunkManager VALUE_OBJ_CLASS_SPEC {

  // Free list of chunks of different sizes.
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  //   HumongousChunk
  ChunkList _free_chunks[NumberOfFreeLists];


  //   HumongousChunk
  ChunkTreeDictionary _humongous_dictionary;

  // Total size (in words) and count of the chunks in all the free lists
  // managed by this ChunkManager, including the humongous dictionary
  size_t _free_chunks_total;
  size_t _free_chunks_count;

  void dec_free_chunks_total(size_t v) {
    assert(_free_chunks_count > 0 &&
             _free_chunks_total > 0,
             "About to go negative");
    Atomic::add_ptr(-1, &_free_chunks_count);
    jlong minus_v = (jlong) - (jlong) v;
    Atomic::add_ptr(minus_v, &_free_chunks_total);
  }

  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }
  void verify_free_chunks_count();

 public:

  ChunkManager() : _free_chunks_total(0), _free_chunks_count(0) {}

  // add or delete (return) a chunk to the global freelist.
  Metachunk* chunk_freelist_allocate(size_t word_size);
  void chunk_freelist_deallocate(Metachunk* chunk);

  // Map a size to a list index assuming that there are lists
  // for special, small, medium, and humongous chunks.
  static ChunkIndex list_index(size_t size);

  // Remove the chunk from its freelist.  It is
  // expected to be on one of the _free_chunks[] lists.
  void remove_chunk(Metachunk* chunk);

  // Add the simple linked list of chunks to the freelist of chunks
  // of type index.
  void return_chunks(ChunkIndex index, Metachunk* chunks);

  // Total of the space in the free chunks list
  size_t free_chunks_total_words();
  size_t free_chunks_total_bytes();

  // Number of chunks in the free chunks list
  size_t free_chunks_count();

  void inc_free_chunks_total(size_t v, size_t count = 1) {
    Atomic::add_ptr(count, &_free_chunks_count);
    Atomic::add_ptr(v, &_free_chunks_total);
  }
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  ChunkList* free_chunks(ChunkIndex index);

  // Returns the list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  // Add and remove from a list by size.  Selects
  // list based on size of chunk.
  void free_chunks_put(Metachunk* chunk);
  Metachunk* free_chunks_get(size_t chunk_word_size);

  // Debug support
  void verify();
  void slow_verify() {
    if (metaspace_slow_verify) {
      verify();
    }
  }
  void locked_verify();
  void slow_locked_verify() {
    if (metaspace_slow_verify) {
      locked_verify();
    }
  }
  void verify_free_chunks_total();

  void locked_print_free_chunks(outputStream* st);
  void locked_print_sum_free_chunks(outputStream* st);

  void print_on(outputStream* st);
};

// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist VALUE_OBJ_CLASS_SPEC {
  BlockTreeDictionary* _dictionary;
  static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size);

  // Only allocate and split from freelist if the size of the allocation
  // is at least 1/4th the size of the available block.
  const static int WasteMultiplier = 4;
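
  // Worked example (illustrative, not in the original source): for a
  // 100-word request, get_block() below rejects any free block larger
  // than WasteMultiplier * 100 = 400 words and puts it back; a 300-word
  // block is accepted, and its 200-word remainder is returned to the
  // dictionary provided it is at least
  // TreeChunk<Metablock, FreeList>::min_size() words.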

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get and return a block to the free list
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() {
    if (dictionary() == NULL) {
      return 0;
    } else {
      return dictionary()->total_size();
    }
  }

  void print_on(outputStream* st) const;
};

class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // total in the VirtualSpace
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  MetaWord* _top;
  // count of chunks contained in this VirtualSpace
  uintx _container_count;

  // Convenience functions to access the _virtual_space
  char* low()  const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

  // The first Metachunk will be allocated at the bottom of the
  // VirtualSpace
  Metachunk* first_chunk() { return (Metachunk*) bottom(); }

  void inc_container_count();
#ifdef ASSERT
  uint container_count_slow();
#endif

 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
  ~VirtualSpaceNode();

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }

  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
  size_t expanded_words() const  { return _virtual_space.committed_size() / BytesPerWord; }
  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }

  // address of next available space in _virtual_space;
  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the VirtualSpace
  bool is_available(size_t word_size) { return _top + word_size <= end(); }

  MetaWord* top() const { return _top; }
  void inc_top(size_t word_size) { _top += word_size; }

  uintx container_count() { return _container_count; }
  void dec_container_count();
#ifdef ASSERT
  void verify_container_count();
#endif

  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;
  size_t free_words_in_vs() const;

  bool initialize();

  // get space from the virtual space
  Metachunk* take_from_committed(size_t chunk_word_size);

  // Allocate a chunk from the virtual space and return it.
  Metachunk* get_chunk_vs(size_t chunk_word_size);

  // Expands/shrinks the committed space in a virtual space.  Delegates
  // to Virtualspace
  bool expand_by(size_t words, bool pre_touch = false);

  // In preparation for deleting this node, remove all the chunks
  // in the node from any freelist.
  void purge(ChunkManager* chunk_manager);

#ifdef ASSERT
  // Debug support
  void mangle();
#endif

  void print_on(outputStream* st) const;
};

// byte_size is the size of the associated virtualspace.
VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
  // align up to vm allocation granularity
  byte_size = align_size_up(byte_size, os::vm_allocation_granularity());

  // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
  // a configurable address, generally at the top of the Java heap so other
  // memory addresses don't conflict.
  if (DumpSharedSpaces) {
    char* shared_base = (char*)SharedBaseAddress;
    _rs = ReservedSpace(byte_size, 0, false, shared_base, 0);
    if (_rs.is_reserved()) {
      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
    } else {
      // Get a mmap region anywhere if the SharedBaseAddress fails.
      _rs = ReservedSpace(byte_size);
    }
    MetaspaceShared::set_shared_rs(&_rs);
  } else {
    _rs = ReservedSpace(byte_size);
  }

  MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
}

void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    assert(chunk->is_free(), "Should be marked free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}

#ifdef ASSERT
uint VirtualSpaceNode::container_count_slow() {
  uint count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    // Don't count the chunks on the free lists.  Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif

// List of VirtualSpaces for metadata allocation.
// It has a _next link for a singly linked list and a MemRegion
// for the total space in the VirtualSpace.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Global list of virtual spaces
  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;
  // Free chunk list for all other metadata
  ChunkManager      _chunk_manager;

  // Can this virtual list allocate more than one space?  Also used to
  // determine whether to allocate unlimited small chunks in this virtual space.
  bool _is_class;
  bool can_grow() const { return !is_class() || !UseCompressedClassPointers; }

  // Sum of reserved and committed memory in the virtual spaces
  size_t _reserved_words;
  size_t _committed_words;

  // Number of virtual spaces
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  void link_vs(VirtualSpaceNode* new_entry);

  // Get another virtual space and add it to the list.  This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool grow_vs(size_t vs_word_size);

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  size_t free_bytes();

  Metachunk* get_new_chunk(size_t word_size,
                           size_t grow_chunks_by_words,
                           size_t medium_chunk_bunch);

  bool expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch = false);

  // Get the first chunk for a Metaspace.  Used for
  // special cases such as the boot class loader, reflection
  // class loader and anonymous class loader.
  Metachunk* get_initialization_chunk(size_t word_size, size_t chunk_bunch);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  ChunkManager* chunk_manager() { return &_chunk_manager; }
  bool is_class() const { return _is_class; }

  // Allocate the first virtualspace.
  void initialize(size_t word_size);

  size_t reserved_words()  { return _reserved_words; }
  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
  size_t committed_words() { return _committed_words; }
  size_t committed_bytes() { return committed_words() * BytesPerWord; }

  void inc_reserved_words(size_t v);
  void dec_reserved_words(size_t v);
  void inc_committed_words(size_t v);
  void dec_committed_words(size_t v);
  void inc_virtual_space_count();
  void dec_virtual_space_count();

  // Unlink empty VirtualSpaceNodes and free them.
  void purge();

  // Used and capacity in the entire list of virtual spaces.
  // These are global values shared by all Metaspaces
  size_t capacity_words_sum();
  size_t capacity_bytes_sum() { return capacity_words_sum() * BytesPerWord; }
  size_t used_words_sum();
  size_t used_bytes_sum() { return used_words_sum() * BytesPerWord; }

  bool contains(const void *ptr);

  void print_on(outputStream* st) const;

  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
};

class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _deallocate_block_a_lot_count;
  static int _deallocate_chunk_a_lot_count;
  static int _allocation_fail_alot_count;

 public:
  static int deallocate_block_a_lot_count() {
    return _deallocate_block_a_lot_count;
  }
  static void set_deallocate_block_a_lot_count(int v) {
    _deallocate_block_a_lot_count = v;
  }
  static void inc_deallocate_block_a_lot_count() {
    _deallocate_block_a_lot_count++;
  }
  static int deallocate_chunk_a_lot_count() {
    return _deallocate_chunk_a_lot_count;
  }
  static void reset_deallocate_chunk_a_lot_count() {
    _deallocate_chunk_a_lot_count = 1;
  }
  static void inc_deallocate_chunk_a_lot_count() {
    _deallocate_chunk_a_lot_count++;
  }

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif

  static void deallocate_chunk_a_lot(SpaceManager* sm,
                                     size_t chunk_word_size);
  static void deallocate_block_a_lot(SpaceManager* sm,
                                     size_t chunk_word_size);

};

int Metadebug::_deallocate_block_a_lot_count = 0;
int Metadebug::_deallocate_chunk_a_lot_count = 0;
int Metadebug::_allocation_fail_alot_count = 0;

//  SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:

  // Protects allocations and contains().
  Mutex* const _lock;

  // Type of metadata allocated.
  Metaspace::MetadataType _mdtype;

  // Chunk related size
  size_t _medium_chunk_bunch;

  // List of chunks in use by this SpaceManager.  Allocations
  // are done from the current chunk.  The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Virtual space where allocation comes from.
  VirtualSpaceList* _vs_list;

  // Number of small chunks to allocate to a manager
  // If class space manager, small chunks are unlimited
  static uint const _small_chunk_limit;

  // Sum of all space in allocated chunks
  size_t _allocated_blocks_words;

  // Sum of all allocated chunks
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist _block_freelists;

  // protects virtualspace and chunk expansions
  static const char*  _expand_lock_name;
  static const int    _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) { _chunks_in_use[index] = v; }

  BlockFreelist* block_freelists() const {
    return (BlockFreelist*) &_block_freelists;
  }

  Metaspace::MetadataType mdtype() { return _mdtype; }
  VirtualSpaceList* vs_list() const    { return _vs_list; }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);
  void retire_current_chunk();

  Mutex* lock() const { return _lock; }

  const char* chunk_size_name(ChunkIndex index) const;

 protected:
  void initialize();

 public:
  SpaceManager(Metaspace::MetadataType mdtype,
               Mutex* lock,
               VirtualSpaceList* vs_list);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  // Accessors
  size_t specialized_chunk_size() { return SpecializedChunk; }
  size_t small_chunk_size() { return vs_list()->is_class() ? (size_t)ClassSmallChunk : (size_t)SmallChunk; }
  size_t medium_chunk_size() { return vs_list()->is_class() ? (size_t)ClassMediumChunk : (size_t)MediumChunk; }
  size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }
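
  // Illustrative note (not in the original source): with the constants
  // above, medium_chunk_bunch() is 8K words * 4 = 32K words (256 KB with
  // 8-byte words) for the non-class metaspace, and 4K words * 4 = 16K
  // words (128 KB) for the class metaspace.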

  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
  size_t allocated_chunks_count() const { return _allocated_chunks_count; }

  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }

  // Increment the per Metaspace and global running sums for Metachunks
  // by the given size.  This is used when a Metachunk is added to
  // the in-use list.
  void inc_size_metrics(size_t words);
  // Increment the per Metaspace and global running sums Metablocks by the given
  // size.  This is used when a Metablock is allocated.
  void inc_used_metrics(size_t words);
  // Delete the portion of the running sums for this SpaceManager. That is,
  // the globals running sums for the Metachunks and Metablocks are
  // decremented for all the Metachunks in-use by this SpaceManager.
  void dec_total_from_size_metrics();

  // Set the sizes for the initial chunks.
  void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
                               size_t* chunk_word_size,
                               size_t* class_chunk_word_size);

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk
  MetaWord* allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size,
  // returned chunk size (for expanding space for chunk allocation).
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
  void verify_chunk_size(Metachunk* chunk);
  NOT_PRODUCT(void mangle_freed_chunks();)
#ifdef ASSERT
  void verify_allocated_blocks_words();
#endif

  size_t get_raw_word_size(size_t word_size) {
    // If only the dictionary is going to be used (i.e., no
    // indexed free list), then there is a minimum size requirement.
    // MinChunkSize is a placeholder for the real minimum size.
    size_t byte_size = word_size * BytesPerWord;

    size_t raw_bytes_size = MAX2(byte_size,
                                 Metablock::min_block_byte_size());
    raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
  }
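
  // Worked example (illustrative, assuming 8-byte words and 8-byte
  // ARENA_ALIGN): a 3-word (24-byte) request is first raised to
  // Metablock::min_block_byte_size() if that is larger, then rounded up
  // by ARENA_ALIGN, so the caller is charged the aligned size rather
  // than the 24 bytes actually requested.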
};

uint const SpaceManager::_small_chunk_limit = 4;

const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag);

void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count++;
  assert(_container_count == container_count_slow(),
         err_msg("Inconsistency in container_count: _container_count " SIZE_FORMAT
                 " container_count_slow() " SIZE_FORMAT,
                 _container_count, container_count_slow()));
}

void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count--;
}

#ifdef ASSERT
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
    err_msg("Inconsistency in container_count: _container_count " SIZE_FORMAT
            " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
}
#endif

// BlockFreelist methods

BlockFreelist::BlockFreelist() : _dictionary(NULL) {}

BlockFreelist::~BlockFreelist() {
  if (_dictionary != NULL) {
    if (Verbose && TraceMetadataChunkAllocation) {
      _dictionary->print_free_lists(gclog_or_tty);
    }
    delete _dictionary;
  }
}

Metablock* BlockFreelist::initialize_free_chunk(MetaWord* p, size_t word_size) {
  Metablock* block = (Metablock*) p;
  block->set_word_size(word_size);
  block->set_prev(NULL);
  block->set_next(NULL);

  return block;
}

void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  Metablock* free_chunk = initialize_free_chunk(p, word_size);
  if (dictionary() == NULL) {
    _dictionary = new BlockTreeDictionary();
  }
  dictionary()->return_chunk(free_chunk);
}

MetaWord* BlockFreelist::get_block(size_t word_size) {
  if (dictionary() == NULL) {
    return NULL;
  }

  if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
    // Dark matter.  Too small for dictionary.
    return NULL;
  }

  Metablock* free_block =
    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
  if (free_block == NULL) {
    return NULL;
  }

  const size_t block_size = free_block->size();
  if (block_size > WasteMultiplier * word_size) {
    return_block((MetaWord*)free_block, block_size);
    return NULL;
  }

  MetaWord* new_block = (MetaWord*)free_block;
  assert(block_size >= word_size, "Incorrect size of block from freelist");
  const size_t unused = block_size - word_size;
  if (unused >= TreeChunk<Metablock, FreeList>::min_size()) {
    return_block(new_block + word_size, unused);
  }

  return new_block;
}

void BlockFreelist::print_on(outputStream* st) const {
  if (dictionary() == NULL) {
    return;
  }
  dictionary()->print_free_lists(st);
}

// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
#ifdef ASSERT
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}

// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging.  Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  if (!is_available(chunk_word_size)) {
    if (TraceMetadataChunkAllocation) {
      gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
      // Dump some information about the virtual space that is nearly full
      print_on(gclog_or_tty);
    }
    return NULL;
  }

  // Take the space (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Initialize the chunk
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
  return result;
}


// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t words, bool pre_touch) {
  size_t bytes = words * BytesPerWord;
  bool result =  virtual_space()->expand_by(bytes, pre_touch);
  if (TraceMetavirtualspaceAllocation && !result) {
    gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed "
                           "for byte size " SIZE_FORMAT, bytes);
    virtual_space()->print_on(gclog_or_tty);
  }
  return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* result = take_from_committed(chunk_word_size);
  if (result != NULL) {
    inc_container_count();
  }
  return result;
919 920 921 922 923 924 925 926
}

bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // An allocation out of this Virtualspace that is larger
  // than an initial commit size can waste that initial committed
  // space.
  size_t committed_byte_size = 0;
  bool result = virtual_space()->initialize(_rs, committed_byte_size);
  if (result) {
    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                 (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
      err_msg("Reserved start was not set properly " PTR_FORMAT
        " != " PTR_FORMAT, reserved()->start(), _rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
      err_msg("Reserved size was not set properly " SIZE_FORMAT
        " != " SIZE_FORMAT, reserved()->word_size(),
        _rs.size() / BytesPerWord));
  }

  return result;
}

void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
           "[" PTR_FORMAT ", " PTR_FORMAT ", "
           PTR_FORMAT ", " PTR_FORMAT ")",
           vs, capacity / K,
           capacity == 0 ? 0 : used * 100 / capacity,
           bottom(), top(), end(),
           vs->high_boundary());
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT

// VirtualSpaceList methods
// Space allocated from the VirtualSpace

VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words - v;
}

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words + v;
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words - v;
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count++;
}
void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count--;
}

void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk is being removed from the chunks free list.
  dec_free_chunks_total(chunk->capacity_word_size());
}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count.  Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge() {
  assert_lock_strong(SpaceManager::expand_lock());
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager());
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}

size_t VirtualSpaceList::used_words_sum() {
  size_t allocated_by_vs = 0;
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    // Sum used region [bottom, top) in each virtualspace
    allocated_by_vs += vsl->used_words_in_vs();
  }
  assert(allocated_by_vs >= chunk_manager()->free_chunks_total_words(),
    err_msg("Total in free chunks " SIZE_FORMAT
            " greater than total from virtual_spaces " SIZE_FORMAT,
            allocated_by_vs, chunk_manager()->free_chunks_total_words()));
  size_t used =
    allocated_by_vs - chunk_manager()->free_chunks_total_words();
  return used;
}

// Space available in all MetadataVirtualspaces allocated
// for metadata.  This is the upper limit on the capacity
// of chunks allocated out of all the MetadataVirtualspaces.
size_t VirtualSpaceList::capacity_words_sum() {
  size_t capacity = 0;
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    capacity += vsl->capacity_words_in_vs();
  }
  return capacity;
}

VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  bool initialization_succeeded = grow_vs(word_size);

  _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
  _chunk_manager.free_chunks(SmallIndex)->set_size(SmallChunk);
  _chunk_manager.free_chunks(MediumIndex)->set_size(MediumChunk);
  assert(initialization_succeeded,
    " VirtualSpaceList initialization should not fail");
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
  _chunk_manager.free_chunks(SmallIndex)->set_size(ClassSmallChunk);
  _chunk_manager.free_chunks(MediumIndex)->set_size(ClassMediumChunk);
  assert(succeeded, " VirtualSpaceList initialization should not fail");
  link_vs(class_entry);
}

size_t VirtualSpaceList::free_bytes() {
  return virtual_space_list()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  if (vs_word_size == 0) {
    return false;
  }
  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert(vs_byte_size % os::vm_page_size() == 0, "Not aligned");

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size, "Must be");
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(new_entry);
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  if (TraceMetavirtualspaceAllocation && Verbose) {
    VirtualSpaceNode* vsl = current_virtual_space();
    vsl->print_on(gclog_or_tty);
  }
}

bool VirtualSpaceList::expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch) {
  size_t before = node->committed_words();

  bool result = node->expand_by(word_size, pre_touch);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Must be");
  inc_committed_words(after - before);

  return result;
}

Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
                                           size_t grow_chunks_by_words,
                                           size_t medium_chunk_bunch) {

  // Get a chunk from the chunk freelist
  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);

  if (next != NULL) {
    next->container()->inc_container_count();
  } else {
    // Allocate a chunk out of the current virtual space.
    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  }

  if (next == NULL) {
    // Not enough room in current virtual space.  Try to commit
    // more space.
    size_t expand_vs_by_words = MAX2(medium_chunk_bunch,
                                     grow_chunks_by_words);
    size_t page_size_words = os::vm_page_size() / BytesPerWord;
    size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
                                                        page_size_words);
    bool vs_expanded =
      expand_by(current_virtual_space(), aligned_expand_vs_by_words);
    if (!vs_expanded) {
      // Should the capacity of the metaspaces be expanded for
      // this allocation?  If it's the virtual space for classes and is
      // being used for CompressedHeaders, don't allocate a new virtualspace.
      if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
        // Get another virtual space.
        size_t grow_vs_words =
            MAX2((size_t)VirtualSpaceSize, aligned_expand_vs_by_words);
        if (grow_vs(grow_vs_words)) {
          // Got it.  It's on the list now.  Get a chunk from it.
          assert(current_virtual_space()->expanded_words() == 0,
              "New virtual space nodes should not have expanded");

          size_t grow_chunks_by_words_aligned = align_size_up(grow_chunks_by_words,
                                                              page_size_words);
          // We probably want to expand by aligned_expand_vs_by_words here.
          expand_by(current_virtual_space(), grow_chunks_by_words_aligned);
          next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250
        }
      } else {
        // Allocation will fail and induce a GC
        if (TraceMetadataChunkAllocation && Verbose) {
          gclog_or_tty->print_cr("VirtualSpaceList::get_new_chunk():"
            " Fail instead of expand the metaspace");
        }
      }
    } else {
      // The virtual space expanded, get a new chunk
      next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
      assert(next != NULL, "Just expanded, should succeed");
    }
  }

  assert(next == NULL || (next->next() == NULL && next->prev() == NULL),
         "New chunk is still on some list");
  return next;
}
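
// Summary of the fallback order above (illustrative, not in the original
// source):
//   1. Reuse a chunk from the global free list (ChunkManager).
//   2. Carve a chunk from the committed part of the current virtual space.
//   3. Commit more of the current virtual space and retry.
//   4. If allowed (can_grow() && MetaspaceGC::should_expand()), reserve a
//      new virtual space and allocate the chunk there.
//   5. Otherwise return NULL and let the caller induce a GC.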

Metachunk* VirtualSpaceList::get_initialization_chunk(size_t chunk_word_size,
                                                      size_t chunk_bunch) {
  // Get a chunk from the chunk freelist
  Metachunk* new_chunk = get_new_chunk(chunk_word_size,
                                       chunk_word_size,
                                       chunk_bunch);
  return new_chunk;
}

void VirtualSpaceList::print_on(outputStream* st) const {
  if (TraceMetadataChunkAllocation && Verbose) {
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* node = iter.get_next();
      node->print_on(st);
    }
  }
}

bool VirtualSpaceList::contains(const void *ptr) {
  VirtualSpaceNode* list = virtual_space_list();
  VirtualSpaceListIterator iter(list);
  while (iter.repeat()) {
    VirtualSpaceNode* node = iter.get_next();
    if (node->reserved()->contains(ptr)) {
      return true;
    }
  }
  return false;
}


// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With perm gen the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GCs.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation (i.e. big enough for a word_size
// allocation), increase by MaxMetaspaceExpansion.  If that is still
// not enough, expand by the size of the allocation (word_size) plus
// some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) {
  size_t before_inc = MetaspaceGC::capacity_until_GC();
  size_t min_delta_words = MinMetaspaceExpansion / BytesPerWord;
  size_t max_delta_words = MaxMetaspaceExpansion / BytesPerWord;
  size_t page_size_words = os::vm_page_size() / BytesPerWord;
  size_t size_delta_words = align_size_up(word_size, page_size_words);
  size_t delta_words = MAX2(size_delta_words, min_delta_words);
  if (delta_words > min_delta_words) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta_words = MAX2(delta_words, max_delta_words);
    if (delta_words > max_delta_words) {
      // This allocation is large but the next ones are probably not
      // so increase by the minimum.
      delta_words = delta_words + min_delta_words;
    }
  }
  return delta_words;
}
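
// Worked example (illustrative, assuming 8-byte words, 4K pages and the
// default MinMetaspaceExpansion of 256 KB, i.e. 32K words): a failed
// allocation of 1000 words is page-aligned up to 1024 words; that is
// below the 32K-word minimum, so the HWM is raised by the full
// MinMetaspaceExpansion rather than by just enough for this request.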

bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {

  // If the user wants a limit, impose one.
  // The reason for someone using this flag is to limit reserved space.  So
  // for non-class virtual space, compare against virtual spaces that are reserved.
  // For class virtual space, we only compare against the committed space, not
  // reserved space, because this is a larger space prereserved for compressed
  // class pointers.
  if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
    size_t real_allocated = Metaspace::space_list()->reserved_bytes() +
              MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
    if (real_allocated >= MaxMetaspaceSize) {
      return false;
    }
  }

  // Class virtual space should always be expanded.  Call GC for the other
  // metadata virtual space.
  if (Metaspace::using_class_space() &&
      (vsl == Metaspace::class_space_list())) return true;

  // If this is part of an allocation after a GC, expand
  // unconditionally.
  if (MetaspaceGC::expand_after_GC()) {
    return true;
  }


  // If the capacity is below the minimum capacity, allow the
  // expansion.  Also set the high-water-mark (capacity_until_GC)
  // to that minimum capacity so that a GC will not be induced
  // until that minimum capacity is exceeded.
  size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes();
  size_t metaspace_size_bytes = MetaspaceSize;
  if (committed_capacity_bytes < metaspace_size_bytes ||
      capacity_until_GC() == 0) {
    set_capacity_until_GC(metaspace_size_bytes);
    return true;
  } else {
    if (committed_capacity_bytes < capacity_until_GC()) {
      return true;
    } else {
      if (TraceMetadataChunkAllocation && Verbose) {
        gclog_or_tty->print_cr("  allocation request size " SIZE_FORMAT
                        "  capacity_until_GC " SIZE_FORMAT
                        "  allocated_capacity_bytes " SIZE_FORMAT,
                        word_size,
                        capacity_until_GC(),
                        MetaspaceAux::allocated_capacity_bytes());
      }
      return false;
    }
  }
}



void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Until a faster way of calculating the "used" quantity is implemented,
  // use "capacity".
  const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
    gclog_or_tty->print_cr("  "
                  "  minimum_free_percentage: %6.2f"
                  "  maximum_used_percentage: %6.2f",
                  minimum_free_percentage,
                  maximum_used_percentage);
    gclog_or_tty->print_cr("  "
                  "   used_after_gc       : %6.1fKB",
                  used_after_gc / (double) K);
  }


  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      MetaspaceGC::set_capacity_until_GC(capacity_until_GC + expand_bytes);
    }
    if (PrintGCDetails && Verbose) {
      size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC();
      gclog_or_tty->print_cr("    expanding:"
                    "  minimum_desired_capacity: %6.1fKB"
                    "  expand_bytes: %6.1fKB"
                    "  MinMetaspaceExpansion: %6.1fKB"
                    "  new metaspace HWM:  %6.1fKB",
1442 1443 1444 1445 1446 1447 1448 1449 1450 1451
                    minimum_desired_capacity / (double) K,
                    expand_bytes / (double) K,
                    MinMetaspaceExpansion / (double) K,
                    new_capacity_until_GC / (double) K);
    }
    return;
  }

  // No expansion, now see if we want to shrink
  // We would never want to shrink more than this
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
  assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT,
    max_shrink_bytes));

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  maximum_free_percentage: %6.2f"
                             "  minimum_used_percentage: %6.2f",
                             maximum_free_percentage,
                             minimum_used_percentage);
      gclog_or_tty->print_cr("  "
                             "  minimum_desired_capacity: %6.1fKB"
                             "  maximum_desired_capacity: %6.1fKB",
                             minimum_desired_capacity / (double) K,
                             maximum_desired_capacity / (double) K);
    }

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
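      // Illustrative (not from a real run): if the excess stayed at 40MB,
      // four back-to-back calls would shrink by 0MB, 4MB (10%), 16MB (40%)
      // and then the full 40MB (100%).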
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
      assert(shrink_bytes <= max_shrink_bytes,
        err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
          shrink_bytes, max_shrink_bytes));
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
      }
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("  "
                      "  shrinking:"
                      "  initSize: %.1fK"
                      "  maximum_desired_capacity: %.1fK",
                      MetaspaceSize / (double) K,
                      maximum_desired_capacity / (double) K);
        gclog_or_tty->print_cr("  "
1506
                      "  shrink_bytes: %.1fK"
1507 1508 1509
                      "  current_shrink_factor: %d"
                      "  new shrink factor: %d"
                      "  MinMetaspaceExpansion: %.1fK",
1510
                      shrink_bytes / (double) K,
1511 1512 1513 1514 1515 1516 1517 1518
                      current_shrink_factor,
                      _shrink_factor,
                      MinMetaspaceExpansion / (double) K);
      }
    }
  }

  // Don't shrink unless it's significant
  if (shrink_bytes >= MinMetaspaceExpansion &&
      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
    MetaspaceGC::set_capacity_until_GC(capacity_until_GC - shrink_bytes);
  }
}

// Metadebug methods

void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm,
                                       size_t chunk_word_size){
#ifdef ASSERT
  VirtualSpaceList* vsl = sm->vs_list();
  if (MetaDataDeallocateALot &&
      Metadebug::deallocate_chunk_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
    Metadebug::reset_deallocate_chunk_a_lot_count();
    for (uint i = 0; i < metadata_deallocate_a_lock_chunk; i++) {
      Metachunk* dummy_chunk = vsl->current_virtual_space()->take_from_committed(chunk_word_size);
      if (dummy_chunk == NULL) {
        break;
      }
      vsl->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);

      if (TraceMetadataChunkAllocation && Verbose) {
        gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ",
                               sm->sum_count_in_chunks_in_use());
        dummy_chunk->print_on(gclog_or_tty);
        gclog_or_tty->print_cr("  Free chunks total %d  count %d",
                               vsl->chunk_manager()->free_chunks_total_words(),
                               vsl->chunk_manager()->free_chunks_count());
      }
    }
  } else {
    Metadebug::inc_deallocate_chunk_a_lot_count();
  }
#endif
}

void Metadebug::deallocate_block_a_lot(SpaceManager* sm,
                                       size_t raw_word_size){
#ifdef ASSERT
  if (MetaDataDeallocateALot &&
        Metadebug::deallocate_block_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
    Metadebug::set_deallocate_block_a_lot_count(0);
    for (uint i = 0; i < metadata_deallocate_a_lot_block; i++) {
      MetaWord* dummy_block = sm->allocate_work(raw_word_size);
      if (dummy_block == 0) {
        break;
      }
      sm->deallocate(dummy_block, raw_word_size);
    }
  } else {
    Metadebug::inc_deallocate_block_a_lot_count();
  }
#endif
}

void Metadebug::init_allocation_fail_alot_count() {
  if (MetadataAllocationFailALot) {
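    // Pick a pseudo-random count in [1, MetadataAllocationFailALotInterval]
    // so the induced failures are not perfectly periodic.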
    _allocation_fail_alot_count =
      1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
  }
}

#ifdef ASSERT
bool Metadebug::test_metadata_failure() {
  if (MetadataAllocationFailALot &&
      Threads::is_vm_complete()) {
    if (_allocation_fail_alot_count > 0) {
      _allocation_fail_alot_count--;
    } else {
      if (TraceMetadataChunkAllocation && Verbose) {
        gclog_or_tty->print_cr("Metadata allocation failing for "
                               "MetadataAllocationFailALot");
      }
      init_allocation_fail_alot_count();
      return true;
    }
  }
  return false;
}
#endif

// ChunkManager methods

size_t ChunkManager::free_chunks_total_words() {
  return _free_chunks_total;
}

size_t ChunkManager::free_chunks_total_bytes() {
  return free_chunks_total_words() * BytesPerWord;
}

size_t ChunkManager::free_chunks_count() {
#ifdef ASSERT
  if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
    MutexLockerEx cl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
    // This lock is only needed in debug because the verification
    // of the _free_chunks_totals walks the list of free chunks
    slow_locked_verify_free_chunks_count();
  }
#endif
  return _free_chunks_count;
}

void ChunkManager::locked_verify_free_chunks_total() {
  assert_lock_strong(SpaceManager::expand_lock());
  assert(sum_free_chunks() == _free_chunks_total,
    err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
           " same as sum " SIZE_FORMAT, _free_chunks_total,
           sum_free_chunks()));
}

void ChunkManager::verify_free_chunks_total() {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
  locked_verify_free_chunks_total();
}

void ChunkManager::locked_verify_free_chunks_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  assert(sum_free_chunks_count() == _free_chunks_count,
    err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
           " same as sum " SIZE_FORMAT, _free_chunks_count,
           sum_free_chunks_count()));
}

void ChunkManager::verify_free_chunks_count() {
#ifdef ASSERT
  MutexLockerEx cl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
  locked_verify_free_chunks_count();
#endif
}

void ChunkManager::verify() {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
  locked_verify();
}

void ChunkManager::locked_verify() {
  locked_verify_free_chunks_count();
  locked_verify_free_chunks_total();
}

void ChunkManager::locked_print_free_chunks(outputStream* st) {
  assert_lock_strong(SpaceManager::expand_lock());
  st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
                _free_chunks_total, _free_chunks_count);
}

void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
  assert_lock_strong(SpaceManager::expand_lock());
  st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
                sum_free_chunks(), sum_free_chunks_count());
}

ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
  return &_free_chunks[index];
}

// These methods, which sum the free chunk lists, are used by printing
// methods that run in product builds.
size_t ChunkManager::sum_free_chunks() {
  assert_lock_strong(SpaceManager::expand_lock());
  size_t result = 0;
  for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
    ChunkList* list = free_chunks(i);

    if (list == NULL) {
      continue;
    }

    result = result + list->count() * list->size();
  }
  result = result + humongous_dictionary()->total_size();
  return result;
}

size_t ChunkManager::sum_free_chunks_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  size_t count = 0;
  for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
    ChunkList* list = free_chunks(i);
    if (list == NULL) {
      continue;
    }
    count = count + list->count();
  }
  count = count + humongous_dictionary()->total_free_blocks();
  return count;
}

ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
  ChunkIndex index = list_index(word_size);
  assert(index < HumongousIndex, "No humongous list");
  return free_chunks(index);
}

void ChunkManager::free_chunks_put(Metachunk* chunk) {
  assert_lock_strong(SpaceManager::expand_lock());
  ChunkList* free_list = find_free_chunks_list(chunk->word_size());
  chunk->set_next(free_list->head());
  free_list->set_head(chunk);
  // chunk is being returned to the chunk free list
  inc_free_chunks_total(chunk->capacity_word_size());
  slow_locked_verify();
}

void ChunkManager::chunk_freelist_deallocate(Metachunk* chunk) {
  // The deallocation of a chunk originates in the freelist
  // management code for a Metaspace and does not hold the
  // lock.
  assert(chunk != NULL, "Deallocating NULL");
  assert_lock_strong(SpaceManager::expand_lock());
  slow_locked_verify();
  if (TraceMetadataChunkAllocation) {
    gclog_or_tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
                           PTR_FORMAT "  size " SIZE_FORMAT,
                           chunk, chunk->word_size());
  }
  free_chunks_put(chunk);
}

Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  slow_locked_verify();

  Metachunk* chunk = NULL;
  if (list_index(word_size) != HumongousIndex) {
    ChunkList* free_list = find_free_chunks_list(word_size);
    assert(free_list != NULL, "Sanity check");

    chunk = free_list->head();
    debug_only(Metachunk* debug_head = chunk;)

    if (chunk == NULL) {
      return NULL;
    }

    // Remove the chunk as the head of the list.
    free_list->remove_chunk(chunk);

    // Chunk is being removed from the chunks free list.
    dec_free_chunks_total(chunk->capacity_word_size());

    if (TraceMetadataChunkAllocation && Verbose) {
      gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
                             PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
                             free_list, chunk, chunk->word_size());
    }
  } else {
    chunk = humongous_dictionary()->get_chunk(
      word_size,
      FreeBlockDictionary<Metachunk>::atLeast);

    if (chunk != NULL) {
      if (TraceMetadataHumongousAllocation) {
        size_t waste = chunk->word_size() - word_size;
        gclog_or_tty->print_cr("Free list allocate humongous chunk size "
                               SIZE_FORMAT " for requested size " SIZE_FORMAT
                               " waste " SIZE_FORMAT,
                               chunk->word_size(), word_size, waste);
      }
      // Chunk is being removed from the chunks free list.
      dec_free_chunks_total(chunk->capacity_word_size());
    } else {
      return NULL;
    }
  }

  // Remove it from the links to this freelist
  chunk->set_next(NULL);
  chunk->set_prev(NULL);
#ifdef ASSERT
  // Chunk is no longer on any freelist. Setting to false make container_count_slow()
  // work.
  chunk->set_is_free(false);
#endif
  slow_locked_verify();
  return chunk;
}

Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  slow_locked_verify();

  // Take from the beginning of the list
  Metachunk* chunk = free_chunks_get(word_size);
  if (chunk == NULL) {
    return NULL;
  }

  assert((word_size <= chunk->word_size()) ||
         (list_index(chunk->word_size()) == HumongousIndex),
         "Non-humongous variable sized chunk");
  if (TraceMetadataChunkAllocation) {
    size_t list_count;
    if (list_index(word_size) < HumongousIndex) {
      ChunkList* list = find_free_chunks_list(word_size);
      list_count = list->count();
    } else {
      list_count = humongous_dictionary()->total_count();
    }
    gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
                        PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
                        this, chunk, chunk->word_size(), list_count);
    locked_print_free_chunks(gclog_or_tty);
  }

  return chunk;
}

void ChunkManager::print_on(outputStream* out) {
  if (PrintFLSStatistics != 0) {
    humongous_dictionary()->report_statistics();
  }
}

// SpaceManager methods

void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
                                           size_t* chunk_word_size,
                                           size_t* class_chunk_word_size) {
  switch (type) {
  case Metaspace::BootMetaspaceType:
    *chunk_word_size = Metaspace::first_chunk_word_size();
    *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
    break;
  case Metaspace::ROMetaspaceType:
    *chunk_word_size = SharedReadOnlySize / wordSize;
    *class_chunk_word_size = ClassSpecializedChunk;
    break;
  case Metaspace::ReadWriteMetaspaceType:
    *chunk_word_size = SharedReadWriteSize / wordSize;
    *class_chunk_word_size = ClassSpecializedChunk;
    break;
  case Metaspace::AnonymousMetaspaceType:
  case Metaspace::ReflectionMetaspaceType:
    *chunk_word_size = SpecializedChunk;
    *class_chunk_word_size = ClassSpecializedChunk;
    break;
  default:
    *chunk_word_size = SmallChunk;
    *class_chunk_word_size = ClassSmallChunk;
    break;
  }
  assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
    err_msg("Initial chunk sizes bad: data  " SIZE_FORMAT
            " class " SIZE_FORMAT,
            *chunk_word_size, *class_chunk_word_size));
}

size_t SpaceManager::sum_free_in_chunks_in_use() const {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  size_t free = 0;
  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    Metachunk* chunk = chunks_in_use(i);
    while (chunk != NULL) {
      free += chunk->free_word_size();
      chunk = chunk->next();
    }
  }
  return free;
}

size_t SpaceManager::sum_waste_in_chunks_in_use() const {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  size_t result = 0;
  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
   result += sum_waste_in_chunks_in_use(i);
  }

  return result;
}

size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
  size_t result = 0;
  Metachunk* chunk = chunks_in_use(index);
  // Count the free space in all the chunks but not the
  // current chunk from which allocations are still being done.
  while (chunk != NULL) {
    if (chunk != current_chunk()) {
      result += chunk->free_word_size();
    }
    chunk = chunk->next();
  }
  return result;
}

size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
  // For CMS use "allocated_chunks_words()", which does not need the
  // Metaspace lock.  For the other collectors sum over the chunk
  // lists.  sum_capacity_in_chunks_in_use() is too expensive to use
  // in product builds, so allocated_chunks_words() is the one to use
  // there, but running both here lets us check that
  // allocated_chunks_words() agrees with the definitive answer from
  // sum_capacity_in_chunks_in_use().
  if (UseConcMarkSweepGC) {
    return allocated_chunks_words();
  } else {
    MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
    size_t sum = 0;
    for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
      Metachunk* chunk = chunks_in_use(i);
      while (chunk != NULL) {
        sum += chunk->capacity_word_size();
        chunk = chunk->next();
      }
    }
    return sum;
  }
}

size_t SpaceManager::sum_count_in_chunks_in_use() {
  size_t count = 0;
  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    count = count + sum_count_in_chunks_in_use(i);
  }

  return count;
}

size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
  size_t count = 0;
  Metachunk* chunk = chunks_in_use(i);
  while (chunk != NULL) {
    count++;
    chunk = chunk->next();
  }
  return count;
}


size_t SpaceManager::sum_used_in_chunks_in_use() const {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  size_t used = 0;
  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    Metachunk* chunk = chunks_in_use(i);
    while (chunk != NULL) {
      used += chunk->used_word_size();
      chunk = chunk->next();
    }
  }
  return used;
}

void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {

  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    Metachunk* chunk = chunks_in_use(i);
    st->print("SpaceManager: %s " PTR_FORMAT,
                 chunk_size_name(i), chunk);
    if (chunk != NULL) {
      st->print_cr(" free " SIZE_FORMAT,
                   chunk->free_word_size());
    } else {
      st->print_cr("");
    }
  }

  vs_list()->chunk_manager()->locked_print_free_chunks(st);
  vs_list()->chunk_manager()->locked_print_sum_free_chunks(st);
}

size_t SpaceManager::calc_chunk_size(size_t word_size) {

  // Decide between a small chunk and a medium chunk.  Up to
  // _small_chunk_limit small chunks can be allocated but
  // once a medium chunk has been allocated, no more small
  // chunks will be allocated.
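  // E.g. (illustrative): while fewer than _small_chunk_limit small chunks
  // exist and no medium chunk has been allocated, a request that fits in
  // small_chunk_size() minus Metachunk::overhead() gets a small chunk;
  // anything else falls through to a medium chunk.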
  size_t chunk_word_size;
  if (chunks_in_use(MediumIndex) == NULL &&
      sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
    chunk_word_size = (size_t) small_chunk_size();
    if (word_size + Metachunk::overhead() > small_chunk_size()) {
      chunk_word_size = medium_chunk_size();
    }
  } else {
    chunk_word_size = medium_chunk_size();
2001 2002
  }

  // Might still need a humongous chunk.  Enforce an
  // eight word granularity to facilitate reuse (some
  // wastage but better chance of reuse).
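  // E.g. (illustrative, the overhead value is hypothetical): with a
  // 2-word Metachunk overhead, a 10,001-word request becomes 10,003
  // words, which rounds up to 10,008 (the next multiple of 8).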
  size_t if_humongous_sized_chunk =
    align_size_up(word_size + Metachunk::overhead(),
                  HumongousChunkGranularity);
  chunk_word_size =
    MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);

  assert(!SpaceManager::is_humongous(word_size) ||
         chunk_word_size == if_humongous_sized_chunk,
         err_msg("Size calculation is wrong, word_size " SIZE_FORMAT
                 " chunk_word_size " SIZE_FORMAT,
                 word_size, chunk_word_size));
  if (TraceMetadataHumongousAllocation &&
      SpaceManager::is_humongous(word_size)) {
    gclog_or_tty->print_cr("Metadata humongous allocation:");
    gclog_or_tty->print_cr("  word_size " PTR_FORMAT, word_size);
    gclog_or_tty->print_cr("  chunk_word_size " PTR_FORMAT,
                           chunk_word_size);
    gclog_or_tty->print_cr("    chunk overhead " PTR_FORMAT,
                           Metachunk::overhead());
  }
  return chunk_word_size;
}

MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
  assert(vs_list()->current_virtual_space() != NULL,
         "Should have been set");
  assert(current_chunk() == NULL ||
         current_chunk()->allocate(word_size) == NULL,
         "Don't need to expand");
  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);

  if (TraceMetadataChunkAllocation && Verbose) {
    size_t words_left = 0;
    size_t words_used = 0;
    if (current_chunk() != NULL) {
      words_left = current_chunk()->free_word_size();
      words_used = current_chunk()->used_word_size();
    }
    gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
                           " words " SIZE_FORMAT " words used " SIZE_FORMAT
                           " words left",
                            word_size, words_used, words_left);
  }

  // Get another chunk out of the virtual space
  size_t grow_chunks_by_words = calc_chunk_size(word_size);
  Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);

  // If a chunk was available, add it to the in-use chunk list
  // and do an allocation from it.
  if (next != NULL) {
    Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
    // Add to this manager's list of chunks in use.
    add_chunk(next, false);
    return next->allocate(word_size);
  }
  return NULL;
}

void SpaceManager::print_on(outputStream* st) const {

  for (ChunkIndex i = ZeroIndex;
       i < NumberOfInUseLists ;
       i = next_chunk_index(i) ) {
    st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT,
                 chunks_in_use(i),
                 chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
  }
  st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
               " Humongous " SIZE_FORMAT,
               sum_waste_in_chunks_in_use(SmallIndex),
               sum_waste_in_chunks_in_use(MediumIndex),
               sum_waste_in_chunks_in_use(HumongousIndex));
  // block free lists
  if (block_freelists() != NULL) {
    st->print_cr("total in block free lists " SIZE_FORMAT,
      block_freelists()->total_size());
  }
}

SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
                           Mutex* lock,
                           VirtualSpaceList* vs_list) :
  _vs_list(vs_list),
  _mdtype(mdtype),
  _allocated_blocks_words(0),
  _allocated_chunks_words(0),
  _allocated_chunks_count(0),
  _lock(lock)
{
  initialize();
}

void SpaceManager::inc_size_metrics(size_t words) {
  assert_lock_strong(SpaceManager::expand_lock());
  // Total of allocated Metachunks and allocated Metachunks count
  // for each SpaceManager
  _allocated_chunks_words = _allocated_chunks_words + words;
  _allocated_chunks_count++;
  // Global total of capacity in allocated Metachunks
  MetaspaceAux::inc_capacity(mdtype(), words);
  // Global total of allocated Metablocks.
  // used_words_slow() includes the overhead in each
  // Metachunk so include it in the used when the
  // Metachunk is first added (so only added once per
  // Metachunk).
  MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
}

void SpaceManager::inc_used_metrics(size_t words) {
  // Add to the per SpaceManager total
  Atomic::add_ptr(words, &_allocated_blocks_words);
  // Add to the global total
  MetaspaceAux::inc_used(mdtype(), words);
}

void SpaceManager::dec_total_from_size_metrics() {
  MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
  MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
  // Also deduct the overhead per Metachunk
  MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
}

void SpaceManager::initialize() {
  Metadebug::init_allocation_fail_alot_count();
  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    _chunks_in_use[i] = NULL;
  }
  _current_chunk = NULL;
  if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this);
  }
}

void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
  if (chunks == NULL) {
    return;
  }
  ChunkList* list = free_chunks(index);
  assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes");
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* cur = chunks;

  // This returns chunks one at a time.  If a new class List
  // were created as a base class of FreeList, something like
  // FreeList::prepend() could be used in place of this loop.
  while (cur != NULL) {
    assert(cur->container() != NULL, "Container should have been set");
    cur->container()->dec_container_count();
    // Capture the next link before it is changed
    // by the call to return_chunk_at_head();
    Metachunk* next = cur->next();
    cur->set_is_free(true);
    list->return_chunk_at_head(cur);
    cur = next;
  }
}

SpaceManager::~SpaceManager() {
  // This call takes this->_lock, which can't be done while holding expand_lock()
  assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
    err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
            " allocated_chunks_words() " SIZE_FORMAT,
            sum_capacity_in_chunks_in_use(), allocated_chunks_words()));

  MutexLockerEx fcl(SpaceManager::expand_lock(),
                    Mutex::_no_safepoint_check_flag);

  ChunkManager* chunk_manager = vs_list()->chunk_manager();

  chunk_manager->slow_locked_verify();

  dec_total_from_size_metrics();

  if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
    locked_print_chunks_in_use_on(gclog_or_tty);
  }

  // Do not mangle freed Metachunks.  The chunk size inside Metachunks
  // is used during the freeing of VirtualSpaceNodes.

  // Have to update before the chunks_in_use lists are emptied
  // below.
  chunk_manager->inc_free_chunks_total(allocated_chunks_words(),
                                       sum_count_in_chunks_in_use());

  // Add all the chunks in use by this space manager
  // to the global list of free chunks.

  // Follow each list of chunks-in-use and add them to the
  // free lists.  Each list is NULL terminated.

  for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
    if (TraceMetadataChunkAllocation && Verbose) {
      gclog_or_tty->print_cr("returned %d %s chunks to freelist",
                             sum_count_in_chunks_in_use(i),
                             chunk_size_name(i));
    }
    Metachunk* chunks = chunks_in_use(i);
    chunk_manager->return_chunks(i, chunks);
    set_chunks_in_use(i, NULL);
    if (TraceMetadataChunkAllocation && Verbose) {
      gclog_or_tty->print_cr("updated freelist count %d %s",
                             chunk_manager->free_chunks(i)->count(),
                             chunk_size_name(i));
    }
    assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
  }

  // The medium chunk case may be optimized by passing the head and
  // tail of the medium chunk list to add_at_head().  The tail is often
  // the current chunk but there are probably exceptions.

  // Humongous chunks
  if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print_cr("returned %d %s humongous chunks to dictionary",
                            sum_count_in_chunks_in_use(HumongousIndex),
                            chunk_size_name(HumongousIndex));
    gclog_or_tty->print("Humongous chunk dictionary: ");
  }
  // Humongous chunks are never the current chunk.
  Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);

  while (humongous_chunks != NULL) {
#ifdef ASSERT
    humongous_chunks->set_is_free(true);
#endif
    if (TraceMetadataChunkAllocation && Verbose) {
      gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
                          humongous_chunks,
                          humongous_chunks->word_size());
    }
    assert(humongous_chunks->word_size() == (size_t)
           align_size_up(humongous_chunks->word_size(),
                             HumongousChunkGranularity),
           err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
                   " granularity %d",
                   humongous_chunks->word_size(), HumongousChunkGranularity));
    Metachunk* next_humongous_chunks = humongous_chunks->next();
    humongous_chunks->container()->dec_container_count();
    chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks);
    humongous_chunks = next_humongous_chunks;
  }
  if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print_cr("");
    gclog_or_tty->print_cr("updated dictionary count %d %s",
                     chunk_manager->humongous_dictionary()->total_count(),
                     chunk_size_name(HumongousIndex));
  }
  chunk_manager->slow_locked_verify();
}

const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
  switch (index) {
    case SpecializedIndex:
      return "Specialized";
    case SmallIndex:
      return "Small";
    case MediumIndex:
      return "Medium";
    case HumongousIndex:
      return "Humongous";
    default:
      return NULL;
  }
}

ChunkIndex ChunkManager::list_index(size_t size) {
  switch (size) {
    case SpecializedChunk:
      assert(SpecializedChunk == ClassSpecializedChunk,
             "Need branch for ClassSpecializedChunk");
      return SpecializedIndex;
    case SmallChunk:
    case ClassSmallChunk:
      return SmallIndex;
    case MediumChunk:
    case ClassMediumChunk:
      return MediumIndex;
    default:
      assert(size > MediumChunk || size > ClassMediumChunk,
             "Not a humongous chunk");
      return HumongousIndex;
  }
}

void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
  assert_lock_strong(_lock);
  size_t raw_word_size = get_raw_word_size(word_size);
  size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
  assert(raw_word_size >= min_size,
         err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
  block_freelists()->return_block(p, raw_word_size);
}

// Adds a chunk to the list of chunks in use.
void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {

  assert(new_chunk != NULL, "Should not be NULL");
  assert(new_chunk->next() == NULL, "Should not be on a list");

  new_chunk->reset_empty();

  // Find the correct list and set the current
  // chunk for that list.
  ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());

  if (index != HumongousIndex) {
    retire_current_chunk();
    set_current_chunk(new_chunk);
    new_chunk->set_next(chunks_in_use(index));
    set_chunks_in_use(index, new_chunk);
  } else {
    // For null class loader data and DumpSharedSpaces, the first chunk isn't
    // small, so small will be null.  Link this first chunk as the current
    // chunk.
    if (make_current) {
      // Set as the current chunk but otherwise treat as a humongous chunk.
      set_current_chunk(new_chunk);
    }
    // Link at head.  _current_chunk only ever points to a humongous chunk
    // for the null class loader metaspace (class and data virtual space
    // managers), so it never needs to point to the tail of the humongous
    // chunks list.
    new_chunk->set_next(chunks_in_use(HumongousIndex));
    set_chunks_in_use(HumongousIndex, new_chunk);

    assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
  }

  // Add to the running sum of capacity
  inc_size_metrics(new_chunk->word_size());

  assert(new_chunk->is_empty(), "Not ready for reuse");
  if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
                        sum_count_in_chunks_in_use());
    new_chunk->print_on(gclog_or_tty);
    if (vs_list() != NULL) {
      vs_list()->chunk_manager()->locked_print_free_chunks(gclog_or_tty);
    }
  }
}

void SpaceManager::retire_current_chunk() {
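  // Donate the unused tail of the current chunk to the block freelist
  // (when it is at least TreeChunk<Metablock, FreeList>::min_size())
  // so later small allocations can reuse it; the donated words count
  // as used since they have been carved out of the chunk.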
  if (current_chunk() != NULL) {
    size_t remaining_words = current_chunk()->free_word_size();
    if (remaining_words >= TreeChunk<Metablock, FreeList>::min_size()) {
      block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
      inc_used_metrics(remaining_words);
    }
  }
}

Metachunk* SpaceManager::get_new_chunk(size_t word_size,
                                       size_t grow_chunks_by_words) {

  Metachunk* next = vs_list()->get_new_chunk(word_size,
                                             grow_chunks_by_words,
                                             medium_chunk_bunch());

  if (TraceMetadataHumongousAllocation && next != NULL &&
      SpaceManager::is_humongous(next->word_size())) {
    gclog_or_tty->print_cr("  new humongous chunk word size "
                           PTR_FORMAT, next->word_size());
  }

  return next;
}

MetaWord* SpaceManager::allocate(size_t word_size) {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);

  size_t raw_word_size = get_raw_word_size(word_size);
  BlockFreelist* fl =  block_freelists();
  MetaWord* p = NULL;
  // Allocation from the dictionary is expensive in the sense that
  // the dictionary has to be searched for a size.  Don't allocate
  // from the dictionary until it starts to get fat.  Is this
  // a reasonable policy?  Maybe a skinny dictionary is fast enough
  // for allocations.  Do some profiling.  JJJ
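  // The threshold is allocation_from_dictionary_limit (defined near the
  // top of this file); until the freelist's total_size() exceeds it,
  // requests go straight to allocate_work().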
  if (fl->total_size() > allocation_from_dictionary_limit) {
    p = fl->get_block(raw_word_size);
  }
  if (p == NULL) {
    p = allocate_work(raw_word_size);
  }
  Metadebug::deallocate_block_a_lot(this, raw_word_size);

  return p;
}

// Returns the address of space allocated for "word_size".
// This method does not know about blocks (Metablocks).
MetaWord* SpaceManager::allocate_work(size_t word_size) {
  assert_lock_strong(_lock);
#ifdef ASSERT
  if (Metadebug::test_metadata_failure()) {
    return NULL;
  }
#endif
  // Is there space in the current chunk?
  MetaWord* result = NULL;

  // For DumpSharedSpaces, only allocate out of the current chunk which is
  // never null because we gave it the size we wanted.   Caller reports out
  // of memory if this returns null.
  if (DumpSharedSpaces) {
    assert(current_chunk() != NULL, "should never happen");
    inc_used_metrics(word_size);
    return current_chunk()->allocate(word_size); // caller handles null result
  }
  if (current_chunk() != NULL) {
    result = current_chunk()->allocate(word_size);
  }

  if (result == NULL) {
    result = grow_and_allocate(word_size);
  }
  if (result != 0) {
    inc_used_metrics(word_size);
    assert(result != (MetaWord*) chunks_in_use(MediumIndex),
           "Head of the list is being allocated");
  }

  return result;
}

void SpaceManager::verify() {
  // If there are blocks in the dictionary, then
  // verification of chunks does not work since
  // being in the dictionary alters a chunk.
  if (block_freelists()->total_size() == 0) {
    for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
      Metachunk* curr = chunks_in_use(i);
      while (curr != NULL) {
        curr->verify();
        verify_chunk_size(curr);
        curr = curr->next();
      }
    }
  }
}

void SpaceManager::verify_chunk_size(Metachunk* chunk) {
  assert(is_humongous(chunk->word_size()) ||
         chunk->word_size() == medium_chunk_size() ||
         chunk->word_size() == small_chunk_size() ||
         chunk->word_size() == specialized_chunk_size(),
         "Chunk size is wrong");
  return;
}

#ifdef ASSERT
void SpaceManager::verify_allocated_blocks_words() {
  // Verification is only guaranteed at a safepoint.
  assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
    "Verification can fail if the application is running");
  assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
    err_msg("allocation total is not consistent " SIZE_FORMAT
            " vs " SIZE_FORMAT,
            allocated_blocks_words(), sum_used_in_chunks_in_use()));
}

#endif

void SpaceManager::dump(outputStream* const out) const {
  size_t curr_total = 0;
  size_t waste = 0;
  uint i = 0;
  size_t used = 0;
  size_t capacity = 0;

  // Add up statistics for all chunks in this SpaceManager.
  for (ChunkIndex index = ZeroIndex;
       index < NumberOfInUseLists;
       index = next_chunk_index(index)) {
    for (Metachunk* curr = chunks_in_use(index);
         curr != NULL;
         curr = curr->next()) {
      out->print("%d) ", i++);
      curr->print_on(out);
      curr_total += curr->word_size();
      used += curr->used_word_size();
      capacity += curr->capacity_word_size();
      waste += curr->free_word_size() + curr->overhead();
    }
  }

  if (TraceMetadataChunkAllocation && Verbose) {
    block_freelists()->print_on(out);
  }

  size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
  // Free space isn't wasted.
  waste -= free;

  out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
                " free " SIZE_FORMAT " capacity " SIZE_FORMAT
                " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
}

#ifndef PRODUCT
void SpaceManager::mangle_freed_chunks() {
  for (ChunkIndex index = ZeroIndex;
       index < NumberOfInUseLists;
       index = next_chunk_index(index)) {
    for (Metachunk* curr = chunks_in_use(index);
         curr != NULL;
         curr = curr->next()) {
      curr->mangle();
    }
  }
}
#endif // PRODUCT

// MetaspaceAux

size_t MetaspaceAux::_allocated_capacity_words[] = {0, 0};
size_t MetaspaceAux::_allocated_used_words[] = {0, 0};

size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  return list == NULL ? 0 : list->free_bytes();
}

size_t MetaspaceAux::free_bytes() {
  return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
}

void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
  assert_lock_strong(SpaceManager::expand_lock());
  assert(words <= allocated_capacity_words(mdtype),
    err_msg("About to decrement below 0: words " SIZE_FORMAT
            " is greater than _allocated_capacity_words[%u] " SIZE_FORMAT,
            words, mdtype, allocated_capacity_words(mdtype)));
  _allocated_capacity_words[mdtype] -= words;
}

void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
  assert_lock_strong(SpaceManager::expand_lock());
  // Needs to be atomic
  _allocated_capacity_words[mdtype] += words;
}

void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
  assert(words <= allocated_used_words(mdtype),
    err_msg("About to decrement below 0: words " SIZE_FORMAT
            " is greater than _allocated_used_words[%u] " SIZE_FORMAT,
            words, mdtype, allocated_used_words(mdtype)));
2559 2560 2561 2562 2563
  // For CMS deallocation of the Metaspaces occurs during the
  // sweep which is a concurrent phase.  Protection by the expand_lock()
  // is not enough since allocation is on a per Metaspace basis
  // and protected by the Metaspace lock.
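  // Negate in a signed type first: the only atomic primitive used here
  // is Atomic::add_ptr, so the subtraction is expressed as adding a
  // negative value.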
  jlong minus_words = (jlong) - (jlong) words;
  Atomic::add_ptr(minus_words, &_allocated_used_words[mdtype]);
}

void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
  // _allocated_used_words tracks allocations for
  // each piece of metadata.  Those allocations are
  // generally done concurrently by different application
  // threads so must be done atomically.
  Atomic::add_ptr(words, &_allocated_used_words[mdtype]);
}

size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
  size_t used = 0;
  ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    Metaspace* msp = iter.get_next();
    // Sum allocated_blocks_words for each metaspace
    if (msp != NULL) {
      used += msp->used_words_slow(mdtype);
    }
  }
  return used * BytesPerWord;
}

size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
  size_t free = 0;
  ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    Metaspace* msp = iter.get_next();
    if (msp != NULL) {
      free += msp->free_words_slow(mdtype);
    }
  }
  return free * BytesPerWord;
}

size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
  if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
    return 0;
  }
  // Don't count the space in the freelists.  That space will be
  // added to the capacity calculation as needed.
  size_t capacity = 0;
  ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    Metaspace* msp = iter.get_next();
    if (msp != NULL) {
      capacity += msp->capacity_words_slow(mdtype);
    }
  }
  return capacity * BytesPerWord;
}

size_t MetaspaceAux::capacity_bytes_slow() {
#ifdef PRODUCT
  // Use allocated_capacity_bytes() in PRODUCT instead of this function.
  guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
#endif
  size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
  size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
  assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
      err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
        " class_capacity + non_class_capacity " SIZE_FORMAT
        " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
        allocated_capacity_bytes(), class_capacity + non_class_capacity,
        class_capacity, non_class_capacity));

  return class_capacity + non_class_capacity;
}

size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  return list == NULL ? 0 : list->reserved_bytes();
}

size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  return list == NULL ? 0 : list->committed_bytes();
}

size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }

size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  if (list == NULL) {
    return 0;
  }
  ChunkManager* chunk = list->chunk_manager();
  chunk->slow_verify();
  return chunk->free_chunks_total_words();
}

size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
  return free_chunks_total_words(mdtype) * BytesPerWord;
}

size_t MetaspaceAux::free_chunks_total_words() {
  return free_chunks_total_words(Metaspace::ClassType) +
         free_chunks_total_words(Metaspace::NonClassType);
}

size_t MetaspaceAux::free_chunks_total_bytes() {
  return free_chunks_total_words() * BytesPerWord;
}

void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
  gclog_or_tty->print(", [Metaspace:");
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_metadata_used,
                        allocated_used_bytes(),
                        reserved_bytes());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_metadata_used/K,
                        allocated_used_bytes()/K,
                        reserved_bytes()/K);
  }

  gclog_or_tty->print("]");
}

// This is printed when PrintGCDetails
void MetaspaceAux::print_on(outputStream* out) {
  Metaspace::MetadataType nct = Metaspace::NonClassType;

  out->print_cr(" Metaspace total "
                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
                " reserved " SIZE_FORMAT "K",
                allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_bytes()/K);

  out->print_cr("  data space     "
                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
                " reserved " SIZE_FORMAT "K",
                allocated_capacity_bytes(nct)/K,
                allocated_used_bytes(nct)/K,
                reserved_bytes(nct)/K);
  if (Metaspace::using_class_space()) {
    Metaspace::MetadataType ct = Metaspace::ClassType;
    out->print_cr("  class space    "
                  SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
                  " reserved " SIZE_FORMAT "K",
                  allocated_capacity_bytes(ct)/K,
                  allocated_used_bytes(ct)/K,
                  reserved_bytes(ct)/K);
  }
}

// Print information for class space and data space separately.
// This is almost the same as above.
void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
  size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
  size_t capacity_bytes = capacity_bytes_slow(mdtype);
  size_t used_bytes = used_bytes_slow(mdtype);
  size_t free_bytes = free_bytes_slow(mdtype);
  size_t used_and_free = used_bytes + free_bytes +
                           free_chunks_capacity_bytes;
  out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
             "K + unused in chunks " SIZE_FORMAT "K  + "
             " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
             "K  capacity in allocated chunks " SIZE_FORMAT "K",
             used_bytes / K,
             free_bytes / K,
             free_chunks_capacity_bytes / K,
             used_and_free / K,
             capacity_bytes / K);
  // Accounting can only be correct if we got the values during a safepoint
  assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
}

// Print total fragmentation for class metaspaces
void MetaspaceAux::print_class_waste(outputStream* out) {
  assert(Metaspace::using_class_space(), "class metaspace not used");
  size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
  size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
  ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    Metaspace* msp = iter.get_next();
    if (msp != NULL) {
      cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
      cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
      cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
      cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
      cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
      cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
      cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
    }
  }
  out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
                SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
                SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
                "large count " SIZE_FORMAT,
                cls_specialized_count, cls_specialized_waste,
                cls_small_count, cls_small_waste,
                cls_medium_count, cls_medium_waste, cls_humongous_count);
}

// Print total fragmentation for data and class metaspaces separately
void MetaspaceAux::print_waste(outputStream* out) {
  size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
  size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;

  ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    Metaspace* msp = iter.get_next();
    if (msp != NULL) {
      specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
      specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
      small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
      small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
      medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
      medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
      humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
    }
  }
  out->print_cr("Total fragmentation waste (words) doesn't count free space");
  out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
                        SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
                        SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
                        "large count " SIZE_FORMAT,
             specialized_count, specialized_waste, small_count,
             small_waste, medium_count, medium_waste, humongous_count);
  if (Metaspace::using_class_space()) {
    print_class_waste(out);
  }
}

// Dump global metaspace things from the end of ClassLoaderDataGraph
void MetaspaceAux::dump(outputStream* out) {
  out->print_cr("All Metaspace:");
  out->print("data space: "); print_on(out, Metaspace::NonClassType);
  out->print("class space: "); print_on(out, Metaspace::ClassType);
  print_waste(out);
}

void MetaspaceAux::verify_free_chunks() {
  Metaspace::space_list()->chunk_manager()->verify();
  if (Metaspace::using_class_space()) {
    Metaspace::class_space_list()->chunk_manager()->verify();
  }
}

void MetaspaceAux::verify_capacity() {
#ifdef ASSERT
  size_t running_sum_capacity_bytes = allocated_capacity_bytes();
  // For purposes of the running sum of capacity, verify against capacity
  size_t capacity_in_use_bytes = capacity_bytes_slow();
  assert(running_sum_capacity_bytes == capacity_in_use_bytes,
    err_msg("allocated_capacity_words() * BytesPerWord " SIZE_FORMAT
            " capacity_bytes_slow()" SIZE_FORMAT,
            running_sum_capacity_bytes, capacity_in_use_bytes));
  for (Metaspace::MetadataType i = Metaspace::ClassType;
       i < Metaspace::MetadataTypeCount;
       i = (Metaspace::MetadataType)(i + 1)) {
    size_t capacity_in_use_bytes = capacity_bytes_slow(i);
    assert(allocated_capacity_bytes(i) == capacity_in_use_bytes,
      err_msg("allocated_capacity_bytes(%u) " SIZE_FORMAT
              " capacity_bytes_slow(%u)" SIZE_FORMAT,
              i, allocated_capacity_bytes(i), i, capacity_in_use_bytes));
  }
#endif
}

void MetaspaceAux::verify_used() {
#ifdef ASSERT
  size_t running_sum_used_bytes = allocated_used_bytes();
  // For purposes of the running sum of used, verify against used
  size_t used_in_use_bytes = used_bytes_slow();
  assert(allocated_used_bytes() == used_in_use_bytes,
    err_msg("allocated_used_bytes() " SIZE_FORMAT
            " used_bytes_slow() " SIZE_FORMAT,
            allocated_used_bytes(), used_in_use_bytes));
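  // Likewise verify the per-type running sums against the slow counts.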
  for (Metaspace::MetadataType i = Metaspace::ClassType;
       i < Metaspace::MetadataTypeCount;
       i = (Metaspace::MetadataType)(i + 1)) {
    size_t used_in_use_bytes = used_bytes_slow(i);
    assert(allocated_used_bytes(i) == used_in_use_bytes,
      err_msg("allocated_used_bytes(%u) " SIZE_FORMAT
              " used_bytes_slow(%u) " SIZE_FORMAT,
              i, allocated_used_bytes(i), i, used_in_use_bytes));
  }
#endif
}

void MetaspaceAux::verify_metrics() {
  verify_capacity();
  verify_used();
}


// Metaspace methods

size_t Metaspace::_first_chunk_word_size = 0;
size_t Metaspace::_first_class_chunk_word_size = 0;

Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
  initialize(lock, type);
}

Metaspace::~Metaspace() {
  delete _vsm;
  if (using_class_space()) {
    delete _class_vsm;
  }
}

VirtualSpaceList* Metaspace::_space_list = NULL;
VirtualSpaceList* Metaspace::_class_space_list = NULL;

#define VIRTUALSPACEMULTIPLIER 2

#ifdef _LP64
void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
  // Figure out the narrow_klass_base and the narrow_klass_shift.  The
  // narrow_klass_base is the lower of the metaspace base and the cds base
  // (if cds is enabled).  The narrow_klass_shift depends on the distance
  // between the lower base and higher address.
  address lower_base;
  address higher_address;
  if (UseSharedSpaces) {
    higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
                          (address)(metaspace_base + class_metaspace_size()));
    lower_base = MIN2(metaspace_base, cds_base);
  } else {
    higher_address = metaspace_base + class_metaspace_size();
    lower_base = metaspace_base;
  }
  Universe::set_narrow_klass_base(lower_base);
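  // If the whole range fits within a 32-bit span (max_juint) of the base,
  // narrow klass pointers can be encoded with a zero shift.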
  if ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint) {
    Universe::set_narrow_klass_shift(0);
  } else {
    assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
    Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
  }
}

// Return TRUE if the specified metaspace_base and cds_base are close enough
// to work with compressed klass pointers.
bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
  assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
  address lower_base = MIN2((address)metaspace_base, cds_base);
  address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
                                (address)(metaspace_base + class_metaspace_size()));
  return ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint);
}

// Try to allocate the metaspace at the requested addr.
void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
  assert(using_class_space(), "called improperly");
  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
  assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
         "Metaspace size is too big");

  ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(),
                                             os::vm_allocation_granularity(),
                                             false, requested_addr, 0);
  if (!metaspace_rs.is_reserved()) {
    if (UseSharedSpaces) {
      // Keep trying to allocate the metaspace, increasing the requested_addr
      // by 1GB each time, until we reach an address that will no longer allow
      // use of CDS with compressed klass pointers.
      char *addr = requested_addr;
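      // The (addr + 1*G > addr) test below also guards against address wrap-around.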
      while (!metaspace_rs.is_reserved() && (addr + 1*G > addr) &&
             can_use_cds_with_metaspace_addr(addr + 1*G, cds_base)) {
        addr = addr + 1*G;
        metaspace_rs = ReservedSpace(class_metaspace_size(),
                                     os::vm_allocation_granularity(), false, addr, 0);
      }
    }

    // If no successful allocation then try to allocate the space anywhere.  If
    // that fails then OOM doom.  At this point we cannot try allocating the
    // metaspace as if UseCompressedClassPointers is off because too much
    // initialization has happened that depends on UseCompressedClassPointers.
    // So, UseCompressedClassPointers cannot be turned off at this point.
    if (!metaspace_rs.is_reserved()) {
      metaspace_rs = ReservedSpace(class_metaspace_size(),
                                   os::vm_allocation_granularity(), false);
      if (!metaspace_rs.is_reserved()) {
        vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes",
                                              class_metaspace_size()));
      }
    }
  }

  // If we got here then the metaspace got allocated.
  MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);

  // Verify that we can use shared spaces.  Otherwise, turn off CDS.
  if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
    FileMapInfo::stop_sharing_and_unmap(
        "Could not allocate metaspace at a compatible address");
  }

  set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
                                  UseSharedSpaces ? (address)cds_base : 0);

  initialize_class_space(metaspace_rs);

  if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
    gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
                            Universe::narrow_klass_base(), Universe::narrow_klass_shift());
    gclog_or_tty->print_cr("Metaspace Size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
                           class_metaspace_size(), metaspace_rs.base(), requested_addr);
  }
}

// For UseCompressedClassPointers the class space is reserved above the top of
// the Java heap.  The argument passed in is at the base of the compressed space.
void Metaspace::initialize_class_space(ReservedSpace rs) {
  // The reserved space size may be bigger because of alignment, esp with UseLargePages
  assert(rs.size() >= CompressedClassSpaceSize,
         err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
  assert(using_class_space(), "Must be using class space");
  _class_space_list = new VirtualSpaceList(rs);
}

#endif

void Metaspace::global_initialize() {
  // Initialize the alignment for shared spaces.
  int max_alignment = os::vm_page_size();
  size_t cds_total = 0;

  set_class_metaspace_size(align_size_up(CompressedClassSpaceSize,
                                         os::vm_allocation_granularity()));

  MetaspaceShared::set_max_alignment(max_alignment);

  if (DumpSharedSpaces) {
    SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
    SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
    SharedMiscDataSize  = align_size_up(SharedMiscDataSize, max_alignment);
    SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize, max_alignment);

    // Initialize with the sum of the shared space sizes.  The read-only
    // and read write metaspace chunks will be allocated out of this and the
    // remainder is the misc code and data chunks.
    cds_total = FileMapInfo::shared_spaces_size();
    _space_list = new VirtualSpaceList(cds_total/wordSize);

#ifdef _LP64
    // Set the compressed klass pointer base so that decoding of these pointers works
    // properly when creating the shared archive.
    assert(UseCompressedOops && UseCompressedClassPointers,
      "UseCompressedOops and UseCompressedClassPointers must be set");
    Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
    if (TraceMetavirtualspaceAllocation && Verbose) {
      gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
                             _space_list->current_virtual_space()->bottom());
    }

    // Set the shift to zero.
    assert(class_metaspace_size() < (uint64_t)(max_juint) - cds_total,
           "CDS region is too large");
    Universe::set_narrow_klass_shift(0);
#endif

  } else {
    // If using shared space, open the file that contains the shared space
    // and map in the memory before initializing the rest of metaspace (so
    // the addresses don't conflict)
    address cds_address = NULL;
    if (UseSharedSpaces) {
      FileMapInfo* mapinfo = new FileMapInfo();
      memset(mapinfo, 0, sizeof(FileMapInfo));

      // Open the shared archive file, read and validate the header. If
      // initialization fails, shared spaces [UseSharedSpaces] are
      // disabled and the file is closed.
      // Map in spaces now also
      if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
        FileMapInfo::set_current_info(mapinfo);
      } else {
        assert(!mapinfo->is_open() && !UseSharedSpaces,
               "archive file not closed or shared spaces not disabled.");
      }
      cds_total = FileMapInfo::shared_spaces_size();
      cds_address = (address)mapinfo->region_base(0);
    }

#ifdef _LP64
    // If UseCompressedClassPointers is set then allocate the metaspace area
    // above the heap and above the CDS area (if it exists).
    if (using_class_space()) {
      if (UseSharedSpaces) {
        allocate_metaspace_compressed_klass_ptrs((char *)(cds_address + cds_total), cds_address);
      } else {
        allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0);
      }
    }
#endif

    // Initialize these before initializing the VirtualSpaceList
    _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
    _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
    // Make the first class chunk bigger than a medium chunk so it's not put
    // on the medium chunk list.  The next chunk will be small and progress
    // from there.  This size was determined empirically by running with -version.
    _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
                                        (CompressedClassSpaceSize/BytesPerWord)*2);
    _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
    // Arbitrarily set the initial virtual space to a multiple
    // of the boot class loader size.
    size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
    // Initialize the list of virtual spaces.
    _space_list = new VirtualSpaceList(word_size);
  }
}

void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
  assert(space_list() != NULL,
    "Metadata VirtualSpaceList has not been initialized");

  _vsm = new SpaceManager(NonClassType, lock, space_list());
  if (_vsm == NULL) {
    return;
  }
  size_t word_size;
  size_t class_word_size;
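  // The initial chunk sizes depend on the kind of metaspace being created
  // (e.g. boot class loader vs. ordinary loader).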
  vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);

  if (using_class_space()) {
    assert(class_space_list() != NULL,
      "Class VirtualSpaceList has not been initialized");

    // Allocate SpaceManager for classes.
    _class_vsm = new SpaceManager(ClassType, lock, class_space_list());
    if (_class_vsm == NULL) {
      return;
    }
  }

  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);

  // Allocate chunk for metadata objects
  Metachunk* new_chunk =
     space_list()->get_initialization_chunk(word_size,
                                            vsm()->medium_chunk_bunch());
  assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
  if (new_chunk != NULL) {
    // Add to this manager's list of chunks in use and current_chunk().
    vsm()->add_chunk(new_chunk, true);
  }

  // Allocate chunk for class metadata objects
  if (using_class_space()) {
    Metachunk* class_chunk =
       class_space_list()->get_initialization_chunk(class_word_size,
                                                    class_vsm()->medium_chunk_bunch());
    if (class_chunk != NULL) {
      class_vsm()->add_chunk(class_chunk, true);
    }
  }

  _alloc_record_head = NULL;
  _alloc_record_tail = NULL;
}

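// Round a word size up so that the corresponding byte size is aligned to
// the ReservedSpace allocation alignment.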
size_t Metaspace::align_word_size_up(size_t word_size) {
  size_t byte_size = word_size * wordSize;
  return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
}

MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
  // DumpSharedSpaces doesn't use class metadata area (yet)
  // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
  if (mdtype == ClassType && using_class_space()) {
    return class_vsm()->allocate(word_size);
  } else {
    return vsm()->allocate(word_size);
  }
  }
}

MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
  MetaWord* result;
  MetaspaceGC::set_expand_after_GC(true);
  size_t before_inc = MetaspaceGC::capacity_until_GC();
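  // Raise the high-water mark (capacity_until_GC) by at least enough to
  // satisfy this request before retrying the allocation.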
  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord;
  MetaspaceGC::inc_capacity_until_GC(delta_bytes);
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
      " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC());
  }

  result = allocate(word_size, mdtype);

  return result;
}

// Space allocated in the Metaspace.  This may
// be across several metadata virtual spaces.
char* Metaspace::bottom() const {
  assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
  return (char*)vsm()->current_chunk()->bottom();
}

size_t Metaspace::used_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
  }
}

size_t Metaspace::free_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_free_in_chunks_in_use();
  }
}

// Space capacity in the Metaspace.  It includes
// space in the list of chunks from which allocations
// have been made.  Doesn't include space in the global freelist or
// space available in the dictionary, which
// is already counted in some chunk.
size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_capacity_in_chunks_in_use();
  }
}

size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
  return used_words_slow(mdtype) * BytesPerWord;
}

size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
  return capacity_words_slow(mdtype) * BytesPerWord;
}

void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
  if (SafepointSynchronize::is_at_safepoint()) {
    assert(Thread::current()->is_VM_thread(), "should be the VM thread");
    // Don't take Heap_lock
    MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
    if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
      // Dark matter.  Too small for dictionary.
#ifdef ASSERT
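      // Fill the freed block with a recognizable pattern in debug builds
      // so stale references to it are easier to spot.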
      Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
#endif
      return;
    }
    if (is_class && using_class_space()) {
      class_vsm()->deallocate(ptr, word_size);
    } else {
      vsm()->deallocate(ptr, word_size);
    }
  } else {
    MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);

    if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
      // Dark matter.  Too small for dictionary.
#ifdef ASSERT
      Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
#endif
      return;
    }
    if (is_class && using_class_space()) {
      class_vsm()->deallocate(ptr, word_size);
    } else {
      vsm()->deallocate(ptr, word_size);
    }
  }
}

Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                               bool read_only, MetaspaceObj::Type type, TRAPS) {
  if (HAS_PENDING_EXCEPTION) {
    assert(false, "Should not allocate with exception pending");
    return NULL;  // caller does a CHECK_NULL too
  }

  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;

  // SSS: Should we align the allocations and make sure the sizes are aligned?
  MetaWord* result = NULL;

  assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
        "ClassLoaderData::the_null_class_loader_data() should have been used.");
  // Allocate in metaspaces without taking out a lock, because it deadlocks
  // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
  // to revisit this for application class data sharing.
  if (DumpSharedSpaces) {
    assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
    Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
    result = space->allocate(word_size, NonClassType);
    if (result == NULL) {
      report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
    } else {
      space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
    }
    return Metablock::initialize(result, word_size);
  }

  result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);

  if (result == NULL) {
    // Try to clean out some memory and retry.
    result =
      Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
        loader_data, word_size, mdtype);

    // If result is still null, we are out of memory.
    if (result == NULL) {
      if (Verbose && TraceMetadataChunkAllocation) {
        gclog_or_tty->print_cr("Metaspace allocation failed for size "
          SIZE_FORMAT, word_size);
        if (loader_data->metaspace_or_null() != NULL) loader_data->dump(gclog_or_tty);
        MetaspaceAux::dump(gclog_or_tty);
      }
      // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
      const char* space_string = (mdtype == ClassType) ? "Compressed class space" :
                                                         "Metadata space";
      report_java_out_of_memory(space_string);

      if (JvmtiExport::should_post_resource_exhausted()) {
        JvmtiExport::post_resource_exhausted(
            JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
            space_string);
      }
      if (mdtype == ClassType) {
        THROW_OOP_0(Universe::out_of_memory_error_class_metaspace());
      } else {
        THROW_OOP_0(Universe::out_of_memory_error_metaspace());
      }
    }
  }
  return Metablock::initialize(result, word_size);
}

void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
  assert(DumpSharedSpaces, "sanity");

  AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize);
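  // Append to the tail of the singly-linked record list; records are kept in
  // allocation (and, during dumping, address) order, which iterate() relies on.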
  if (_alloc_record_head == NULL) {
    _alloc_record_head = _alloc_record_tail = rec;
  } else {
    _alloc_record_tail->_next = rec;
    _alloc_record_tail = rec;
  }
}

void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
  assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");

  address last_addr = (address)bottom();
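  // last_addr tracks the end of the previous record; anything between it and
  // the next record's start is unrecorded and reported as UnknownType.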

  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
    address ptr = rec->_ptr;
    if (last_addr < ptr) {
      closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
    }
    closure->doit(ptr, rec->_type, rec->_byte_size);
    last_addr = ptr + rec->_byte_size;
  }

  address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
  if (last_addr < top) {
    closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
  }
}

void Metaspace::purge() {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  space_list()->purge();
  if (using_class_space()) {
    class_space_list()->purge();
  }
}

void Metaspace::print_on(outputStream* out) const {
  // Print both class virtual space counts and metaspace.
  if (Verbose) {
    vsm()->print_on(out);
    if (using_class_space()) {
      class_vsm()->print_on(out);
    }
  }
}

bool Metaspace::contains(const void* ptr) {
  if (MetaspaceShared::is_in_shared_space(ptr)) {
    return true;
  }
  // This is checked while unlocked.  As long as the virtualspaces are added
  // at the end, the pointer will be in one of them.  The virtual spaces
  // aren't deleted presently.  When they are, some sort of locking might
  // be needed.  Note, locking this can cause inversion problems with the
  // caller in MetaspaceObj::is_metadata() function.
  return space_list()->contains(ptr) ||
         (using_class_space() && class_space_list()->contains(ptr));
}

void Metaspace::verify() {
  vsm()->verify();
  if (using_class_space()) {
    class_vsm()->verify();
  }
}

void Metaspace::dump(outputStream* const out) const {
  out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
  vsm()->dump(out);
  if (using_class_space()) {
    out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
    class_vsm()->dump(out);
  }
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT

class MetaspaceAuxTest : AllStatic {
 public:
  static void test_reserved() {
    size_t reserved = MetaspaceAux::reserved_bytes();

    assert(reserved > 0, "assert");

    size_t committed  = MetaspaceAux::committed_bytes();
    assert(committed <= reserved, "assert");

    size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
    assert(reserved_metadata > 0, "assert");
    assert(reserved_metadata <= reserved, "assert");

    if (UseCompressedClassPointers) {
      size_t reserved_class    = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
      assert(reserved_class > 0, "assert");
      assert(reserved_class < reserved, "assert");
    }
  }

  static void test_committed() {
    size_t committed = MetaspaceAux::committed_bytes();

    assert(committed > 0, "assert");

    size_t reserved  = MetaspaceAux::reserved_bytes();
    assert(committed <= reserved, "assert");

    size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
    assert(committed_metadata > 0, "assert");
    assert(committed_metadata <= committed, "assert");

    if (UseCompressedClassPointers) {
      size_t committed_class    = MetaspaceAux::committed_bytes(Metaspace::ClassType);
      assert(committed_class > 0, "assert");
      assert(committed_class < committed, "assert");
    }
  }

  static void test() {
    test_reserved();
    test_committed();
  }
};

void MetaspaceAux_test() {
  MetaspaceAuxTest::test();
}

#endif