/*
 * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/allocation.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/freeList.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/filemap.hpp"
#include "memory/freeList.hpp"
#include "memory/metablock.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/globals.hpp"
#include "runtime/java.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"

typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
// Set this constant to true to enable slow integrity checking of
// the free chunk lists
const bool metaspace_slow_verify = false;

// Parameters for stress mode testing
const uint metadata_deallocate_a_lot_block = 10;
const uint metadata_deallocate_a_lock_chunk = 3;
size_t const allocation_from_dictionary_limit = 4 * K;

MetaWord* last_allocated = 0;

size_t Metaspace::_class_metaspace_size;

// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};
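
// Only the first three indices (specialized, small, medium) have fixed-size
// free lists; humongous chunks come in varying sizes and are kept in a
// dictionary instead.  That is why NumberOfFreeLists is 3 while
// NumberOfInUseLists, which also covers the humongous chunks held by a
// SpaceManager, is 4.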

enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
  MediumChunk = 8 * K,
  HumongousChunkGranularity = 8
};
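
// For example, with 8-byte words (a 64-bit VM) a non-class SmallChunk of 512
// words is 4K bytes and a non-class MediumChunk of 8 * K words is 64K bytes;
// ClassSmallChunk and ClassMediumChunk are half the corresponding non-class
// sizes.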

static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bound");
  return (ChunkIndex) (i+1);
}

// Originally _capacity_until_GC was set to MetaspaceSize here but
// the default MetaspaceSize before argument processing was being
// used which was not the desired value.  See the code
// in should_expand() to see how the initialization is handled
// now.
size_t MetaspaceGC::_capacity_until_GC = 0;
bool MetaspaceGC::_expand_after_GC = false;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

// Blocks of space for metadata are allocated out of Metachunks.
//
// Metachunks are allocated out of MetadataVirtualspaces and once
// allocated there is no explicit link between a Metachunk and
// the MetadataVirtualspaces from which it was allocated.
//
// Each SpaceManager maintains a
// list of the chunks it is using and the current chunk.  The current
// chunk is the chunk from which allocations are done.  Space freed in
// a chunk is placed on the free list of blocks (BlockFreelist) and
// reused from there.
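//
// Roughly, an allocation is therefore satisfied as follows (a sketch; the
// details live in SpaceManager, ChunkManager and VirtualSpaceList below):
//   1. try the SpaceManager's BlockFreelist of previously freed blocks,
//   2. otherwise bump-allocate from the SpaceManager's current chunk,
//   3. otherwise take a free chunk from the ChunkManager, or carve a new
//      one out of a VirtualSpaceNode, and make it the current chunk.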

typedef class FreeList<Metachunk> ChunkList;

// Manages the global free lists of chunks.
// Has three lists of free chunks, and a total size and
// count that includes all three

class ChunkManager : public CHeapObj<mtInternal> {

  // Free list of chunks of different sizes.
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  //   HumongousChunk
  ChunkList _free_chunks[NumberOfFreeLists];

  //   HumongousChunk
  ChunkTreeDictionary _humongous_dictionary;

  // Total size (in words) and count of the chunks on all free lists of this ChunkManager
  size_t _free_chunks_total;
  size_t _free_chunks_count;

  void dec_free_chunks_total(size_t v) {
    assert(_free_chunks_count > 0 &&
             _free_chunks_total > 0,
             "About to go negative");
    Atomic::add_ptr(-1, &_free_chunks_count);
    jlong minus_v = (jlong) - (jlong) v;
    Atomic::add_ptr(minus_v, &_free_chunks_total);
  }

  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }
  void verify_free_chunks_count();

 public:

  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
      : _free_chunks_total(0), _free_chunks_count(0) {
    _free_chunks[SpecializedIndex].set_size(specialized_size);
    _free_chunks[SmallIndex].set_size(small_size);
    _free_chunks[MediumIndex].set_size(medium_size);
  }

  // add or delete (return) a chunk to the global freelist.
  Metachunk* chunk_freelist_allocate(size_t word_size);
  void chunk_freelist_deallocate(Metachunk* chunk);

  // Map a size to a list index assuming that there are lists
  // for special, small, medium, and humongous chunks.
  static ChunkIndex list_index(size_t size);

  // Remove the chunk from its freelist.  It is
  // expected to be on one of the _free_chunks[] lists.
  void remove_chunk(Metachunk* chunk);

  // Add the simple linked list of chunks to the freelist of chunks
  // of type index.
  void return_chunks(ChunkIndex index, Metachunk* chunks);

  // Total of the space in the free chunks list
  size_t free_chunks_total_words();
  size_t free_chunks_total_bytes();

  // Number of chunks in the free chunks list
  size_t free_chunks_count();

  void inc_free_chunks_total(size_t v, size_t count = 1) {
    Atomic::add_ptr(count, &_free_chunks_count);
    Atomic::add_ptr(v, &_free_chunks_total);
  }
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  ChunkList* free_chunks(ChunkIndex index);

  // Returns the list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  // Add and remove from a list by size.  Selects
  // list based on size of chunk.
  void free_chunks_put(Metachunk* chunk);
  Metachunk* free_chunks_get(size_t chunk_word_size);

  // Debug support
  void verify();
  void slow_verify() {
    if (metaspace_slow_verify) {
      verify();
    }
  }
  void locked_verify();
  void slow_locked_verify() {
    if (metaspace_slow_verify) {
      locked_verify();
    }
  }
  void verify_free_chunks_total();

  void locked_print_free_chunks(outputStream* st);
  void locked_print_sum_free_chunks(outputStream* st);

  void print_on(outputStream* st) const;
};
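
// For example, ChunkManager::list_index() maps the three fixed sizes back to
// their lists: for the non-class metaspace, list_index(SpecializedChunk) is
// SpecializedIndex, list_index(SmallChunk) is SmallIndex and
// list_index(MediumChunk) is MediumIndex; any other size is treated as
// humongous and kept in _humongous_dictionary, keyed by chunk size.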

// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist VALUE_OBJ_CLASS_SPEC {
  BlockTreeDictionary* _dictionary;
  static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size);

  // Only allocate and split from freelist if the size of the allocation
  // is at least 1/4th the size of the available block.
  const static int WasteMultiplier = 4;

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get and return a block to the free list
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() {
    if (dictionary() == NULL) {
      return 0;
    } else {
      return dictionary()->total_size();
    }
  }

  void print_on(outputStream* st) const;
};
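
// For example, with WasteMultiplier == 4 a request for 100 words is never
// satisfied from a free block of more than 400 words; the oversized block is
// put back and the caller allocates fresh space instead, which bounds the
// waste carried inside any block handed out from this freelist.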

class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // total in the VirtualSpace
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  MetaWord* _top;
  // count of chunks contained in this VirtualSpace
  uintx _container_count;

  // Convenience functions to access the _virtual_space
  char* low()  const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

  // The first Metachunk will be allocated at the bottom of the
  // VirtualSpace
  Metachunk* first_chunk() { return (Metachunk*) bottom(); }

 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
  ~VirtualSpaceNode();

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }

  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
  size_t expanded_words() const  { return _virtual_space.committed_size() / BytesPerWord; }
  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }

  // address of next available space in _virtual_space;
  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the VirtualSpace
  bool is_available(size_t word_size) { return _top + word_size <= end(); }

  MetaWord* top() const { return _top; }
  void inc_top(size_t word_size) { _top += word_size; }

  uintx container_count() { return _container_count; }
  void inc_container_count();
  void dec_container_count();
#ifdef ASSERT
  uint container_count_slow();
  void verify_container_count();
#endif

  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;
  size_t free_words_in_vs() const;

  bool initialize();

  // get space from the virtual space
  Metachunk* take_from_committed(size_t chunk_word_size);

  // Allocate a chunk from the virtual space and return it.
  Metachunk* get_chunk_vs(size_t chunk_word_size);

  // Expands/shrinks the committed space in a virtual space.  Delegates
  // to Virtualspace
  bool expand_by(size_t words, bool pre_touch = false);

  // In preparation for deleting this node, remove all the chunks
  // in the node from any freelist.
  void purge(ChunkManager* chunk_manager);

#ifdef ASSERT
  // Debug support
  void mangle();
#endif

  void print_on(outputStream* st) const;
};

  // byte_size is the size of the associated virtualspace.
VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
  // align up to vm allocation granularity
  byte_size = align_size_up(byte_size, os::vm_allocation_granularity());

  // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
  // configurable address, generally at the top of the Java heap so other
  // memory addresses don't conflict.
  if (DumpSharedSpaces) {
    char* shared_base = (char*)SharedBaseAddress;
    _rs = ReservedSpace(byte_size, 0, false, shared_base, 0);
    if (_rs.is_reserved()) {
      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
    } else {
      // Get a mmap region anywhere if the SharedBaseAddress fails.
      _rs = ReservedSpace(byte_size);
    }
    MetaspaceShared::set_shared_rs(&_rs);
  } else {
    _rs = ReservedSpace(byte_size);
  }

  MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
}

void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    assert(chunk->is_free(), "Should be marked free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}

#ifdef ASSERT
uint VirtualSpaceNode::container_count_slow() {
  uint count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    // Don't count the chunks on the free lists.  Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif

// List of VirtualSpaces for metadata allocation.
// It has a  _next link for singly linked list and a MemRegion
// for total space in the VirtualSpace.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Global list of virtual spaces
  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;

  // Can this virtual list allocate >1 spaces?  Also, used to determine
  // whether to allocate unlimited small chunks in this virtual space
  bool _is_class;
  bool can_grow() const { return !is_class() || !UseCompressedClassPointers; }

  // Sum of reserved and committed memory in the virtual spaces
  size_t _reserved_words;
  size_t _committed_words;

  // Number of virtual spaces
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  void link_vs(VirtualSpaceNode* new_entry);

  // Get another virtual space and add it to the list.  This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool grow_vs(size_t vs_word_size);

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  size_t free_bytes();

  Metachunk* get_new_chunk(size_t word_size,
                           size_t grow_chunks_by_words,
                           size_t medium_chunk_bunch);

  bool expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch = false);

  // Get the first chunk for a Metaspace.  Used for
  // special cases such as the boot class loader, reflection
  // class loader and anonymous class loader.
  Metachunk* get_initialization_chunk(size_t word_size, size_t chunk_bunch);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  bool is_class() const { return _is_class; }

  // Allocate the first virtualspace.
  void initialize(size_t word_size);

  size_t reserved_words()  { return _reserved_words; }
  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
  size_t committed_words() { return _committed_words; }
  size_t committed_bytes() { return committed_words() * BytesPerWord; }

  void inc_reserved_words(size_t v);
  void dec_reserved_words(size_t v);
  void inc_committed_words(size_t v);
  void dec_committed_words(size_t v);
  void inc_virtual_space_count();
  void dec_virtual_space_count();

  // Unlink empty VirtualSpaceNodes and free them.
  void purge(ChunkManager* chunk_manager);

  bool contains(const void *ptr);

  void print_on(outputStream* st) const;

  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
};

class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _deallocate_block_a_lot_count;
  static int _deallocate_chunk_a_lot_count;
  static int _allocation_fail_alot_count;

 public:
  static int deallocate_block_a_lot_count() {
    return _deallocate_block_a_lot_count;
  }
  static void set_deallocate_block_a_lot_count(int v) {
    _deallocate_block_a_lot_count = v;
  }
  static void inc_deallocate_block_a_lot_count() {
    _deallocate_block_a_lot_count++;
  }
  static int deallocate_chunk_a_lot_count() {
    return _deallocate_chunk_a_lot_count;
  }
  static void reset_deallocate_chunk_a_lot_count() {
    _deallocate_chunk_a_lot_count = 1;
  }
  static void inc_deallocate_chunk_a_lot_count() {
    _deallocate_chunk_a_lot_count++;
  }

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif

  static void deallocate_chunk_a_lot(SpaceManager* sm,
                                     size_t chunk_word_size);
  static void deallocate_block_a_lot(SpaceManager* sm,
                                     size_t chunk_word_size);

};

int Metadebug::_deallocate_block_a_lot_count = 0;
int Metadebug::_deallocate_chunk_a_lot_count = 0;
int Metadebug::_allocation_fail_alot_count = 0;

//  SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:

  // protects allocations and contains.
  Mutex* const _lock;

  // Type of metadata allocated.
  Metaspace::MetadataType _mdtype;

  // List of chunks in use by this SpaceManager.  Allocations
  // are done from the current chunk.  The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Number of small chunks to allocate to a manager
  // If class space manager, small chunks are unlimited
  static uint const _small_chunk_limit;

  // Sum of all space in allocated chunks
  size_t _allocated_blocks_words;

  // Sum of all allocated chunks
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist _block_freelists;

  // protects virtualspace and chunk expansions
  static const char*  _expand_lock_name;
  static const int    _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) { _chunks_in_use[index] = v; }

  BlockFreelist* block_freelists() const {
    return (BlockFreelist*) &_block_freelists;
  }

  Metaspace::MetadataType mdtype() { return _mdtype; }

  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);
  void retire_current_chunk();

  Mutex* lock() const { return _lock; }

  const char* chunk_size_name(ChunkIndex index) const;

 protected:
  void initialize();

 public:
  SpaceManager(Metaspace::MetadataType mdtype,
               Mutex* lock);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  bool is_class() { return _mdtype == Metaspace::ClassType; }

  // Accessors
  size_t specialized_chunk_size() { return SpecializedChunk; }
  size_t small_chunk_size() { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
  size_t medium_chunk_size() { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
  size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }

  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
  size_t allocated_chunks_count() const { return _allocated_chunks_count; }

  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }

  // Increment the per Metaspace and global running sums for Metachunks
  // by the given size.  This is used when a Metachunk is added to
  // the in-use list.
  void inc_size_metrics(size_t words);
  // Increment the per Metaspace and global running sums for Metablocks by the given
  // size.  This is used when a Metablock is allocated.
  void inc_used_metrics(size_t words);
  // Delete the portion of the running sums for this SpaceManager. That is,
  // the globals running sums for the Metachunks and Metablocks are
  // decremented for all the Metachunks in-use by this SpaceManager.
  void dec_total_from_size_metrics();

  // Set the sizes for the initial chunks.
  void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
                               size_t* chunk_word_size,
                               size_t* class_chunk_word_size);

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk
  MetaWord* allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size,
  // returned chunk size (for expanding space for chunk allocation).
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
  void verify_chunk_size(Metachunk* chunk);
  NOT_PRODUCT(void mangle_freed_chunks();)
#ifdef ASSERT
  void verify_allocated_blocks_words();
#endif

  size_t get_raw_word_size(size_t word_size) {
    // If only the dictionary is going to be used (i.e., no
    // indexed free list), then there is a minimum size requirement.
    // MinChunkSize is a placeholder for the real minimum size JJJ
    size_t byte_size = word_size * BytesPerWord;

    size_t raw_bytes_size = MAX2(byte_size,
                                 Metablock::min_block_byte_size());
    raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
  }
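
  // For instance, if Metablock::min_block_byte_size() were 16 and ARENA_ALIGN
  // rounded to 16-byte boundaries, a 1-word (8-byte) request would be raised
  // to 16 bytes, i.e. get_raw_word_size() would return 2 words on a 64-bit VM.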
};

uint const SpaceManager::_small_chunk_limit = 4;

const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag);

void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count++;
  assert(_container_count == container_count_slow(),
         err_msg("Inconsistency in container_count _container_count " SIZE_FORMAT
                 " container_count_slow() " SIZE_FORMAT,
                 _container_count, container_count_slow()));
}

void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count--;
}

#ifdef ASSERT
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
    err_msg("Inconsistency in container_count _container_count " SIZE_FORMAT
            " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
}
#endif

// BlockFreelist methods

BlockFreelist::BlockFreelist() : _dictionary(NULL) {}

BlockFreelist::~BlockFreelist() {
  if (_dictionary != NULL) {
    if (Verbose && TraceMetadataChunkAllocation) {
      _dictionary->print_free_lists(gclog_or_tty);
    }
    delete _dictionary;
  }
}

Metablock* BlockFreelist::initialize_free_chunk(MetaWord* p, size_t word_size) {
  Metablock* block = (Metablock*) p;
  block->set_word_size(word_size);
  block->set_prev(NULL);
  block->set_next(NULL);

  return block;
}

void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  Metablock* free_chunk = initialize_free_chunk(p, word_size);
  if (dictionary() == NULL) {
    _dictionary = new BlockTreeDictionary();
  }
  dictionary()->return_chunk(free_chunk);
}

MetaWord* BlockFreelist::get_block(size_t word_size) {
  if (dictionary() == NULL) {
    return NULL;
  }

  if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
    // Dark matter.  Too small for dictionary.
    return NULL;
  }

  Metablock* free_block =
    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
  if (free_block == NULL) {
    return NULL;
  }

  const size_t block_size = free_block->size();
  if (block_size > WasteMultiplier * word_size) {
    return_block((MetaWord*)free_block, block_size);
    return NULL;
  }

  MetaWord* new_block = (MetaWord*)free_block;
  assert(block_size >= word_size, "Incorrect size of block from freelist");
  const size_t unused = block_size - word_size;
  if (unused >= TreeChunk<Metablock, FreeList>::min_size()) {
    return_block(new_block + word_size, unused);
  }

  return new_block;
}

void BlockFreelist::print_on(outputStream* st) const {
  if (dictionary() == NULL) {
    return;
  }
  dictionary()->print_free_lists(st);
}

// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
#ifdef ASSERT
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}

// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging.  Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  if (!is_available(chunk_word_size)) {
    if (TraceMetadataChunkAllocation) {
      gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
      // Dump some information about the virtual space that is nearly full
      print_on(gclog_or_tty);
    }
    return NULL;
  }

  // Take the space  (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Initialize the chunk
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
  return result;
}


// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t words, bool pre_touch) {
  size_t bytes = words * BytesPerWord;
  bool result =  virtual_space()->expand_by(bytes, pre_touch);
  if (TraceMetavirtualspaceAllocation && !result) {
    gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed "
                           "for byte size " SIZE_FORMAT, bytes);
    virtual_space()->print_on(gclog_or_tty);
  }
  return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* result = take_from_committed(chunk_word_size);
  if (result != NULL) {
    inc_container_count();
  }
  return result;
}

bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // An allocation out of this Virtualspace that is larger
  // than an initial commit size can waste that initial committed
  // space.
  size_t committed_byte_size = 0;
  bool result = virtual_space()->initialize(_rs, committed_byte_size);
  if (result) {
    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                 (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
      err_msg("Reserved start was not set properly " PTR_FORMAT
        " != " PTR_FORMAT, reserved()->start(), _rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
      err_msg("Reserved size was not set properly " SIZE_FORMAT
        " != " SIZE_FORMAT, reserved()->word_size(),
        _rs.size() / BytesPerWord));
  }

  return result;
}

void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
           "[" PTR_FORMAT ", " PTR_FORMAT ", "
           PTR_FORMAT ", " PTR_FORMAT ")",
           vs, capacity / K,
           capacity == 0 ? 0 : used * 100 / capacity,
           bottom(), top(), end(),
           vs->high_boundary());
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT

// VirtualSpaceList methods
// Space allocated from the VirtualSpace

VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words - v;
}

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words + v;
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words - v;
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count++;
}
void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count--;
}

void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk is being removed from the chunks free list.
  dec_free_chunks_total(chunk->capacity_word_size());
}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count.  Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert_lock_strong(SpaceManager::expand_lock());
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}

VirtualSpaceList::VirtualSpaceList(size_t word_size ) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  bool initialization_succeeded = grow_vs(word_size);
  assert(initialization_succeeded,
    " VirtualSpaceList initialization should not fail");
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  assert(succeeded, " VirtualSpaceList initialization should not fail");
  link_vs(class_entry);
}

size_t VirtualSpaceList::free_bytes() {
  return virtual_space_list()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  if (vs_word_size == 0) {
    return false;
  }
  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert(vs_byte_size % os::vm_allocation_granularity() == 0, "Not aligned");

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size, "Must be");
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(new_entry);
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
      set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  if (TraceMetavirtualspaceAllocation && Verbose) {
    VirtualSpaceNode* vsl = current_virtual_space();
    vsl->print_on(gclog_or_tty);
  }
}

bool VirtualSpaceList::expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch) {
  size_t before = node->committed_words();

  bool result = node->expand_by(word_size, pre_touch);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Must be");
  inc_committed_words(after - before);

  return result;
}

Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
                                           size_t grow_chunks_by_words,
                                           size_t medium_chunk_bunch) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);

  if (next == NULL) {
    // Not enough room in current virtual space.  Try to commit
    // more space.
    size_t expand_vs_by_words = MAX2(medium_chunk_bunch,
                                     grow_chunks_by_words);
    size_t page_size_words = os::vm_page_size() / BytesPerWord;
    size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
                                                        page_size_words);
    bool vs_expanded =
      expand_by(current_virtual_space(), aligned_expand_vs_by_words);
    if (!vs_expanded) {
      // Should the capacity of the metaspaces be expanded for
      // this allocation?  If it's the virtual space for classes and is
      // being used for CompressedHeaders, don't allocate a new virtualspace.
      if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
        // Get another virtual space.
        size_t allocation_aligned_expand_words =
            align_size_up(aligned_expand_vs_by_words, os::vm_allocation_granularity() / BytesPerWord);
        size_t grow_vs_words =
            MAX2((size_t)VirtualSpaceSize, allocation_aligned_expand_words);
        if (grow_vs(grow_vs_words)) {
          // Got it.  It's on the list now.  Get a chunk from it.
          assert(current_virtual_space()->expanded_words() == 0,
              "New virtual space nodes should not have expanded");

          size_t grow_chunks_by_words_aligned = align_size_up(grow_chunks_by_words,
                                                              page_size_words);
          // We probably want to expand by aligned_expand_vs_by_words here.
          expand_by(current_virtual_space(), grow_chunks_by_words_aligned);
          next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
        }
      } else {
        // Allocation will fail and induce a GC
        if (TraceMetadataChunkAllocation && Verbose) {
          gclog_or_tty->print_cr("VirtualSpaceList::get_new_chunk():"
            " Fail instead of expand the metaspace");
        }
      }
    } else {
      // The virtual space expanded, get a new chunk
      next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
      assert(next != NULL, "Just expanded, should succeed");
    }
  }

  assert(next == NULL || (next->next() == NULL && next->prev() == NULL),
         "New chunk is still on some list");
  return next;
}

Metachunk* VirtualSpaceList::get_initialization_chunk(size_t chunk_word_size,
                                                      size_t chunk_bunch) {
  // Get a chunk from the chunk freelist
  Metachunk* new_chunk = get_new_chunk(chunk_word_size,
                                       chunk_word_size,
                                       chunk_bunch);
  return new_chunk;
}

void VirtualSpaceList::print_on(outputStream* st) const {
  if (TraceMetadataChunkAllocation && Verbose) {
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* node = iter.get_next();
      node->print_on(st);
    }
  }
}

bool VirtualSpaceList::contains(const void *ptr) {
  VirtualSpaceNode* list = virtual_space_list();
  VirtualSpaceListIterator iter(list);
  while (iter.repeat()) {
    VirtualSpaceNode* node = iter.get_next();
    if (node->reserved()->contains(ptr)) {
      return true;
    }
  }
  return false;
}


// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With perm gen the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GC's.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation (i.e. big enough for a word_size
// allocation), increase by MaxMetaspaceExpansion.  If that is still
// not enough, expand by the size of the allocation (word_size) plus
// some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) {
  size_t before_inc = MetaspaceGC::capacity_until_GC();
  size_t min_delta_words = MinMetaspaceExpansion / BytesPerWord;
  size_t max_delta_words = MaxMetaspaceExpansion / BytesPerWord;
  size_t page_size_words = os::vm_page_size() / BytesPerWord;
  size_t size_delta_words = align_size_up(word_size, page_size_words);
  size_t delta_words = MAX2(size_delta_words, min_delta_words);
  if (delta_words > min_delta_words) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta_words = MAX2(delta_words, max_delta_words);
    if (delta_words > max_delta_words) {
      // This allocation is large but the next ones are probably not
      // so increase by the minimum.
      delta_words = delta_words + min_delta_words;
    }
  }
  return delta_words;
}
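
// For example, if MinMetaspaceExpansion is 256K and MaxMetaspaceExpansion is
// 4M (both byte-valued flags), a 64K-byte allocation grows the HWM by 256K, a
// 1M-byte allocation grows it by 4M, and an 8M-byte allocation grows it by
// the 8M itself (rounded up to a page boundary) plus another 256K.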

bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {

  // If the user wants a limit, impose one.
  // The reason for someone using this flag is to limit reserved space.  So
  // for non-class virtual space, compare against virtual spaces that are reserved.
  // For class virtual space, we only compare against the committed space, not
  // reserved space, because this is a larger space prereserved for compressed
  // class pointers.
  if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
    size_t nonclass_allocated = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
    size_t class_allocated    = MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
    size_t real_allocated     = nonclass_allocated + class_allocated;
    if (real_allocated >= MaxMetaspaceSize) {
      return false;
    }
  }

  // Class virtual space should always be expanded.  Call GC for the other
  // metadata virtual space.
  if (Metaspace::using_class_space() &&
      (vsl == Metaspace::class_space_list())) return true;

  // If this is part of an allocation after a GC, expand
  // unconditionally.
  if (MetaspaceGC::expand_after_GC()) {
    return true;
  }

  // If the capacity is below the minimum capacity, allow the
  // expansion.  Also set the high-water-mark (capacity_until_GC)
  // to that minimum capacity so that a GC will not be induced
  // until that minimum capacity is exceeded.
  size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes();
  size_t metaspace_size_bytes = MetaspaceSize;
  if (committed_capacity_bytes < metaspace_size_bytes ||
      capacity_until_GC() == 0) {
    set_capacity_until_GC(metaspace_size_bytes);
    return true;
  } else {
    if (committed_capacity_bytes < capacity_until_GC()) {
      return true;
    } else {
      if (TraceMetadataChunkAllocation && Verbose) {
        gclog_or_tty->print_cr("  allocation request size " SIZE_FORMAT
                        "  capacity_until_GC " SIZE_FORMAT
                        "  allocated_capacity_bytes " SIZE_FORMAT,
                        word_size,
                        capacity_until_GC(),
                        MetaspaceAux::allocated_capacity_bytes());
      }
      return false;
    }
  }
}

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Until a faster way of calculating the "used" quantity is implemented,
  // use "capacity".
  const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
    gclog_or_tty->print_cr("  "
                  "  minimum_free_percentage: %6.2f"
                  "  maximum_used_percentage: %6.2f",
                  minimum_free_percentage,
                  maximum_used_percentage);
    gclog_or_tty->print_cr("  "
                  "   used_after_gc       : %6.1fKB",
                  used_after_gc / (double) K);
  }


  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      MetaspaceGC::set_capacity_until_GC(capacity_until_GC + expand_bytes);
    }
    if (PrintGCDetails && Verbose) {
      size_t new_capacity_until_GC = capacity_until_GC;
      gclog_or_tty->print_cr("    expanding:"
                    "  minimum_desired_capacity: %6.1fKB"
                    "  expand_bytes: %6.1fKB"
                    "  MinMetaspaceExpansion: %6.1fKB"
                    "  new metaspace HWM:  %6.1fKB",
                    minimum_desired_capacity / (double) K,
                    expand_bytes / (double) K,
                    MinMetaspaceExpansion / (double) K,
                    new_capacity_until_GC / (double) K);
    }
    return;
  }

  // No expansion, now see if we want to shrink
  // We would never want to shrink more than this
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
  assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT,
    max_shrink_bytes));

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  maximum_free_percentage: %6.2f"
                             "  minimum_used_percentage: %6.2f",
                             maximum_free_percentage,
                             minimum_used_percentage);
      gclog_or_tty->print_cr("  "
                             "  minimum_desired_capacity: %6.1fKB"
                             "  maximum_desired_capacity: %6.1fKB",
                             minimum_desired_capacity / (double) K,
                             maximum_desired_capacity / (double) K);
    }

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
      assert(shrink_bytes <= max_shrink_bytes,
        err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
          shrink_bytes, max_shrink_bytes));
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
      }
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("  "
                      "  shrinking:"
                      "  initSize: %.1fK"
                      "  maximum_desired_capacity: %.1fK",
                      MetaspaceSize / (double) K,
                      maximum_desired_capacity / (double) K);
        gclog_or_tty->print_cr("  "
                      "  shrink_bytes: %.1fK"
                      "  current_shrink_factor: %d"
                      "  new shrink factor: %d"
                      "  MinMetaspaceExpansion: %.1fK",
                      shrink_bytes / (double) K,
                      current_shrink_factor,
                      _shrink_factor,
                      MinMetaspaceExpansion / (double) K);
      }
    }
  }

  // Don't shrink unless it's significant
  if (shrink_bytes >= MinMetaspaceExpansion &&
      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
    MetaspaceGC::set_capacity_until_GC(capacity_until_GC - shrink_bytes);
  }
}

// Metadebug methods

void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm,
                                       size_t chunk_word_size){
#ifdef ASSERT
  VirtualSpaceList* vsl = sm->vs_list();
  if (MetaDataDeallocateALot &&
      Metadebug::deallocate_chunk_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
    Metadebug::reset_deallocate_chunk_a_lot_count();
    for (uint i = 0; i < metadata_deallocate_a_lock_chunk; i++) {
      Metachunk* dummy_chunk = vsl->current_virtual_space()->take_from_committed(chunk_word_size);
      if (dummy_chunk == NULL) {
        break;
      }
      sm->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);

      if (TraceMetadataChunkAllocation && Verbose) {
        gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ",
                               sm->sum_count_in_chunks_in_use());
        dummy_chunk->print_on(gclog_or_tty);
        gclog_or_tty->print_cr("  Free chunks total %d  count %d",
                               sm->chunk_manager()->free_chunks_total_words(),
                               sm->chunk_manager()->free_chunks_count());
      }
    }
  } else {
    Metadebug::inc_deallocate_chunk_a_lot_count();
  }
#endif
}

void Metadebug::deallocate_block_a_lot(SpaceManager* sm,
                                       size_t raw_word_size){
#ifdef ASSERT
  if (MetaDataDeallocateALot &&
        Metadebug::deallocate_block_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
    Metadebug::set_deallocate_block_a_lot_count(0);
    for (uint i = 0; i < metadata_deallocate_a_lot_block; i++) {
      MetaWord* dummy_block = sm->allocate_work(raw_word_size);
      if (dummy_block == 0) {
        break;
      }
      sm->deallocate(dummy_block, raw_word_size);
    }
  } else {
    Metadebug::inc_deallocate_block_a_lot_count();
  }
#endif
}

void Metadebug::init_allocation_fail_alot_count() {
  if (MetadataAllocationFailALot) {
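    // Pick a count that is roughly uniform in [1, MetadataAllocationFailALotInterval]
    // so the induced failures are spread out rather than strictly periodic.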
    _allocation_fail_alot_count =
      1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
  }
}

#ifdef ASSERT
bool Metadebug::test_metadata_failure() {
  if (MetadataAllocationFailALot &&
      Threads::is_vm_complete()) {
    if (_allocation_fail_alot_count > 0) {
      _allocation_fail_alot_count--;
    } else {
      if (TraceMetadataChunkAllocation && Verbose) {
        gclog_or_tty->print_cr("Metadata allocation failing for "
                               "MetadataAllocationFailALot");
      }
      init_allocation_fail_alot_count();
      return true;
    }
  }
  return false;
}
#endif

// ChunkManager methods

size_t ChunkManager::free_chunks_total_words() {
  return _free_chunks_total;
}

size_t ChunkManager::free_chunks_total_bytes() {
  return free_chunks_total_words() * BytesPerWord;
}

size_t ChunkManager::free_chunks_count() {
#ifdef ASSERT
  if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
    MutexLockerEx cl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
    // This lock is only needed in debug because the verification
    // of the _free_chunks_totals walks the list of free chunks
    slow_locked_verify_free_chunks_count();
  }
#endif
  return _free_chunks_count;
}

void ChunkManager::locked_verify_free_chunks_total() {
  assert_lock_strong(SpaceManager::expand_lock());
  assert(sum_free_chunks() == _free_chunks_total,
    err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
           " same as sum " SIZE_FORMAT, _free_chunks_total,
           sum_free_chunks()));
}

void ChunkManager::verify_free_chunks_total() {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
  locked_verify_free_chunks_total();
}

void ChunkManager::locked_verify_free_chunks_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  assert(sum_free_chunks_count() == _free_chunks_count,
    err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
           " same as sum " SIZE_FORMAT, _free_chunks_count,
           sum_free_chunks_count()));
}

void ChunkManager::verify_free_chunks_count() {
#ifdef ASSERT
  MutexLockerEx cl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
  locked_verify_free_chunks_count();
#endif
}

void ChunkManager::verify() {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
  locked_verify();
}

void ChunkManager::locked_verify() {
  locked_verify_free_chunks_count();
  locked_verify_free_chunks_total();
}

void ChunkManager::locked_print_free_chunks(outputStream* st) {
  assert_lock_strong(SpaceManager::expand_lock());
  st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
                _free_chunks_total, _free_chunks_count);
}

void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
  assert_lock_strong(SpaceManager::expand_lock());
  st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
                sum_free_chunks(), sum_free_chunks_count());
}
ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
  return &_free_chunks[index];
}

// These methods that sum the free chunk lists are used in printing
// methods that are used in product builds.
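// Free words are the sum over the fixed-size lists of (chunks in list) *
// (list chunk size), plus whatever the humongous dictionary currently holds.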
size_t ChunkManager::sum_free_chunks() {
  assert_lock_strong(SpaceManager::expand_lock());
  size_t result = 0;
  for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
    ChunkList* list = free_chunks(i);

    if (list == NULL) {
      continue;
    }

    result = result + list->count() * list->size();
  }
  result = result + humongous_dictionary()->total_size();
  return result;
}

size_t ChunkManager::sum_free_chunks_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  size_t count = 0;
  for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
    ChunkList* list = free_chunks(i);
    if (list == NULL) {
      continue;
    }
    count = count + list->count();
  }
  count = count + humongous_dictionary()->total_free_blocks();
  return count;
}

ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
  ChunkIndex index = list_index(word_size);
  assert(index < HumongousIndex, "No humongous list");
  return free_chunks(index);
}

void ChunkManager::free_chunks_put(Metachunk* chunk) {
  assert_lock_strong(SpaceManager::expand_lock());
  ChunkList* free_list = find_free_chunks_list(chunk->word_size());
  chunk->set_next(free_list->head());
  free_list->set_head(chunk);
  // chunk is being returned to the chunk free list
  inc_free_chunks_total(chunk->capacity_word_size());
  slow_locked_verify();
}

void ChunkManager::chunk_freelist_deallocate(Metachunk* chunk) {
  // The deallocation of a chunk originates in the freelist
  // management code for a Metaspace and does not hold the
  // lock.
  assert(chunk != NULL, "Deallocating NULL");
  assert_lock_strong(SpaceManager::expand_lock());
  slow_locked_verify();
  if (TraceMetadataChunkAllocation) {
    gclog_or_tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
                           PTR_FORMAT "  size " SIZE_FORMAT,
                           chunk, chunk->word_size());
  }
  free_chunks_put(chunk);
}

Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  slow_locked_verify();

  Metachunk* chunk = NULL;
  if (list_index(word_size) != HumongousIndex) {
    ChunkList* free_list = find_free_chunks_list(word_size);
    assert(free_list != NULL, "Sanity check");

    chunk = free_list->head();
    debug_only(Metachunk* debug_head = chunk;)

    if (chunk == NULL) {
      return NULL;
    }

    // Remove the chunk as the head of the list.
    free_list->remove_chunk(chunk);

    // Chunk is being removed from the chunks free list.
    dec_free_chunks_total(chunk->capacity_word_size());

    if (TraceMetadataChunkAllocation && Verbose) {
      gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
                             PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
                             free_list, chunk, chunk->word_size());
    }
  } else {
    chunk = humongous_dictionary()->get_chunk(
      word_size,
      FreeBlockDictionary<Metachunk>::atLeast);

    if (chunk != NULL) {
      if (TraceMetadataHumongousAllocation) {
        size_t waste = chunk->word_size() - word_size;
        gclog_or_tty->print_cr("Free list allocate humongous chunk size "
                               SIZE_FORMAT " for requested size " SIZE_FORMAT
                               " waste " SIZE_FORMAT,
                               chunk->word_size(), word_size, waste);
      }
      // Chunk is being removed from the chunks free list.
      dec_free_chunks_total(chunk->capacity_word_size());
    } else {
      return NULL;
    }
  }

  // Remove it from the links to this freelist
  chunk->set_next(NULL);
  chunk->set_prev(NULL);
#ifdef ASSERT
  // Chunk is no longer on any freelist. Setting to false makes container_count_slow()
  // work.
  chunk->set_is_free(false);
#endif
  chunk->container()->inc_container_count();

  slow_locked_verify();
  return chunk;
}

Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  slow_locked_verify();

  // Take from the beginning of the list
  Metachunk* chunk = free_chunks_get(word_size);
  if (chunk == NULL) {
    return NULL;
  }

  assert((word_size <= chunk->word_size()) ||
         (list_index(chunk->word_size()) == HumongousIndex),
         "Non-humongous variable sized chunk");
  if (TraceMetadataChunkAllocation) {
    size_t list_count;
    if (list_index(word_size) < HumongousIndex) {
      ChunkList* list = find_free_chunks_list(word_size);
      list_count = list->count();
    } else {
      list_count = humongous_dictionary()->total_count();
    }
    gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
                        PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
                        this, chunk, chunk->word_size(), list_count);
    locked_print_free_chunks(gclog_or_tty);
  }

  return chunk;
}

void ChunkManager::print_on(outputStream* out) const {
  if (PrintFLSStatistics != 0) {
    const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
  }
}

// SpaceManager methods

void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
                                           size_t* chunk_word_size,
                                           size_t* class_chunk_word_size) {
  switch (type) {
  case Metaspace::BootMetaspaceType:
    *chunk_word_size = Metaspace::first_chunk_word_size();
    *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
    break;
  case Metaspace::ROMetaspaceType:
    *chunk_word_size = SharedReadOnlySize / wordSize;
    *class_chunk_word_size = ClassSpecializedChunk;
    break;
  case Metaspace::ReadWriteMetaspaceType:
    *chunk_word_size = SharedReadWriteSize / wordSize;
    *class_chunk_word_size = ClassSpecializedChunk;
    break;
  case Metaspace::AnonymousMetaspaceType:
  case Metaspace::ReflectionMetaspaceType:
    *chunk_word_size = SpecializedChunk;
    *class_chunk_word_size = ClassSpecializedChunk;
    break;
  default:
    *chunk_word_size = SmallChunk;
    *class_chunk_word_size = ClassSmallChunk;
    break;
  }
  assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
    err_msg("Initial chunks sizes bad: data  " SIZE_FORMAT
            " class " SIZE_FORMAT,
            *chunk_word_size, *class_chunk_word_size));
}

size_t SpaceManager::sum_free_in_chunks_in_use() const {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  size_t free = 0;
  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    Metachunk* chunk = chunks_in_use(i);
    while (chunk != NULL) {
      free += chunk->free_word_size();
      chunk = chunk->next();
    }
  }
  return free;
}

size_t SpaceManager::sum_waste_in_chunks_in_use() const {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  size_t result = 0;
  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
   result += sum_waste_in_chunks_in_use(i);
  }

  return result;
}

size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
  size_t result = 0;
  Metachunk* chunk = chunks_in_use(index);
  // Count the free space in all the chunks but not the
  // current chunk from which allocations are still being done.
  while (chunk != NULL) {
    if (chunk != current_chunk()) {
      result += chunk->free_word_size();
    }
    chunk = chunk->next();
  }
  return result;
}

size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
  // For CMS use "allocated_chunks_words()" which does not need the
  // Metaspace lock.  For the other collectors sum over the
  // lists.  Use both methods as a check that "allocated_chunks_words()"
  // is correct.  That is, sum_capacity_in_chunks_in_use() is too expensive
  // to use in the product and allocated_chunks_words() should be used
  // but allow for checking that allocated_chunks_words() returns the same
  // value as sum_capacity_in_chunks_in_use() which is the definitive
  // answer.
  if (UseConcMarkSweepGC) {
    return allocated_chunks_words();
  } else {
    MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
    size_t sum = 0;
    for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
      Metachunk* chunk = chunks_in_use(i);
      while (chunk != NULL) {
        sum += chunk->capacity_word_size();
        chunk = chunk->next();
      }
    }
    return sum;
  }
}

size_t SpaceManager::sum_count_in_chunks_in_use() {
  size_t count = 0;
  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    count = count + sum_count_in_chunks_in_use(i);
  }

  return count;
}

size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
  size_t count = 0;
  Metachunk* chunk = chunks_in_use(i);
  while (chunk != NULL) {
    count++;
    chunk = chunk->next();
  }
  return count;
}


size_t SpaceManager::sum_used_in_chunks_in_use() const {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  size_t used = 0;
  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    Metachunk* chunk = chunks_in_use(i);
    while (chunk != NULL) {
      used += chunk->used_word_size();
      chunk = chunk->next();
    }
  }
  return used;
}

void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {

  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    Metachunk* chunk = chunks_in_use(i);
    st->print("SpaceManager: %s " PTR_FORMAT,
                 chunk_size_name(i), chunk);
    if (chunk != NULL) {
      st->print_cr(" free " SIZE_FORMAT,
                   chunk->free_word_size());
    } else {
      st->print_cr("");
    }
  }

  chunk_manager()->locked_print_free_chunks(st);
  chunk_manager()->locked_print_sum_free_chunks(st);
}

size_t SpaceManager::calc_chunk_size(size_t word_size) {

  // Decide between a small chunk and a medium chunk.  Up to
  // _small_chunk_limit small chunks can be allocated but
  // once a medium chunk has been allocated, no more small
  // chunks will be allocated.
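  // In other words: prefer small chunks until either a medium chunk exists or
  // _small_chunk_limit small chunks are in use; a request that does not fit a
  // small chunk gets a medium chunk, and anything larger still is rounded up
  // to a humongous chunk below.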
  size_t chunk_word_size;
  if (chunks_in_use(MediumIndex) == NULL &&
      sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
    chunk_word_size = (size_t) small_chunk_size();
    if (word_size + Metachunk::overhead() > small_chunk_size()) {
      chunk_word_size = medium_chunk_size();
    }
  } else {
    chunk_word_size = medium_chunk_size();
  }

  // Might still need a humongous chunk.  Enforce an
  // eight word granularity to facilitate reuse (some
  // wastage but better chance of reuse).
  size_t if_humongous_sized_chunk =
    align_size_up(word_size + Metachunk::overhead(),
                  HumongousChunkGranularity);
  chunk_word_size =
    MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);

  assert(!SpaceManager::is_humongous(word_size) ||
         chunk_word_size == if_humongous_sized_chunk,
         err_msg("Size calculation is wrong, word_size " SIZE_FORMAT
                 " chunk_word_size " SIZE_FORMAT,
                 word_size, chunk_word_size));
  if (TraceMetadataHumongousAllocation &&
      SpaceManager::is_humongous(word_size)) {
    gclog_or_tty->print_cr("Metadata humongous allocation:");
    gclog_or_tty->print_cr("  word_size " PTR_FORMAT, word_size);
    gclog_or_tty->print_cr("  chunk_word_size " PTR_FORMAT,
                           chunk_word_size);
    gclog_or_tty->print_cr("    chunk overhead " PTR_FORMAT,
                           Metachunk::overhead());
  }
  return chunk_word_size;
}

MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
  assert(vs_list()->current_virtual_space() != NULL,
         "Should have been set");
  assert(current_chunk() == NULL ||
         current_chunk()->allocate(word_size) == NULL,
         "Don't need to expand");
  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);

  if (TraceMetadataChunkAllocation && Verbose) {
    size_t words_left = 0;
    size_t words_used = 0;
    if (current_chunk() != NULL) {
      words_left = current_chunk()->free_word_size();
      words_used = current_chunk()->used_word_size();
    }
    gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
                           " words " SIZE_FORMAT " words used " SIZE_FORMAT
                           " words left",
                            word_size, words_used, words_left);
  }

  // Get another chunk out of the virtual space
  size_t grow_chunks_by_words = calc_chunk_size(word_size);
  Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);

  // If a chunk was available, add it to the in-use chunk list
  // and do an allocation from it.
  if (next != NULL) {
    Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
    // Add to this manager's list of chunks in use.
    add_chunk(next, false);
    return next->allocate(word_size);
  }
  return NULL;
}

void SpaceManager::print_on(outputStream* st) const {

  for (ChunkIndex i = ZeroIndex;
       i < NumberOfInUseLists ;
       i = next_chunk_index(i) ) {
    st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT,
                 chunks_in_use(i),
                 chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
  }
  st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
               " Humongous " SIZE_FORMAT,
               sum_waste_in_chunks_in_use(SmallIndex),
               sum_waste_in_chunks_in_use(MediumIndex),
               sum_waste_in_chunks_in_use(HumongousIndex));
  // block free lists
  if (block_freelists() != NULL) {
    st->print_cr("total in block free lists " SIZE_FORMAT,
      block_freelists()->total_size());
  }
}

SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
                           Mutex* lock) :
  _mdtype(mdtype),
  _allocated_blocks_words(0),
  _allocated_chunks_words(0),
  _allocated_chunks_count(0),
  _lock(lock)
{
  initialize();
}

void SpaceManager::inc_size_metrics(size_t words) {
  assert_lock_strong(SpaceManager::expand_lock());
  // Total of allocated Metachunks and allocated Metachunks count
  // for each SpaceManager
  _allocated_chunks_words = _allocated_chunks_words + words;
  _allocated_chunks_count++;
  // Global total of capacity in allocated Metachunks
  MetaspaceAux::inc_capacity(mdtype(), words);
  // Global total of allocated Metablocks.
  // used_words_slow() includes the overhead in each
  // Metachunk so include it in the used when the
  // Metachunk is first added (so only added once per
  // Metachunk).
  MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
}

void SpaceManager::inc_used_metrics(size_t words) {
  // Add to the per SpaceManager total
  Atomic::add_ptr(words, &_allocated_blocks_words);
  // Add to the global total
  MetaspaceAux::inc_used(mdtype(), words);
}

void SpaceManager::dec_total_from_size_metrics() {
  MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
  MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
  // Also deduct the overhead per Metachunk
  MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
}

void SpaceManager::initialize() {
  Metadebug::init_allocation_fail_alot_count();
  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    _chunks_in_use[i] = NULL;
  }
  _current_chunk = NULL;
  if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this);
  }
}

void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
  if (chunks == NULL) {
    return;
  }
  ChunkList* list = free_chunks(index);
  assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes");
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* cur = chunks;

  // This returns chunks one at a time.  If a new
  // class List can be created that is a base class
  // of FreeList then something like FreeList::prepend()
  // can be used in place of this loop
  while (cur != NULL) {
    assert(cur->container() != NULL, "Container should have been set");
    cur->container()->dec_container_count();
    // Capture the next link before it is changed
    // by the call to return_chunk_at_head();
    Metachunk* next = cur->next();
    cur->set_is_free(true);
    list->return_chunk_at_head(cur);
    cur = next;
  }
}

SpaceManager::~SpaceManager() {
  // This takes this->_lock, which can't be done while holding expand_lock()
  assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
    err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
            " allocated_chunks_words() " SIZE_FORMAT,
            sum_capacity_in_chunks_in_use(), allocated_chunks_words()));

  MutexLockerEx fcl(SpaceManager::expand_lock(),
                    Mutex::_no_safepoint_check_flag);

  chunk_manager()->slow_locked_verify();

  dec_total_from_size_metrics();

  if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
    locked_print_chunks_in_use_on(gclog_or_tty);
  }

  // Do not mangle freed Metachunks.  The chunk size inside Metachunks
  // is still needed during the freeing of VirtualSpaceNodes.

  // Have to update before the chunks_in_use lists are emptied
  // below.
  chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
                                         sum_count_in_chunks_in_use());

  // Add all the chunks in use by this space manager
  // to the global list of free chunks.

  // Follow each list of chunks-in-use and add them to the
  // free lists.  Each list is NULL terminated.

  for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
    if (TraceMetadataChunkAllocation && Verbose) {
      gclog_or_tty->print_cr("returned %d %s chunks to freelist",
                             sum_count_in_chunks_in_use(i),
                             chunk_size_name(i));
    }
    Metachunk* chunks = chunks_in_use(i);
    chunk_manager()->return_chunks(i, chunks);
    set_chunks_in_use(i, NULL);
    if (TraceMetadataChunkAllocation && Verbose) {
      gclog_or_tty->print_cr("updated freelist count %d %s",
                             chunk_manager()->free_chunks(i)->count(),
                             chunk_size_name(i));
    }
    assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
  }

  // The medium chunk case may be optimized by passing the head and
  // tail of the medium chunk list to add_at_head().  The tail is often
  // the current chunk but there are probably exceptions.

  // Humongous chunks
  if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print_cr("returned %d %s humongous chunks to dictionary",
                            sum_count_in_chunks_in_use(HumongousIndex),
                            chunk_size_name(HumongousIndex));
    gclog_or_tty->print("Humongous chunk dictionary: ");
  }
  // Humongous chunks are never the current chunk.
  Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);

  while (humongous_chunks != NULL) {
#ifdef ASSERT
    humongous_chunks->set_is_free(true);
#endif
    if (TraceMetadataChunkAllocation && Verbose) {
      gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
                          humongous_chunks,
                          humongous_chunks->word_size());
    }
    assert(humongous_chunks->word_size() == (size_t)
           align_size_up(humongous_chunks->word_size(),
                             HumongousChunkGranularity),
           err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
                   " granularity %d",
                   humongous_chunks->word_size(), HumongousChunkGranularity));
    Metachunk* next_humongous_chunks = humongous_chunks->next();
    humongous_chunks->container()->dec_container_count();
    chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
    humongous_chunks = next_humongous_chunks;
  }
  if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print_cr("");
    gclog_or_tty->print_cr("updated dictionary count %d %s",
                     chunk_manager()->humongous_dictionary()->total_count(),
                     chunk_size_name(HumongousIndex));
  }
  chunk_manager()->slow_locked_verify();
}

const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
  switch (index) {
    case SpecializedIndex:
      return "Specialized";
    case SmallIndex:
      return "Small";
    case MediumIndex:
      return "Medium";
    case HumongousIndex:
      return "Humongous";
    default:
      return NULL;
  }
}

ChunkIndex ChunkManager::list_index(size_t size) {
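  // Only the fixed specialized/small/medium sizes have dedicated free lists;
  // every other size is treated as humongous.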
  switch (size) {
    case SpecializedChunk:
      assert(SpecializedChunk == ClassSpecializedChunk,
             "Need branch for ClassSpecializedChunk");
      return SpecializedIndex;
    case SmallChunk:
    case ClassSmallChunk:
      return SmallIndex;
    case MediumChunk:
    case ClassMediumChunk:
      return MediumIndex;
    default:
      assert(size > MediumChunk || size > ClassMediumChunk,
             "Not a humongous chunk");
      return HumongousIndex;
  }
}

void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
  assert_lock_strong(_lock);
  size_t raw_word_size = get_raw_word_size(word_size);
  size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
  assert(raw_word_size >= min_size,
         err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
  block_freelists()->return_block(p, raw_word_size);
}

// Adds a chunk to the list of chunks in use.
void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {

  assert(new_chunk != NULL, "Should not be NULL");
  assert(new_chunk->next() == NULL, "Should not be on a list");

  new_chunk->reset_empty();

  // Find the correct list and set the current
  // chunk for that list.
  ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());

  if (index != HumongousIndex) {
    retire_current_chunk();
    set_current_chunk(new_chunk);
    new_chunk->set_next(chunks_in_use(index));
    set_chunks_in_use(index, new_chunk);
  } else {
    // For null class loader data and DumpSharedSpaces, the first chunk isn't
    // small, so small will be null.  Link this first chunk as the current
    // chunk.
    if (make_current) {
      // Set as the current chunk but otherwise treat as a humongous chunk.
      set_current_chunk(new_chunk);
    }
    // Link at head.  The _current_chunk only points to a humongous chunk for
    // the null class loader metaspace (class and data virtual space managers)
    // that has humongous chunks, so it will not point to the tail
    // of the humongous chunks list.
    new_chunk->set_next(chunks_in_use(HumongousIndex));
    set_chunks_in_use(HumongousIndex, new_chunk);

    assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
  }

  // Add to the running sum of capacity
  inc_size_metrics(new_chunk->word_size());

  assert(new_chunk->is_empty(), "Not ready for reuse");
  if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
                        sum_count_in_chunks_in_use());
    new_chunk->print_on(gclog_or_tty);
    chunk_manager()->locked_print_free_chunks(gclog_or_tty);
  }
}

void SpaceManager::retire_current_chunk() {
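  // Salvage the unused tail of the current chunk by handing it to the block
  // free lists; a tail smaller than the minimum dictionary chunk size is
  // simply left unused.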
  if (current_chunk() != NULL) {
    size_t remaining_words = current_chunk()->free_word_size();
    if (remaining_words >= TreeChunk<Metablock, FreeList>::min_size()) {
      block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
      inc_used_metrics(remaining_words);
    }
  }
}

Metachunk* SpaceManager::get_new_chunk(size_t word_size,
                                       size_t grow_chunks_by_words) {
  // Get a chunk from the chunk freelist
  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);

  if (next == NULL) {
    next = vs_list()->get_new_chunk(word_size,
                                    grow_chunks_by_words,
                                    medium_chunk_bunch());
  }

  if (TraceMetadataHumongousAllocation && next != NULL &&
      SpaceManager::is_humongous(next->word_size())) {
    gclog_or_tty->print_cr("  new humongous chunk word size "
                           PTR_FORMAT, next->word_size());
  }

  return next;
}

MetaWord* SpaceManager::allocate(size_t word_size) {
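  // Allocation path: try the deallocated-block free list first (once it has
  // grown past allocation_from_dictionary_limit), then the current chunk,
  // and finally grow by getting a new chunk.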
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);

  size_t raw_word_size = get_raw_word_size(word_size);
  BlockFreelist* fl =  block_freelists();
  MetaWord* p = NULL;
  // Allocation from the dictionary is expensive in the sense that
  // the dictionary has to be searched for a size.  Don't allocate
  // from the dictionary until it starts to get fat.  Is this
  // a reasonable policy?  Maybe a skinny dictionary is fast enough
  // for allocations.  Do some profiling.  JJJ
  if (fl->total_size() > allocation_from_dictionary_limit) {
    p = fl->get_block(raw_word_size);
  }
  if (p == NULL) {
    p = allocate_work(raw_word_size);
  }
  Metadebug::deallocate_block_a_lot(this, raw_word_size);

2346
  return p;
}

// Returns the address of space allocated for "word_size".
// This method does not know about blocks (Metablocks)
MetaWord* SpaceManager::allocate_work(size_t word_size) {
  assert_lock_strong(_lock);
#ifdef ASSERT
  if (Metadebug::test_metadata_failure()) {
    return NULL;
  }
#endif
  // Is there space in the current chunk?
  MetaWord* result = NULL;

  // For DumpSharedSpaces, only allocate out of the current chunk which is
  // never null because we gave it the size we wanted.   Caller reports out
  // of memory if this returns null.
  if (DumpSharedSpaces) {
    assert(current_chunk() != NULL, "should never happen");
    inc_used_metrics(word_size);
    return current_chunk()->allocate(word_size); // caller handles null result
  }
  if (current_chunk() != NULL) {
    result = current_chunk()->allocate(word_size);
  }

  if (result == NULL) {
    result = grow_and_allocate(word_size);
  }
  if (result != 0) {
    inc_used_metrics(word_size);
    assert(result != (MetaWord*) chunks_in_use(MediumIndex),
           "Head of the list is being allocated");
  }

  return result;
}

void SpaceManager::verify() {
  // If there are blocks in the dictionary, then
  // verification of chunks does not work since
  // being in the dictionary alters a chunk.
  if (block_freelists()->total_size() == 0) {
    for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
      Metachunk* curr = chunks_in_use(i);
      while (curr != NULL) {
        curr->verify();
        verify_chunk_size(curr);
        curr = curr->next();
      }
    }
  }
}

void SpaceManager::verify_chunk_size(Metachunk* chunk) {
  assert(is_humongous(chunk->word_size()) ||
         chunk->word_size() == medium_chunk_size() ||
         chunk->word_size() == small_chunk_size() ||
         chunk->word_size() == specialized_chunk_size(),
         "Chunk size is wrong");
  return;
}

#ifdef ASSERT
void SpaceManager::verify_allocated_blocks_words() {
  // Verification is only guaranteed at a safepoint.
  assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
    "Verification can fail if the applications is running");
  assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
    err_msg("allocation total is not consistent " SIZE_FORMAT
            " vs " SIZE_FORMAT,
            allocated_blocks_words(), sum_used_in_chunks_in_use()));
}

#endif

void SpaceManager::dump(outputStream* const out) const {
  size_t curr_total = 0;
  size_t waste = 0;
  uint i = 0;
  size_t used = 0;
  size_t capacity = 0;

  // Add up statistics for all chunks in this SpaceManager.
  for (ChunkIndex index = ZeroIndex;
       index < NumberOfInUseLists;
       index = next_chunk_index(index)) {
    for (Metachunk* curr = chunks_in_use(index);
         curr != NULL;
         curr = curr->next()) {
      out->print("%d) ", i++);
      curr->print_on(out);
      curr_total += curr->word_size();
      used += curr->used_word_size();
      capacity += curr->capacity_word_size();
      waste += curr->free_word_size() + curr->overhead();
    }
  }

  if (TraceMetadataChunkAllocation && Verbose) {
    block_freelists()->print_on(out);
  }

  size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
  // Free space isn't wasted.
  waste -= free;

  out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
                " free " SIZE_FORMAT " capacity " SIZE_FORMAT
                " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
}

#ifndef PRODUCT
void SpaceManager::mangle_freed_chunks() {
  for (ChunkIndex index = ZeroIndex;
       index < NumberOfInUseLists;
       index = next_chunk_index(index)) {
    for (Metachunk* curr = chunks_in_use(index);
         curr != NULL;
         curr = curr->next()) {
      curr->mangle();
    }
  }
}
#endif // PRODUCT

// MetaspaceAux


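// One running total per Metaspace::MetadataType (class and non-class metadata).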
size_t MetaspaceAux::_allocated_capacity_words[] = {0, 0};
size_t MetaspaceAux::_allocated_used_words[] = {0, 0};

size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  return list == NULL ? 0 : list->free_bytes();
}

size_t MetaspaceAux::free_bytes() {
  return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
}

void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
  assert_lock_strong(SpaceManager::expand_lock());
  assert(words <= allocated_capacity_words(mdtype),
    err_msg("About to decrement below 0: words " SIZE_FORMAT
            " is greater than _allocated_capacity_words[%u] " SIZE_FORMAT,
            words, mdtype, allocated_capacity_words(mdtype)));
  _allocated_capacity_words[mdtype] -= words;
}

void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
  assert_lock_strong(SpaceManager::expand_lock());
  // Needs to be atomic
  _allocated_capacity_words[mdtype] += words;
}

void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
  assert(words <= allocated_used_words(mdtype),
    err_msg("About to decrement below 0: words " SIZE_FORMAT
            " is greater than _allocated_used_words[%u] " SIZE_FORMAT,
            words, mdtype, allocated_used_words(mdtype)));
  // For CMS deallocation of the Metaspaces occurs during the
  // sweep which is a concurrent phase.  Protection by the expand_lock()
  // is not enough since allocation is on a per Metaspace basis
  // and protected by the Metaspace lock.
  jlong minus_words = (jlong) - (jlong) words;
  Atomic::add_ptr(minus_words, &_allocated_used_words[mdtype]);
}

void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
  // _allocated_used_words tracks allocations for
  // each piece of metadata.  Those allocations are
  // generally done concurrently by different application
  // threads so must be done atomically.
  Atomic::add_ptr(words, &_allocated_used_words[mdtype]);
}

size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
  size_t used = 0;
  ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    Metaspace* msp = iter.get_next();
    // Sum allocated_blocks_words for each metaspace
    if (msp != NULL) {
      used += msp->used_words_slow(mdtype);
    }
  }
  return used * BytesPerWord;
}

size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
  size_t free = 0;
  ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    Metaspace* msp = iter.get_next();
    if (msp != NULL) {
      free += msp->free_words_slow(mdtype);
    }
  }
  return free * BytesPerWord;
}

size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
  if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
    return 0;
  }
  // Don't count the space in the freelists.  That space will be
  // added to the capacity calculation as needed.
  size_t capacity = 0;
  ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    Metaspace* msp = iter.get_next();
    if (msp != NULL) {
      capacity += msp->capacity_words_slow(mdtype);
    }
  }
  return capacity * BytesPerWord;
}

size_t MetaspaceAux::capacity_bytes_slow() {
#ifdef PRODUCT
  // Use allocated_capacity_bytes() in PRODUCT instead of this function.
  guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
#endif
  size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
  size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
  assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
      err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
        " class_capacity + non_class_capacity " SIZE_FORMAT
        " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
        allocated_capacity_bytes(), class_capacity + non_class_capacity,
        class_capacity, non_class_capacity));

  return class_capacity + non_class_capacity;
}

size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  return list == NULL ? 0 : list->reserved_bytes();
}

size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  return list == NULL ? 0 : list->committed_bytes();
}

size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }

size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
  ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
  if (chunk_manager == NULL) {
    return 0;
  }
  chunk_manager->slow_verify();
  return chunk_manager->free_chunks_total_words();
}

size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
  return free_chunks_total_words(mdtype) * BytesPerWord;
}

size_t MetaspaceAux::free_chunks_total_words() {
  return free_chunks_total_words(Metaspace::ClassType) +
         free_chunks_total_words(Metaspace::NonClassType);
}

size_t MetaspaceAux::free_chunks_total_bytes() {
  return free_chunks_total_words() * BytesPerWord;
}

void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
  gclog_or_tty->print(", [Metaspace:");
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_metadata_used,
                        allocated_used_bytes(),
                        reserved_bytes());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_metadata_used/K,
                        allocated_used_bytes()/K,
                        reserved_bytes()/K);
  }

  gclog_or_tty->print("]");
}

// This is printed when PrintGCDetails
void MetaspaceAux::print_on(outputStream* out) {
  Metaspace::MetadataType nct = Metaspace::NonClassType;

  out->print_cr(" Metaspace total "
                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
                " reserved " SIZE_FORMAT "K",
                allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_bytes()/K);

  out->print_cr("  data space     "
                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
                " reserved " SIZE_FORMAT "K",
                allocated_capacity_bytes(nct)/K,
                allocated_used_bytes(nct)/K,
                reserved_bytes(nct)/K);
  if (Metaspace::using_class_space()) {
    Metaspace::MetadataType ct = Metaspace::ClassType;
    out->print_cr("  class space    "
                  SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
                  " reserved " SIZE_FORMAT "K",
                  allocated_capacity_bytes(ct)/K,
                  allocated_used_bytes(ct)/K,
                  reserved_bytes(ct)/K);
  }
}

// Print information for class space and data space separately.
// This is almost the same as above.
void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
  size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
  size_t capacity_bytes = capacity_bytes_slow(mdtype);
  size_t used_bytes = used_bytes_slow(mdtype);
  size_t free_bytes = free_bytes_slow(mdtype);
  size_t used_and_free = used_bytes + free_bytes +
                           free_chunks_capacity_bytes;
  out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
             "K + unused in chunks " SIZE_FORMAT "K  + "
             " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
             "K  capacity in allocated chunks " SIZE_FORMAT "K",
             used_bytes / K,
             free_bytes / K,
             free_chunks_capacity_bytes / K,
             used_and_free / K,
             capacity_bytes / K);
  // Accounting can only be correct if we got the values during a safepoint
  assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
}

// Print total fragmentation for class metaspaces
void MetaspaceAux::print_class_waste(outputStream* out) {
  assert(Metaspace::using_class_space(), "class metaspace not used");
  size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
  size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
  ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    Metaspace* msp = iter.get_next();
    if (msp != NULL) {
      cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
      cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
      cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
      cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
      cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
      cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
      cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
    }
  }
  out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
                SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
                SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
                "large count " SIZE_FORMAT,
                cls_specialized_count, cls_specialized_waste,
                cls_small_count, cls_small_waste,
                cls_medium_count, cls_medium_waste, cls_humongous_count);
}

// Print total fragmentation for data and class metaspaces separately
void MetaspaceAux::print_waste(outputStream* out) {
  size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
  size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;

  ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    Metaspace* msp = iter.get_next();
    if (msp != NULL) {
      specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
      specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
      small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
      small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
      medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
      medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
      humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
    }
  }
  out->print_cr("Total fragmentation waste (words) doesn't count free space");
  out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
                        SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
                        SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
                        "large count " SIZE_FORMAT,
             specialized_count, specialized_waste, small_count,
             small_waste, medium_count, medium_waste, humongous_count);
  if (Metaspace::using_class_space()) {
    print_class_waste(out);
  }
}

// Dump global metaspace things from the end of ClassLoaderDataGraph
void MetaspaceAux::dump(outputStream* out) {
  out->print_cr("All Metaspace:");
  out->print("data space: "); print_on(out, Metaspace::NonClassType);
  out->print("class space: "); print_on(out, Metaspace::ClassType);
  print_waste(out);
}

void MetaspaceAux::verify_free_chunks() {
  Metaspace::chunk_manager_metadata()->verify();
  if (Metaspace::using_class_space()) {
    Metaspace::chunk_manager_class()->verify();
  }
}

void MetaspaceAux::verify_capacity() {
#ifdef ASSERT
  size_t running_sum_capacity_bytes = allocated_capacity_bytes();
  // For purposes of the running sum of capacity, verify against capacity
  size_t capacity_in_use_bytes = capacity_bytes_slow();
  assert(running_sum_capacity_bytes == capacity_in_use_bytes,
    err_msg("allocated_capacity_words() * BytesPerWord " SIZE_FORMAT
            " capacity_bytes_slow()" SIZE_FORMAT,
            running_sum_capacity_bytes, capacity_in_use_bytes));
  for (Metaspace::MetadataType i = Metaspace::ClassType;
       i < Metaspace:: MetadataTypeCount;
       i = (Metaspace::MetadataType)(i + 1)) {
    size_t capacity_in_use_bytes = capacity_bytes_slow(i);
    assert(allocated_capacity_bytes(i) == capacity_in_use_bytes,
      err_msg("allocated_capacity_bytes(%u) " SIZE_FORMAT
              " capacity_bytes_slow(%u)" SIZE_FORMAT,
              i, allocated_capacity_bytes(i), i, capacity_in_use_bytes));
  }
#endif
}

void MetaspaceAux::verify_used() {
#ifdef ASSERT
  size_t running_sum_used_bytes = allocated_used_bytes();
  // For purposes of the running sum of used, verify against used
  size_t used_in_use_bytes = used_bytes_slow();
  assert(allocated_used_bytes() == used_in_use_bytes,
    err_msg("allocated_used_bytes() " SIZE_FORMAT
            " used_bytes_slow()" SIZE_FORMAT,
            allocated_used_bytes(), used_in_use_bytes));
  for (Metaspace::MetadataType i = Metaspace::ClassType;
       i < Metaspace:: MetadataTypeCount;
       i = (Metaspace::MetadataType)(i + 1)) {
    size_t used_in_use_bytes = used_bytes_slow(i);
    assert(allocated_used_bytes(i) == used_in_use_bytes,
      err_msg("allocated_used_bytes(%u) " SIZE_FORMAT
              " used_bytes_slow(%u)" SIZE_FORMAT,
              i, allocated_used_bytes(i), i, used_in_use_bytes));
  }
#endif
}

void MetaspaceAux::verify_metrics() {
  verify_capacity();
  verify_used();
}


// Metaspace methods

size_t Metaspace::_first_chunk_word_size = 0;
size_t Metaspace::_first_class_chunk_word_size = 0;

Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
  initialize(lock, type);
}

Metaspace::~Metaspace() {
  delete _vsm;
  if (using_class_space()) {
    delete _class_vsm;
  }
}

VirtualSpaceList* Metaspace::_space_list = NULL;
VirtualSpaceList* Metaspace::_class_space_list = NULL;

ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
ChunkManager* Metaspace::_chunk_manager_class = NULL;

#define VIRTUALSPACEMULTIPLIER 2

#ifdef _LP64
void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
  // Figure out the narrow_klass_base and the narrow_klass_shift.  The
  // narrow_klass_base is the lower of the metaspace base and the cds base
  // (if cds is enabled).  The narrow_klass_shift depends on the distance
  // between the lower base and higher address.
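  // Illustrative example: if the whole span from lower_base to higher_address
  // fits in an unsigned 32-bit offset (< 4G), a zero shift is used; otherwise
  // the shift becomes LogKlassAlignmentInBytes, widening the encodable range
  // accordingly.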
  address lower_base;
  address higher_address;
  if (UseSharedSpaces) {
    higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
                          (address)(metaspace_base + class_metaspace_size()));
    lower_base = MIN2(metaspace_base, cds_base);
  } else {
    higher_address = metaspace_base + class_metaspace_size();
    lower_base = metaspace_base;
  }
  Universe::set_narrow_klass_base(lower_base);
  if ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint) {
    Universe::set_narrow_klass_shift(0);
  } else {
    assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
    Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
  }
}

// Return TRUE if the specified metaspace_base and cds_base are close enough
// to work with compressed klass pointers.
bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
  assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
  address lower_base = MIN2((address)metaspace_base, cds_base);
  address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
                                (address)(metaspace_base + class_metaspace_size()));
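  // "Close enough" means the whole span can be encoded as a 32-bit offset from
  // the common lower base, i.e. a zero narrow klass shift still works.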
  return ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint);
}

// Try to allocate the metaspace at the requested addr.
void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
  assert(using_class_space(), "called improperly");
  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
  assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
         "Metaspace size is too big");

  ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(),
                                             os::vm_allocation_granularity(),
                                             false, requested_addr, 0);
  if (!metaspace_rs.is_reserved()) {
    if (UseSharedSpaces) {
      // Keep trying to allocate the metaspace, increasing the requested_addr
      // by 1GB each time, until we reach an address that will no longer allow
      // use of CDS with compressed klass pointers.
      char *addr = requested_addr;
      while (!metaspace_rs.is_reserved() && (addr + 1*G > addr) &&
             can_use_cds_with_metaspace_addr(addr + 1*G, cds_base)) {
        addr = addr + 1*G;
        metaspace_rs = ReservedSpace(class_metaspace_size(),
                                     os::vm_allocation_granularity(), false, addr, 0);
      }
    }

    // If no allocation has succeeded yet, try to allocate the space anywhere.
    // If that also fails, it is a fatal out-of-memory condition.  At this point
    // we cannot fall back to allocating the metaspace as if
    // UseCompressedClassPointers were off, because too much initialization that
    // depends on UseCompressedClassPointers has already happened.
    // So, UseCompressedClassPointers cannot be turned off at this point.
    if (!metaspace_rs.is_reserved()) {
      metaspace_rs = ReservedSpace(class_metaspace_size(),
                                   os::vm_allocation_granularity(), false);
      if (!metaspace_rs.is_reserved()) {
        vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes",
                                              class_metaspace_size()));
      }
    }
  }

  // If we got here then the metaspace got allocated.
  MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);

  // Verify that we can use shared spaces.  Otherwise, turn off CDS.
  if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
    FileMapInfo::stop_sharing_and_unmap(
        "Could not allocate metaspace at a compatible address");
  }

  set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
                                  UseSharedSpaces ? (address)cds_base : 0);

  initialize_class_space(metaspace_rs);

  if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
    gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
                            Universe::narrow_klass_base(), Universe::narrow_klass_shift());
    gclog_or_tty->print_cr("Metaspace Size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
                           class_metaspace_size(), metaspace_rs.base(), requested_addr);
  }
}

// For UseCompressedClassPointers the class space is reserved above the top of
// the Java heap.  The argument passed in is at the base of the compressed space.
void Metaspace::initialize_class_space(ReservedSpace rs) {
  // The reserved space size may be bigger because of alignment, esp with UseLargePages
  assert(rs.size() >= CompressedClassSpaceSize,
         err_msg(SIZE_FORMAT " < " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
  assert(using_class_space(), "Must be using class space");
  _class_space_list = new VirtualSpaceList(rs);
  _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
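  // Note: the class space uses a smaller chunk geometry (ClassSmallChunk and
  // ClassMediumChunk) than the non-class metaspace, since class metadata
  // allocations are typically small.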
}

#endif

void Metaspace::global_initialize() {
  // Initialize the alignment for shared spaces.
  int max_alignment = os::vm_page_size();
  size_t cds_total = 0;

  set_class_metaspace_size(align_size_up(CompressedClassSpaceSize,
                                         os::vm_allocation_granularity()));

  MetaspaceShared::set_max_alignment(max_alignment);

  if (DumpSharedSpaces) {
    SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
    SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
    SharedMiscDataSize  = align_size_up(SharedMiscDataSize, max_alignment);
    SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize, max_alignment);

    // Initialize with the sum of the shared space sizes.  The read-only
    // and read write metaspace chunks will be allocated out of this and the
    // remainder is the misc code and data chunks.
    cds_total = FileMapInfo::shared_spaces_size();
    _space_list = new VirtualSpaceList(cds_total/wordSize);
    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);

#ifdef _LP64
    // Set the compressed klass pointer base so that decoding of these pointers works
    // properly when creating the shared archive.
    assert(UseCompressedOops && UseCompressedClassPointers,
      "UseCompressedOops and UseCompressedClassPointers must be set");
    Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
    if (TraceMetavirtualspaceAllocation && Verbose) {
      gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
                             _space_list->current_virtual_space()->bottom());
    }

    // Set the shift to zero.
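    // (The assert below guarantees that the archive plus the class space fit
    // within a 32-bit offset of the narrow klass base, so no shift is needed.)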
    assert(class_metaspace_size() < (uint64_t)(max_juint) - cds_total,
           "CDS region is too large");
    Universe::set_narrow_klass_shift(0);
#endif

  } else {
    // If using shared space, open the file that contains the shared space
    // and map in the memory before initializing the rest of metaspace (so
    // the addresses don't conflict)
    address cds_address = NULL;
    if (UseSharedSpaces) {
      FileMapInfo* mapinfo = new FileMapInfo();
      memset(mapinfo, 0, sizeof(FileMapInfo));

      // Open the shared archive file, read and validate the header. If
      // initialization fails, shared spaces [UseSharedSpaces] are
      // disabled and the file is closed.
      // Map in spaces now also
      if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
        FileMapInfo::set_current_info(mapinfo);
      } else {
        assert(!mapinfo->is_open() && !UseSharedSpaces,
               "archive file not closed or shared spaces not disabled.");
      }
      cds_total = FileMapInfo::shared_spaces_size();
      cds_address = (address)mapinfo->region_base(0);
    }

#ifdef _LP64
    // If UseCompressedClassPointers is set then allocate the metaspace area
    // above the heap and above the CDS area (if it exists).
    if (using_class_space()) {
      if (UseSharedSpaces) {
        allocate_metaspace_compressed_klass_ptrs((char *)(cds_address + cds_total), cds_address);
      } else {
        allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0);
      }
    }
#endif

    // Initialize these before initializing the VirtualSpaceList
    _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
    _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
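    // (Illustrative: with the 64-bit default InitialBootClassLoaderMetaspaceSize
    // of 4M, this yields a first chunk of 512K words.)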
    // Make the first class chunk bigger than a medium chunk so it's not put
    // on the medium chunk list.   The next chunk will be small and progress
    // from there.  This size was determined by running -version.
    _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
                                       (CompressedClassSpaceSize/BytesPerWord)*2);
    _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
    // Arbitrarily set the initial virtual space to a multiple
    // of the boot class loader size.
    size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
    // Initialize the list of virtual spaces.
    _space_list = new VirtualSpaceList(word_size);
    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
  }
}

Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
                                               size_t chunk_word_size,
                                               size_t chunk_bunch) {
  // Get a chunk from the chunk freelist
  Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
  if (chunk != NULL) {
    return chunk;
  }
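  // The free list could not satisfy the request; carve a new chunk out of the
  // virtual space list instead.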

  return get_space_list(mdtype)->get_initialization_chunk(chunk_word_size, chunk_bunch);
}

void Metaspace::initialize(Mutex* lock, MetaspaceType type) {

  assert(space_list() != NULL,
    "Metadata VirtualSpaceList has not been initialized");
  assert(chunk_manager_metadata() != NULL,
    "Metadata ChunkManager has not been initialized");

  _vsm = new SpaceManager(NonClassType, lock);
  if (_vsm == NULL) {
    return;
  }
  size_t word_size;
  size_t class_word_size;
  vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);

  if (using_class_space()) {
    assert(class_space_list() != NULL,
      "Class VirtualSpaceList has not been initialized");
    assert(chunk_manager_class() != NULL,
      "Class ChunkManager has not been initialized");

    // Allocate SpaceManager for classes.
    _class_vsm = new SpaceManager(ClassType, lock);
    if (_class_vsm == NULL) {
      return;
    }
  }

  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);

  // Allocate chunk for metadata objects
  Metachunk* new_chunk = get_initialization_chunk(NonClassType,
                                                  word_size,
                                                  vsm()->medium_chunk_bunch());
  assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
  if (new_chunk != NULL) {
    // Add to this manager's list of chunks in use and current_chunk().
    vsm()->add_chunk(new_chunk, true);
  }

  // Allocate chunk for class metadata objects
  if (using_class_space()) {
    Metachunk* class_chunk = get_initialization_chunk(ClassType,
                                                      class_word_size,
                                                      class_vsm()->medium_chunk_bunch());
    if (class_chunk != NULL) {
      class_vsm()->add_chunk(class_chunk, true);
    }
  }

  _alloc_record_head = NULL;
  _alloc_record_tail = NULL;
}

size_t Metaspace::align_word_size_up(size_t word_size) {
  size_t byte_size = word_size * wordSize;
  return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
}
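// (Illustration: assuming 8-byte words and a 4K allocation granularity, a
// request of 1000 words (8000 bytes) rounds up to 1024 words (8192 bytes).)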

MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
  // DumpSharedSpaces doesn't use class metadata area (yet)
  // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
  if (is_class_space_allocation(mdtype)) {
    return  class_vsm()->allocate(word_size);
  } else {
    return  vsm()->allocate(word_size);
  }
}

MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
  MetaWord* result;
  MetaspaceGC::set_expand_after_GC(true);
  size_t before_inc = MetaspaceGC::capacity_until_GC();
  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord;
  MetaspaceGC::inc_capacity_until_GC(delta_bytes);
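  // Raising the high-water mark first lets the allocation below proceed without
  // immediately triggering another metadata-induced GC.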
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
      " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC());
  }

  result = allocate(word_size, mdtype);

  return result;
}

// Space allocated in the Metaspace.  This may
// be across several metadata virtual spaces.
char* Metaspace::bottom() const {
  assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
  return (char*)vsm()->current_chunk()->bottom();
}

size_t Metaspace::used_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
  }
}

size_t Metaspace::free_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_free_in_chunks_in_use();
  }
}

// Space capacity in the Metaspace.  It includes
// space in the list of chunks from which allocations
// have been made.  It does not include space in the global freelist or
// space available in the dictionary, since that space
// is already counted in some chunk.
size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_capacity_in_chunks_in_use();
  }
}

size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
  return used_words_slow(mdtype) * BytesPerWord;
}

size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
  return capacity_words_slow(mdtype) * BytesPerWord;
}

void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
  if (SafepointSynchronize::is_at_safepoint()) {
    assert(Thread::current()->is_VM_thread(), "should be the VM thread");
    // Don't take Heap_lock
    MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
    if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
      // Dark matter.  Too small for dictionary.
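      // (In debug builds the freed words are scribbled with 0xf5f5f5f5 so that
      // stale references to this memory are easier to spot.)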
#ifdef ASSERT
      Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
#endif
      return;
    }
    if (is_class && using_class_space()) {
      class_vsm()->deallocate(ptr, word_size);
    } else {
      vsm()->deallocate(ptr, word_size);
    }
  } else {
    MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);

    if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
      // Dark matter.  Too small for dictionary.
#ifdef ASSERT
      Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
#endif
      return;
    }
    if (is_class && using_class_space()) {
      class_vsm()->deallocate(ptr, word_size);
    } else {
      vsm()->deallocate(ptr, word_size);
    }
  }
}

Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              bool read_only, MetaspaceObj::Type type, TRAPS) {
  if (HAS_PENDING_EXCEPTION) {
    assert(false, "Should not allocate with exception pending");
    return NULL;  // caller does a CHECK_NULL too
  }

  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;

  // SSS: Should we align the allocations and make sure the sizes are aligned?
  MetaWord* result = NULL;

  assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
        "ClassLoaderData::the_null_class_loader_data() should have been used.");
  // Allocate in metaspaces without taking out a lock, because it deadlocks
  // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
  // to revisit this for application class data sharing.
  if (DumpSharedSpaces) {
    assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
    Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
    result = space->allocate(word_size, NonClassType);
    if (result == NULL) {
      report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
    } else {
      space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
    }
    return Metablock::initialize(result, word_size);
  }

  result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);

  if (result == NULL) {
    // Try to clean out some memory and retry.
    result =
      Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
        loader_data, word_size, mdtype);
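    // satisfy_failed_metadata_allocation() typically induces a GC so that class
    // unloading can reclaim metaspace, and may expand the capacity before the
    // allocation is retried.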

    // If result is still null, we are out of memory.
    if (result == NULL) {
      if (Verbose && TraceMetadataChunkAllocation) {
        gclog_or_tty->print_cr("Metaspace allocation failed for size "
          SIZE_FORMAT, word_size);
        if (loader_data->metaspace_or_null() != NULL) loader_data->dump(gclog_or_tty);
        MetaspaceAux::dump(gclog_or_tty);
      }
      // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
      const char* space_string = is_class_space_allocation(mdtype) ? "Compressed class space" :
                                                                     "Metadata space";
      report_java_out_of_memory(space_string);

      if (JvmtiExport::should_post_resource_exhausted()) {
        JvmtiExport::post_resource_exhausted(
            JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
            space_string);
      }
      if (is_class_space_allocation(mdtype)) {
        THROW_OOP_0(Universe::out_of_memory_error_class_metaspace());
      } else {
        THROW_OOP_0(Universe::out_of_memory_error_metaspace());
      }
    }
  }
  return Metablock::initialize(result, word_size);
}

void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
  assert(DumpSharedSpaces, "sanity");

  AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize);
  if (_alloc_record_head == NULL) {
    _alloc_record_head = _alloc_record_tail = rec;
  } else {
    _alloc_record_tail->_next = rec;
    _alloc_record_tail = rec;
  }
}

void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
  assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");

  address last_addr = (address)bottom();

  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
    address ptr = rec->_ptr;
    if (last_addr < ptr) {
      closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
    }
    closure->doit(ptr, rec->_type, rec->_byte_size);
    last_addr = ptr + rec->_byte_size;
  }

  address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
  if (last_addr < top) {
    closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
  }
}

void Metaspace::purge(MetadataType mdtype) {
  get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
}

void Metaspace::purge() {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  purge(NonClassType);
  if (using_class_space()) {
    purge(ClassType);
  }
}

void Metaspace::print_on(outputStream* out) const {
  // Print both class virtual space counts and metaspace.
  if (Verbose) {
    vsm()->print_on(out);
    if (using_class_space()) {
      class_vsm()->print_on(out);
    }
  }
}

bool Metaspace::contains(const void * ptr) {
  if (MetaspaceShared::is_in_shared_space(ptr)) {
    return true;
  }
  // This is checked while unlocked.  As long as the virtualspaces are added
  // at the end, the pointer will be in one of them.  The virtual spaces
  // aren't deleted presently.  When they are, some sort of locking might
  // be needed.  Note, locking this can cause inversion problems with the
  // caller in MetaspaceObj::is_metadata() function.
  return space_list()->contains(ptr) ||
         (using_class_space() && class_space_list()->contains(ptr));
}

void Metaspace::verify() {
  vsm()->verify();
  if (using_class_space()) {
    class_vsm()->verify();
  }
}

void Metaspace::dump(outputStream* const out) const {
  out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
  vsm()->dump(out);
  if (using_class_space()) {
    out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
    class_vsm()->dump(out);
  }
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT

class TestMetaspaceAuxTest : AllStatic {
 public:
  static void test_reserved() {
    size_t reserved = MetaspaceAux::reserved_bytes();

    assert(reserved > 0, "assert");

    size_t committed  = MetaspaceAux::committed_bytes();
    assert(committed <= reserved, "assert");

    size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
    assert(reserved_metadata > 0, "assert");
    assert(reserved_metadata <= reserved, "assert");

    if (UseCompressedClassPointers) {
      size_t reserved_class    = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
      assert(reserved_class > 0, "assert");
      assert(reserved_class < reserved, "assert");
    }
  }

  static void test_committed() {
    size_t committed = MetaspaceAux::committed_bytes();

    assert(committed > 0, "assert");

    size_t reserved  = MetaspaceAux::reserved_bytes();
    assert(committed <= reserved, "assert");

    size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
    assert(committed_metadata > 0, "assert");
    assert(committed_metadata <= committed, "assert");

    if (UseCompressedClassPointers) {
      size_t committed_class    = MetaspaceAux::committed_bytes(Metaspace::ClassType);
      assert(committed_class > 0, "assert");
      assert(committed_class < committed, "assert");
    }
  }

  static void test_virtual_space_list_large_chunk() {
    VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
    MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
    // Request a size larger than VirtualSpaceSize (256k), plus one extra page so
    // that it is _not_ vm_allocation_granularity aligned on Windows.
    size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
    large_size += (os::vm_page_size()/BytesPerWord);
    vs_list->get_new_chunk(large_size, large_size, 0);
  }

  static void test() {
    test_reserved();
    test_committed();
    test_virtual_space_list_large_chunk();
  }
};

void TestMetaspaceAux_test() {
  TestMetaspaceAuxTest::test();
}

#endif