/*
 * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/freeList.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/filemap.hpp"
#include "memory/freeList.hpp"
#include "memory/metablock.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/globals.hpp"
#include "runtime/java.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"

typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
// Set metaspace_slow_verify to true to enable slow integrity
// checking of the free chunk lists
const bool metaspace_slow_verify = false;

// Parameters for stress mode testing
const uint metadata_deallocate_a_lot_block = 10;
const uint metadata_deallocate_a_lot_chunk = 3;
size_t const allocation_from_dictionary_limit = 64 * K;

MetaWord* last_allocated = 0;

size_t Metaspace::_class_metaspace_size;

// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};

enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
  MediumChunk = 8 * K,
  HumongousChunkGranularity = 8
};
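
// Illustrative note (not part of the VM): chunk sizes are in words, so on a
// 64-bit VM (BytesPerWord == 8) a SmallChunk of 512 words occupies 4K bytes
// and a MediumChunk of 8K words occupies 64K bytes.  Humongous chunks are
// sized on demand and rounded up to a multiple of HumongousChunkGranularity
// words, along the lines of:
//
//   size_t humongous_words = align_size_up(requested_words, HumongousChunkGranularity);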

static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bounds");
  return (ChunkIndex) (i+1);
}
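
// Illustrative note (not part of the VM): next_chunk_index() supports the
// idiom used throughout this file for walking every chunk list:
//
//   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
//     // visit the list at index i
//   }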

// Originally _capacity_until_GC was set to MetaspaceSize here but
// the default MetaspaceSize before argument processing was being
// used which was not the desired value.  See the code
// in should_expand() to see how the initialization is handled
// now.
size_t MetaspaceGC::_capacity_until_GC = 0;
bool MetaspaceGC::_expand_after_GC = false;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

// Blocks of space for metadata are allocated out of Metachunks.
//
// Metachunks are allocated out of MetadataVirtualspaces and once
// allocated there is no explicit link between a Metachunk and
// the MetadataVirtualspaces from which it was allocated.
//
// Each SpaceManager maintains a
// list of the chunks it is using and the current chunk.  The current
// chunk is the chunk from which allocations are done.  Space freed in
// a chunk is placed on the free list of blocks (BlockFreelist) and
// reused from there.
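
// Illustrative sketch (not part of the VM; allocate_metadata is a
// hypothetical name): the allocation path described above, in outline:
//
//   MetaWord* allocate_metadata(SpaceManager* sm, size_t word_size) {
//     // First try space previously freed back to the block freelist ...
//     MetaWord* p = sm->block_freelists()->get_block(word_size);
//     if (p == NULL) {
//       // ... otherwise carve a new block out of the current chunk.
//       p = sm->allocate(word_size);
//     }
//     return p;
//   }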

typedef class FreeList<Metachunk> ChunkList;

// Manages the global free lists of chunks.
// Has three lists of free chunks (specialized, small, and medium), a
// dictionary for humongous chunks, and a total size and count that
// cover all of them.

class ChunkManager VALUE_OBJ_CLASS_SPEC {

  // Free list of chunks of different sizes.
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  //   HumongousChunk
  ChunkList _free_chunks[NumberOfFreeLists];

  //   HumongousChunk
  ChunkTreeDictionary _humongous_dictionary;

  // Total size and count of the free chunks across all lists
  // managed by this ChunkManager
  size_t _free_chunks_total;
  size_t _free_chunks_count;

  void dec_free_chunks_total(size_t v) {
    assert(_free_chunks_count > 0 &&
             _free_chunks_total > 0,
             "About to go negative");
    Atomic::add_ptr(-1, &_free_chunks_count);
    jlong minus_v = (jlong) - (jlong) v;
    Atomic::add_ptr(minus_v, &_free_chunks_total);
  }

  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }
  void verify_free_chunks_count();

 public:

  ChunkManager() : _free_chunks_total(0), _free_chunks_count(0) {}

  // Allocate a chunk from, or return a chunk to, the global freelist.
  Metachunk* chunk_freelist_allocate(size_t word_size);
  void chunk_freelist_deallocate(Metachunk* chunk);

  // Map a size to a list index assuming that there are lists
  // for special, small, medium, and humongous chunks.
  static ChunkIndex list_index(size_t size);

  // Remove the chunk from its freelist.  It is
  // expected to be on one of the _free_chunks[] lists.
  void remove_chunk(Metachunk* chunk);

  // Add the simple linked list of chunks to the freelist of chunks
  // of type index.
  void return_chunks(ChunkIndex index, Metachunk* chunks);

  // Total of the space in the free chunks list
  size_t free_chunks_total();
  size_t free_chunks_total_in_bytes();

  // Number of chunks in the free chunks list
  size_t free_chunks_count();

  void inc_free_chunks_total(size_t v, size_t count = 1) {
    Atomic::add_ptr(count, &_free_chunks_count);
    Atomic::add_ptr(v, &_free_chunks_total);
  }
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  ChunkList* free_chunks(ChunkIndex index);

  // Returns the list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  // Add and remove from a list by size.  Selects
  // list based on size of chunk.
  void free_chunks_put(Metachunk* chunk);
  Metachunk* free_chunks_get(size_t chunk_word_size);

  // Debug support
  void verify();
  void slow_verify() {
    if (metaspace_slow_verify) {
      verify();
    }
  }
  void locked_verify();
  void slow_locked_verify() {
    if (metaspace_slow_verify) {
      locked_verify();
    }
  }
  void verify_free_chunks_total();

  void locked_print_free_chunks(outputStream* st);
  void locked_print_sum_free_chunks(outputStream* st);

  void print_on(outputStream* st);
};
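
// Illustrative note (not from the source): list_index() maps an exact chunk
// word size onto a freelist index, conceptually:
//
//   if (size == SpecializedChunk)                        return SpecializedIndex;
//   if (size == SmallChunk  || size == ClassSmallChunk)  return SmallIndex;
//   if (size == MediumChunk || size == ClassMediumChunk) return MediumIndex;
//   return HumongousIndex;  // everything else goes to the humongous dictionary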

// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist VALUE_OBJ_CLASS_SPEC {
  BlockTreeDictionary* _dictionary;
  static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size);

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get and return a block to the free list
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() {
    if (dictionary() == NULL) {
      return 0;
    } else {
      return dictionary()->total_size();
    }
  }

  void print_on(outputStream* st) const;
};
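
// Illustrative sketch (not part of the VM): a BlockFreelist round-trip.
// Freed metadata is handed back with return_block() and may be handed out
// again when a later request matches the block size exactly:
//
//   BlockFreelist bf;
//   bf.return_block(p, word_size);          // p was deallocated
//   MetaWord* q = bf.get_block(word_size);  // may return p, or NULL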

class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // total reserved space in the VirtualSpace
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  // Address of the next available space in _virtual_space
  MetaWord* _top;
  // count of chunks contained in this VirtualSpace
  uintx _container_count;

  // Convenience functions to access the _virtual_space
  char* low()  const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

  // The first Metachunk will be allocated at the bottom of the
  // VirtualSpace
  Metachunk* first_chunk() { return (Metachunk*) bottom(); }

  void inc_container_count();
#ifdef ASSERT
  uint container_count_slow();
#endif

 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
  ~VirtualSpaceNode();

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }

  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the VirtualSpace
  bool is_available(size_t word_size) { return _top + word_size <= end(); }

  MetaWord* top() const { return _top; }
  void inc_top(size_t word_size) { _top += word_size; }

  uintx container_count() { return _container_count; }
  void dec_container_count();
#ifdef ASSERT
  void verify_container_count();
#endif

  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;
  size_t free_words_in_vs() const;

  bool initialize();

  // get space from the virtual space
  Metachunk* take_from_committed(size_t chunk_word_size);

  // Allocate a chunk from the virtual space and return it.
  Metachunk* get_chunk_vs(size_t chunk_word_size);
  Metachunk* get_chunk_vs_with_expand(size_t chunk_word_size);

  // Expands/shrinks the committed space in a virtual space.  Delegates
  // to VirtualSpace
  bool expand_by(size_t words, bool pre_touch = false);
  bool shrink_by(size_t words);

  // In preparation for deleting this node, remove all the chunks
  // in the node from any freelist.
  void purge(ChunkManager* chunk_manager);

#ifdef ASSERT
  // Debug support
  static void verify_virtual_space_total();
  static void verify_virtual_space_count();
  void mangle();
#endif

  void print_on(outputStream* st) const;
};

// byte_size is the size of the associated virtualspace.
VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0), _container_count(0) {
  // align up to vm allocation granularity
  byte_size = align_size_up(byte_size, os::vm_allocation_granularity());

  // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
  // a configurable address, generally at the top of the Java heap so other
  // memory addresses don't conflict.
  if (DumpSharedSpaces) {
    char* shared_base = (char*)SharedBaseAddress;
    _rs = ReservedSpace(byte_size, 0, false, shared_base, 0);
    if (_rs.is_reserved()) {
      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
    } else {
      // Get an mmap region anywhere if the SharedBaseAddress fails.
      _rs = ReservedSpace(byte_size);
    }
    MetaspaceShared::set_shared_rs(&_rs);
  } else {
    _rs = ReservedSpace(byte_size);
  }

  MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
}

void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    assert(chunk->is_free(), "Should be marked free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}

#ifdef ASSERT
uint VirtualSpaceNode::container_count_slow() {
  uint count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    // Don't count the chunks on the free lists.  Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif

// List of VirtualSpaces for metadata allocation.
// It has a _next link for a singly linked list and a MemRegion
// for total space in the VirtualSpace.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Global list of virtual spaces
  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;
  // Free chunk list for all other metadata
  ChunkManager      _chunk_manager;

  // Can this virtual list allocate more than one space?  Also used to
  // determine whether to allocate unlimited small chunks in this virtual space
  bool _is_class;
  bool can_grow() const { return !is_class() || !UseCompressedKlassPointers; }

  // Sum of space in all virtual spaces and number of virtual spaces
  size_t _virtual_space_total;
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  void link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size);

  // Get another virtual space and add it to the list.  This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool grow_vs(size_t vs_word_size);

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  size_t free_bytes();

  Metachunk* get_new_chunk(size_t word_size,
                           size_t grow_chunks_by_words,
                           size_t medium_chunk_bunch);

  // Get the first chunk for a Metaspace.  Used for
  // special cases such as the boot class loader, reflection
  // class loader and anonymous class loader.
  Metachunk* get_initialization_chunk(size_t word_size, size_t chunk_bunch);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  ChunkManager* chunk_manager() { return &_chunk_manager; }
  bool is_class() const { return _is_class; }

  // Allocate the first virtualspace.
  void initialize(size_t word_size);

  size_t virtual_space_total() { return _virtual_space_total; }

  void inc_virtual_space_total(size_t v);
  void dec_virtual_space_total(size_t v);
  void inc_virtual_space_count();
  void dec_virtual_space_count();

  // Unlink empty VirtualSpaceNodes and free them.
  void purge();

  // Used and capacity in the entire list of virtual spaces.
  // These are global values shared by all Metaspaces
  size_t capacity_words_sum();
  size_t capacity_bytes_sum() { return capacity_words_sum() * BytesPerWord; }
  size_t used_words_sum();
  size_t used_bytes_sum() { return used_words_sum() * BytesPerWord; }

  bool contains(const void *ptr);

  void print_on(outputStream* st) const;

  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
};

class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _deallocate_block_a_lot_count;
  static int _deallocate_chunk_a_lot_count;
  static int _allocation_fail_alot_count;

 public:
  static int deallocate_block_a_lot_count() {
    return _deallocate_block_a_lot_count;
  }
  static void set_deallocate_block_a_lot_count(int v) {
    _deallocate_block_a_lot_count = v;
  }
  static void inc_deallocate_block_a_lot_count() {
    _deallocate_block_a_lot_count++;
  }
  static int deallocate_chunk_a_lot_count() {
    return _deallocate_chunk_a_lot_count;
  }
  static void reset_deallocate_chunk_a_lot_count() {
    _deallocate_chunk_a_lot_count = 1;
  }
  static void inc_deallocate_chunk_a_lot_count() {
    _deallocate_chunk_a_lot_count++;
  }

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif

  static void deallocate_chunk_a_lot(SpaceManager* sm,
                                     size_t chunk_word_size);
  static void deallocate_block_a_lot(SpaceManager* sm,
                                     size_t chunk_word_size);

};

int Metadebug::_deallocate_block_a_lot_count = 0;
int Metadebug::_deallocate_chunk_a_lot_count = 0;
int Metadebug::_allocation_fail_alot_count = 0;

//  SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:

  // protects allocations and contains.
  Mutex* const _lock;

  // Type of metadata allocated.
  Metaspace::MetadataType _mdtype;

  // Chunk related size
  size_t _medium_chunk_bunch;

  // List of chunks in use by this SpaceManager.  Allocations
  // are done from the current chunk.  The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Virtual space where allocation comes from.
  VirtualSpaceList* _vs_list;

  // Number of small chunks to allocate to a manager
  // If class space manager, small chunks are unlimited
  static uint const _small_chunk_limit;

  // Sum of all space in allocated blocks
  size_t _allocated_blocks_words;

  // Sum of all allocated chunks
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist _block_freelists;

  // protects virtualspace and chunk expansions
  static const char*  _expand_lock_name;
  static const int    _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) { _chunks_in_use[index] = v; }

  BlockFreelist* block_freelists() const {
    return (BlockFreelist*) &_block_freelists;
  }

  Metaspace::MetadataType mdtype() { return _mdtype; }
  VirtualSpaceList* vs_list() const    { return _vs_list; }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);

  Mutex* lock() const { return _lock; }

  const char* chunk_size_name(ChunkIndex index) const;

 protected:
  void initialize();

 public:
  SpaceManager(Metaspace::MetadataType mdtype,
               Mutex* lock,
               VirtualSpaceList* vs_list);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  // Accessors
  size_t specialized_chunk_size() { return SpecializedChunk; }
  size_t small_chunk_size() { return vs_list()->is_class() ? (size_t) ClassSmallChunk : (size_t) SmallChunk; }
  size_t medium_chunk_size() { return vs_list()->is_class() ? (size_t) ClassMediumChunk : (size_t) MediumChunk; }
  size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }

  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
  size_t allocated_chunks_count() const { return _allocated_chunks_count; }

  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }

  // Increment the per Metaspace and global running sums for Metachunks
  // by the given size.  This is used when a Metachunk is added to
  // the in-use list.
  void inc_size_metrics(size_t words);
  // Increment the per Metaspace and global running sums for Metablocks by the given
  // size.  This is used when a Metablock is allocated.
  void inc_used_metrics(size_t words);
  // Delete the portion of the running sums for this SpaceManager. That is,
  // the global running sums for the Metachunks and Metablocks are
  // decremented for all the Metachunks in-use by this SpaceManager.
  void dec_total_from_size_metrics();

  // Set the sizes for the initial chunks.
  void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
                               size_t* chunk_word_size,
                               size_t* class_chunk_word_size);

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk
  MetaWord* allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size,
  // returned chunk size (for expanding space for chunk allocation).
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
  void verify_chunk_size(Metachunk* chunk);
  NOT_PRODUCT(void mangle_freed_chunks();)
#ifdef ASSERT
  void verify_allocated_blocks_words();
#endif

  size_t get_raw_word_size(size_t word_size) {
    // If only the dictionary is going to be used (i.e., no
    // indexed free list), then there is a minimum size requirement.
    // MinChunkSize is a placeholder for the real minimum size.
    size_t byte_size = word_size * BytesPerWord;

    size_t byte_size_with_overhead = byte_size + Metablock::overhead();

    size_t raw_bytes_size = MAX2(byte_size_with_overhead,
                                 Metablock::min_block_byte_size());
    raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
  }
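
  // Worked example (illustrative, not from the source): on a 64-bit VM
  // (BytesPerWord == 8) a request for 3 words is 24 bytes; adding
  // Metablock::overhead(), raising the result to at least
  // Metablock::min_block_byte_size() and applying ARENA_ALIGN yields a raw
  // word size a few words larger than the 3 words requested.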
};

uint const SpaceManager::_small_chunk_limit = 4;

const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag);

void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count++;
  assert(_container_count == container_count_slow(),
         err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
                 "container_count_slow() " SIZE_FORMAT,
                 _container_count, container_count_slow()));
}

void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count--;
}

#ifdef ASSERT
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
    err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
            "container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
}
#endif

// BlockFreelist methods

BlockFreelist::BlockFreelist() : _dictionary(NULL) {}

BlockFreelist::~BlockFreelist() {
  if (_dictionary != NULL) {
    if (Verbose && TraceMetadataChunkAllocation) {
      _dictionary->print_free_lists(gclog_or_tty);
    }
    delete _dictionary;
  }
}

Metablock* BlockFreelist::initialize_free_chunk(MetaWord* p, size_t word_size) {
  Metablock* block = (Metablock*) p;
  block->set_word_size(word_size);
  block->set_prev(NULL);
  block->set_next(NULL);

  return block;
}

void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  Metablock* free_chunk = initialize_free_chunk(p, word_size);
  if (dictionary() == NULL) {
    _dictionary = new BlockTreeDictionary();
  }
  dictionary()->return_chunk(free_chunk);
}

MetaWord* BlockFreelist::get_block(size_t word_size) {
  if (dictionary() == NULL) {
    return NULL;
  }

  if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
    // Dark matter.  Too small for dictionary.
    return NULL;
  }

  Metablock* free_block =
    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::exactly);
  if (free_block == NULL) {
    return NULL;
  }

  return (MetaWord*) free_block;
}

void BlockFreelist::print_on(outputStream* st) const {
  if (dictionary() == NULL) {
    return;
  }
  dictionary()->print_free_lists(st);
}

// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
#ifdef ASSERT
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}

// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging.  Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  if (!is_available(chunk_word_size)) {
    if (TraceMetadataChunkAllocation) {
      tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
      // Dump some information about the virtual space that is nearly full
      print_on(tty);
    }
    return NULL;
  }

  // Take the space (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Initialize the chunk
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
  return result;
}


// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t words, bool pre_touch) {
  size_t bytes = words * BytesPerWord;
  bool result = virtual_space()->expand_by(bytes, pre_touch);
  if (TraceMetavirtualspaceAllocation && !result) {
    gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed "
                           "for byte size " SIZE_FORMAT, bytes);
    virtual_space()->print();
  }
  return result;
}

// Shrink the virtual space (uncommit some of the committed space)
bool VirtualSpaceNode::shrink_by(size_t words) {
  size_t bytes = words * BytesPerWord;
  virtual_space()->shrink_by(bytes);
  return true;
}

// Allocate another chunk from this virtual space.
Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* result = take_from_committed(chunk_word_size);
  if (result != NULL) {
    inc_container_count();
  }
  return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs_with_expand(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  Metachunk* new_chunk = get_chunk_vs(chunk_word_size);

  if (new_chunk == NULL) {
    // Only a small part of the virtual space is committed when first
    // allocated so committing more here can be expected.
    size_t page_size_words = os::vm_page_size() / BytesPerWord;
    size_t aligned_expand_vs_by_words = align_size_up(chunk_word_size,
                                                    page_size_words);
    expand_by(aligned_expand_vs_by_words, false);
    new_chunk = get_chunk_vs(chunk_word_size);
  }
  return new_chunk;
}

bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // An allocation out of this VirtualSpace that is larger
  // than an initial commit size can waste that initial committed
  // space.
  size_t committed_byte_size = 0;
  bool result = virtual_space()->initialize(_rs, committed_byte_size);
  if (result) {
    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                 (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
      err_msg("Reserved start was not set properly " PTR_FORMAT
        " != " PTR_FORMAT, reserved()->start(), _rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
      err_msg("Reserved size was not set properly " SIZE_FORMAT
        " != " SIZE_FORMAT, reserved()->word_size(),
        _rs.size() / BytesPerWord));
  }

  return result;
}

void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
           "[" PTR_FORMAT ", " PTR_FORMAT ", "
           PTR_FORMAT ", " PTR_FORMAT ")",
           vs, capacity / K,
           (int)(capacity == 0 ? 0 : used * 100 / capacity),
           bottom(), top(), end(),
           vs->high_boundary());
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT

// VirtualSpaceList methods
// Space allocated from the VirtualSpace

VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_virtual_space_total(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_total = _virtual_space_total + v;
}
void VirtualSpaceList::dec_virtual_space_total(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_total = _virtual_space_total - v;
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count++;
}
void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count--;
}

void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk is being removed from the chunks free list.
  dec_free_chunks_total(chunk->capacity_word_size());
}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count.  Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge() {
  assert_lock_strong(SpaceManager::expand_lock());
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager());
      dec_virtual_space_total(vsl->reserved()->word_size());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}

size_t VirtualSpaceList::used_words_sum() {
  size_t allocated_by_vs = 0;
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    // Sum used region [bottom, top) in each virtualspace
    allocated_by_vs += vsl->used_words_in_vs();
  }
  assert(allocated_by_vs >= chunk_manager()->free_chunks_total(),
    err_msg("Total in free chunks " SIZE_FORMAT
            " greater than total from virtual_spaces " SIZE_FORMAT,
            allocated_by_vs, chunk_manager()->free_chunks_total()));
  size_t used =
    allocated_by_vs - chunk_manager()->free_chunks_total();
  return used;
}

// Space available in all MetadataVirtualspaces allocated
// for metadata.  This is the upper limit on the capacity
// of chunks allocated out of all the MetadataVirtualspaces.
size_t VirtualSpaceList::capacity_words_sum() {
  size_t capacity = 0;
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    capacity += vsl->capacity_words_in_vs();
  }
  return capacity;
}

VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _virtual_space_total(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  bool initialization_succeeded = grow_vs(word_size);

  _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
  _chunk_manager.free_chunks(SmallIndex)->set_size(SmallChunk);
  _chunk_manager.free_chunks(MediumIndex)->set_size(MediumChunk);
  assert(initialization_succeeded,
    " VirtualSpaceList initialization should not fail");
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _virtual_space_total(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
  _chunk_manager.free_chunks(SmallIndex)->set_size(ClassSmallChunk);
  _chunk_manager.free_chunks(MediumIndex)->set_size(ClassMediumChunk);
  assert(succeeded, " VirtualSpaceList initialization should not fail");
  link_vs(class_entry, rs.size()/BytesPerWord);
}

size_t VirtualSpaceList::free_bytes() {
  return virtual_space_list()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  if (vs_word_size == 0) {
    return false;
  }
  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert(vs_byte_size % os::vm_page_size() == 0, "Not aligned");

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(new_entry, vs_word_size);
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size) {
  if (virtual_space_list() == NULL) {
      set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_virtual_space_total(vs_word_size);
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  if (TraceMetavirtualspaceAllocation && Verbose) {
    VirtualSpaceNode* vsl = current_virtual_space();
    vsl->print_on(tty);
  }
}

Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
                                           size_t grow_chunks_by_words,
                                           size_t medium_chunk_bunch) {

  // Get a chunk from the chunk freelist
  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);

  if (next != NULL) {
    next->container()->inc_container_count();
  } else {
    // Allocate a chunk out of the current virtual space.
    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  }

  if (next == NULL) {
    // Not enough room in current virtual space.  Try to commit
    // more space.
    size_t expand_vs_by_words = MAX2(medium_chunk_bunch,
                                     grow_chunks_by_words);
    size_t page_size_words = os::vm_page_size() / BytesPerWord;
    size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
                                                        page_size_words);
    bool vs_expanded =
      current_virtual_space()->expand_by(aligned_expand_vs_by_words, false);
    if (!vs_expanded) {
      // Should the capacity of the metaspaces be expanded for
      // this allocation?  If it's the virtual space for classes and is
      // being used for CompressedHeaders, don't allocate a new virtualspace.
      if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
        // Get another virtual space.
        size_t grow_vs_words =
          MAX2((size_t)VirtualSpaceSize, aligned_expand_vs_by_words);
        if (grow_vs(grow_vs_words)) {
          // Got it.  It's on the list now.  Get a chunk from it.
          next = current_virtual_space()->get_chunk_vs_with_expand(grow_chunks_by_words);
        }
      } else {
        // Allocation will fail and induce a GC
        if (TraceMetadataChunkAllocation && Verbose) {
          gclog_or_tty->print_cr("VirtualSpaceList::get_new_chunk():"
            " Fail instead of expand the metaspace");
        }
      }
    } else {
      // The virtual space expanded, get a new chunk
      next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
      assert(next != NULL, "Just expanded, should succeed");
    }
  }

  assert(next == NULL || (next->next() == NULL && next->prev() == NULL),
         "New chunk is still on some list");
  return next;
}

Metachunk* VirtualSpaceList::get_initialization_chunk(size_t chunk_word_size,
                                                      size_t chunk_bunch) {
  // Get a chunk from the chunk freelist
  Metachunk* new_chunk = get_new_chunk(chunk_word_size,
                                       chunk_word_size,
                                       chunk_bunch);
  return new_chunk;
}

void VirtualSpaceList::print_on(outputStream* st) const {
  if (TraceMetadataChunkAllocation && Verbose) {
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* node = iter.get_next();
      node->print_on(st);
    }
  }
}

bool VirtualSpaceList::contains(const void *ptr) {
  VirtualSpaceNode* list = virtual_space_list();
  VirtualSpaceListIterator iter(list);
  while (iter.repeat()) {
    VirtualSpaceNode* node = iter.get_next();
    if (node->reserved()->contains(ptr)) {
      return true;
    }
  }
  return false;
}


// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With perm gen the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GCs.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.
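
// Worked example (illustrative, not from the source): with
// MinMetaspaceFreeRatio == 40 and 60MB of capacity in use after a GC,
// compute_new_size() keeps the HWM at or above 60MB / (1 - 0.40) == 100MB,
// so at least 40% of the capacity is free before the next GC is induced.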

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation (i.e. big enough for a word_size
// allocation), increase by MaxMetaspaceExpansion.  If that is still
// not enough, expand by the size of the allocation (word_size) plus
// the minimum expansion amount (MinMetaspaceExpansion).
size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) {
  size_t before_inc = MetaspaceGC::capacity_until_GC();
  size_t min_delta_words = MinMetaspaceExpansion / BytesPerWord;
  size_t max_delta_words = MaxMetaspaceExpansion / BytesPerWord;
  size_t page_size_words = os::vm_page_size() / BytesPerWord;
  size_t size_delta_words = align_size_up(word_size, page_size_words);
  size_t delta_words = MAX2(size_delta_words, min_delta_words);
  if (delta_words > min_delta_words) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta_words = MAX2(delta_words, max_delta_words);
    if (delta_words > max_delta_words) {
      // This allocation is large but the next ones are probably not
      // so increase by the minimum.
      delta_words = delta_words + min_delta_words;
    }
  }
  return delta_words;
}
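
// Worked example (illustrative, assuming the default MinMetaspaceExpansion
// of 256K and MaxMetaspaceExpansion of 4M): an allocation under 256K grows
// the HWM by 256K; one between 256K and 4M grows it by 4M; anything larger
// grows it by the page-aligned allocation size plus 256K.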

bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {

  // If the user wants a limit, impose one.
  // The reason for someone using this flag is to limit reserved space.  So
  // for non-class virtual space, compare against virtual spaces that are reserved.
  // For class virtual space, we only compare against the committed space, not
  // reserved space, because this is a larger space prereserved for compressed
  // class pointers.
  if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
    // virtual_space_total() is in words; convert to bytes for the comparison
    size_t real_allocated = Metaspace::space_list()->virtual_space_total() * BytesPerWord +
              MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
    if (real_allocated >= MaxMetaspaceSize) {
      return false;
    }
  }
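
  // Illustrative note (not from the source): with -XX:MaxMetaspaceSize=256m,
  // once the space accounted above reaches 256MB this method returns false,
  // the allocation fails, and a GC is induced instead of expanding further.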

  // Class virtual space should always be expanded.  Call GC for the other
  // metadata virtual space.
  if (Metaspace::using_class_space() &&
      (vsl == Metaspace::class_space_list())) return true;

  // If this is part of an allocation after a GC, expand
  // unconditionally.
  if (MetaspaceGC::expand_after_GC()) {
    return true;
  }

  // If the capacity is below the minimum capacity, allow the
  // expansion.  Also set the high-water-mark (capacity_until_GC)
  // to that minimum capacity so that a GC will not be induced
  // until that minimum capacity is exceeded.
  size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes();
  size_t metaspace_size_bytes = MetaspaceSize;
  if (committed_capacity_bytes < metaspace_size_bytes ||
      capacity_until_GC() == 0) {
    set_capacity_until_GC(metaspace_size_bytes);
    return true;
  } else {
    if (committed_capacity_bytes < capacity_until_GC()) {
      return true;
    } else {
      if (TraceMetadataChunkAllocation && Verbose) {
        gclog_or_tty->print_cr("  allocation request size " SIZE_FORMAT
                        "  capacity_until_GC " SIZE_FORMAT
                        "  allocated_capacity_bytes " SIZE_FORMAT,
                        word_size,
                        capacity_until_GC(),
                        MetaspaceAux::allocated_capacity_bytes());
      }
      return false;
    }
  }
}

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Until a faster way of calculating the "used" quantity is implemented,
  // use "capacity".
  const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink below the initial metaspace size (MetaspaceSize)
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
    gclog_or_tty->print_cr("  "
                  "  minimum_free_percentage: %6.2f"
                  "  maximum_used_percentage: %6.2f",
                  minimum_free_percentage,
                  maximum_used_percentage);
    gclog_or_tty->print_cr("  "
                  "   used_after_gc       : %6.1fKB",
                  used_after_gc / (double) K);
  }


  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      MetaspaceGC::set_capacity_until_GC(capacity_until_GC + expand_bytes);
    }
    if (PrintGCDetails && Verbose) {
      size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC();
      gclog_or_tty->print_cr("    expanding:"
                    "  minimum_desired_capacity: %6.1fKB"
                    "  expand_bytes: %6.1fKB"
                    "  MinMetaspaceExpansion: %6.1fKB"
                    "  new metaspace HWM:  %6.1fKB",
                    minimum_desired_capacity / (double) K,
                    expand_bytes / (double) K,
                    MinMetaspaceExpansion / (double) K,
                    new_capacity_until_GC / (double) K);
    }
    return;
  }

  // No expansion, now see if we want to shrink
  // We would never want to shrink more than this
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
  assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT,
    max_shrink_bytes));

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  maximum_free_percentage: %6.2f"
                             "  minimum_used_percentage: %6.2f",
                             maximum_free_percentage,
                             minimum_used_percentage);
      gclog_or_tty->print_cr("  "
                             "  minimum_desired_capacity: %6.1fKB"
                             "  maximum_desired_capacity: %6.1fKB",
                             minimum_desired_capacity / (double) K,
                             maximum_desired_capacity / (double) K);
    }

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the metaspace up again for the next phase.  So
      // we damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
      assert(shrink_bytes <= max_shrink_bytes,
        err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
          shrink_bytes, max_shrink_bytes));
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
      }
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("  "
                      "  shrinking:"
                      "  initSize: %.1fK"
                      "  maximum_desired_capacity: %.1fK",
                      MetaspaceSize / (double) K,
                      maximum_desired_capacity / (double) K);
        gclog_or_tty->print_cr("  "
                      "  shrink_bytes: %.1fK"
                      "  current_shrink_factor: %d"
                      "  new shrink factor: %d"
                      "  MinMetaspaceExpansion: %.1fK",
                      shrink_bytes / (double) K,
                      current_shrink_factor,
                      _shrink_factor,
                      MinMetaspaceExpansion / (double) K);
      }
    }
  }

  // Don't shrink unless it's significant
  if (shrink_bytes >= MinMetaspaceExpansion &&
      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
    MetaspaceGC::set_capacity_until_GC(capacity_until_GC - shrink_bytes);
  }
}

// Metadebug methods

void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm,
                                       size_t chunk_word_size){
#ifdef ASSERT
  VirtualSpaceList* vsl = sm->vs_list();
  if (MetaDataDeallocateALot &&
      Metadebug::deallocate_chunk_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
    Metadebug::reset_deallocate_chunk_a_lot_count();
    for (uint i = 0; i < metadata_deallocate_a_lock_chunk; i++) {
      Metachunk* dummy_chunk = vsl->current_virtual_space()->take_from_committed(chunk_word_size);
      if (dummy_chunk == NULL) {
        break;
      }
      vsl->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);

      if (TraceMetadataChunkAllocation && Verbose) {
        gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ",
                               sm->sum_count_in_chunks_in_use());
        dummy_chunk->print_on(gclog_or_tty);
        gclog_or_tty->print_cr("  Free chunks total %d  count %d",
                               vsl->chunk_manager()->free_chunks_total(),
                               vsl->chunk_manager()->free_chunks_count());
      }
    }
  } else {
    Metadebug::inc_deallocate_chunk_a_lot_count();
  }
#endif
}

void Metadebug::deallocate_block_a_lot(SpaceManager* sm,
                                       size_t raw_word_size){
#ifdef ASSERT
  if (MetaDataDeallocateALot &&
        Metadebug::deallocate_block_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
    Metadebug::set_deallocate_block_a_lot_count(0);
    for (uint i = 0; i < metadata_deallocate_a_lot_block; i++) {
      MetaWord* dummy_block = sm->allocate_work(raw_word_size);
      if (dummy_block == 0) {
        break;
      }
      sm->deallocate(dummy_block, raw_word_size);
    }
  } else {
    Metadebug::inc_deallocate_block_a_lot_count();
  }
#endif
}

void Metadebug::init_allocation_fail_alot_count() {
  if (MetadataAllocationFailALot) {
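    // os::random() yields a pseudo-random non-negative jint, so the
    // expression below picks a countdown in roughly
    // [1, MetadataAllocationFailALotInterval]; an allocation is then forced
    // to fail once the countdown reaches zero (see test_metadata_failure()).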
    _allocation_fail_alot_count =
      1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
  }
}

#ifdef ASSERT
bool Metadebug::test_metadata_failure() {
  if (MetadataAllocationFailALot &&
      Threads::is_vm_complete()) {
    if (_allocation_fail_alot_count > 0) {
      _allocation_fail_alot_count--;
    } else {
      if (TraceMetadataChunkAllocation && Verbose) {
        gclog_or_tty->print_cr("Metadata allocation failing for "
                               "MetadataAllocationFailALot");
      }
      init_allocation_fail_alot_count();
      return true;
    }
  }
  return false;
}
#endif

// ChunkManager methods

size_t ChunkManager::free_chunks_total() {
  return _free_chunks_total;
}

size_t ChunkManager::free_chunks_total_in_bytes() {
  return free_chunks_total() * BytesPerWord;
}

size_t ChunkManager::free_chunks_count() {
#ifdef ASSERT
  if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
    MutexLockerEx cl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
    // This lock is only needed in debug because the verification
    // of the _free_chunks_totals walks the list of free chunks
    slow_locked_verify_free_chunks_count();
  }
#endif
  return _free_chunks_count;
}

void ChunkManager::locked_verify_free_chunks_total() {
  assert_lock_strong(SpaceManager::expand_lock());
  assert(sum_free_chunks() == _free_chunks_total,
    err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
           " same as sum " SIZE_FORMAT, _free_chunks_total,
           sum_free_chunks()));
}

void ChunkManager::verify_free_chunks_total() {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
  locked_verify_free_chunks_total();
}

void ChunkManager::locked_verify_free_chunks_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  assert(sum_free_chunks_count() == _free_chunks_count,
    err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
           " same as sum " SIZE_FORMAT, _free_chunks_count,
           sum_free_chunks_count()));
}

void ChunkManager::verify_free_chunks_count() {
#ifdef ASSERT
  MutexLockerEx cl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
  locked_verify_free_chunks_count();
#endif
}

void ChunkManager::verify() {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
  locked_verify();
}

void ChunkManager::locked_verify() {
  locked_verify_free_chunks_count();
  locked_verify_free_chunks_total();
}

void ChunkManager::locked_print_free_chunks(outputStream* st) {
  assert_lock_strong(SpaceManager::expand_lock());
  st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
                _free_chunks_total, _free_chunks_count);
}

void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
  assert_lock_strong(SpaceManager::expand_lock());
  st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
                sum_free_chunks(), sum_free_chunks_count());
}
ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
  return &_free_chunks[index];
}

// These methods that sum the free chunk lists are used in printing
// methods that are used in product builds.
size_t ChunkManager::sum_free_chunks() {
  assert_lock_strong(SpaceManager::expand_lock());
  size_t result = 0;
  for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
    ChunkList* list = free_chunks(i);

    if (list == NULL) {
      continue;
    }

    result = result + list->count() * list->size();
  }
  result = result + humongous_dictionary()->total_size();
  return result;
}

size_t ChunkManager::sum_free_chunks_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  size_t count = 0;
  for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
    ChunkList* list = free_chunks(i);
    if (list == NULL) {
      continue;
    }
    count = count + list->count();
  }
  count = count + humongous_dictionary()->total_free_blocks();
  return count;
}

ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
  ChunkIndex index = list_index(word_size);
  assert(index < HumongousIndex, "No humongous list");
  return free_chunks(index);
}

void ChunkManager::free_chunks_put(Metachunk* chunk) {
  assert_lock_strong(SpaceManager::expand_lock());
  ChunkList* free_list = find_free_chunks_list(chunk->word_size());
  chunk->set_next(free_list->head());
  free_list->set_head(chunk);
  // chunk is being returned to the chunk free list
  inc_free_chunks_total(chunk->capacity_word_size());
  slow_locked_verify();
}

void ChunkManager::chunk_freelist_deallocate(Metachunk* chunk) {
  // The deallocation of a chunk originates in the freelist
  // management code for a Metaspace and does not hold the
  // lock.
  assert(chunk != NULL, "Deallocating NULL");
  assert_lock_strong(SpaceManager::expand_lock());
  slow_locked_verify();
  if (TraceMetadataChunkAllocation) {
    tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
                  PTR_FORMAT "  size " SIZE_FORMAT,
                  chunk, chunk->word_size());
  }
  free_chunks_put(chunk);
}

Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  slow_locked_verify();

  Metachunk* chunk = NULL;
  if (list_index(word_size) != HumongousIndex) {
    ChunkList* free_list = find_free_chunks_list(word_size);
    assert(free_list != NULL, "Sanity check");

    chunk = free_list->head();
    debug_only(Metachunk* debug_head = chunk;)

    if (chunk == NULL) {
      return NULL;
    }

    // Remove the chunk as the head of the list.
    free_list->remove_chunk(chunk);

    // Chunk is being removed from the chunks free list.
    dec_free_chunks_total(chunk->capacity_word_size());

    if (TraceMetadataChunkAllocation && Verbose) {
      tty->print_cr("ChunkManager::free_chunks_get: free_list "
                    PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
                    free_list, chunk, chunk->word_size());
    }
  } else {
    chunk = humongous_dictionary()->get_chunk(
      word_size,
      FreeBlockDictionary<Metachunk>::atLeast);

    if (chunk != NULL) {
      if (TraceMetadataHumongousAllocation) {
        size_t waste = chunk->word_size() - word_size;
        tty->print_cr("Free list allocate humongous chunk size " SIZE_FORMAT
                      " for requested size " SIZE_FORMAT
                      " waste " SIZE_FORMAT,
                      chunk->word_size(), word_size, waste);
      }
      // Chunk is being removed from the chunks free list.
      dec_free_chunks_total(chunk->capacity_word_size());
    } else {
      return NULL;
    }
  }

  // Remove it from the links to this freelist
  chunk->set_next(NULL);
  chunk->set_prev(NULL);
#ifdef ASSERT
  // Chunk is no longer on any freelist. Setting to false makes
  // container_count_slow() work.
  chunk->set_is_free(false);
#endif
  slow_locked_verify();
  return chunk;
}

Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  slow_locked_verify();

  // Take from the beginning of the list
  Metachunk* chunk = free_chunks_get(word_size);
  if (chunk == NULL) {
    return NULL;
  }

  assert((word_size <= chunk->word_size()) ||
         list_index(chunk->word_size()) == HumongousIndex,
         "Non-humongous variable sized chunk");
  if (TraceMetadataChunkAllocation) {
    size_t list_count;
    if (list_index(word_size) < HumongousIndex) {
      ChunkList* list = find_free_chunks_list(word_size);
      list_count = list->count();
    } else {
      list_count = humongous_dictionary()->total_count();
    }
    tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
               PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
               this, chunk, chunk->word_size(), list_count);
    locked_print_free_chunks(tty);
  }

  return chunk;
}

void ChunkManager::print_on(outputStream* out) {
  if (PrintFLSStatistics != 0) {
    humongous_dictionary()->report_statistics();
  }
}

// SpaceManager methods

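// A summary of the mapping below (sizes in words, from the ChunkSizes enum;
// this is a restatement of the switch, not additional logic): the boot
// metaspace gets the extra-large first chunks; the shared RO/RW metaspaces
// get chunks sized from SharedReadOnlySize/SharedReadWriteSize; anonymous
// and reflection metaspaces start with SpecializedChunk /
// ClassSpecializedChunk (128 / 128); everything else starts with
// SmallChunk / ClassSmallChunk (512 / 256).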
void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
                                           size_t* chunk_word_size,
                                           size_t* class_chunk_word_size) {
  switch (type) {
  case Metaspace::BootMetaspaceType:
    *chunk_word_size = Metaspace::first_chunk_word_size();
    *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
    break;
  case Metaspace::ROMetaspaceType:
    *chunk_word_size = SharedReadOnlySize / wordSize;
    *class_chunk_word_size = ClassSpecializedChunk;
    break;
  case Metaspace::ReadWriteMetaspaceType:
    *chunk_word_size = SharedReadWriteSize / wordSize;
    *class_chunk_word_size = ClassSpecializedChunk;
    break;
  case Metaspace::AnonymousMetaspaceType:
  case Metaspace::ReflectionMetaspaceType:
    *chunk_word_size = SpecializedChunk;
    *class_chunk_word_size = ClassSpecializedChunk;
    break;
  default:
    *chunk_word_size = SmallChunk;
    *class_chunk_word_size = ClassSmallChunk;
    break;
  }
  assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
    err_msg("Initial chunks sizes bad: data  " SIZE_FORMAT
            " class " SIZE_FORMAT,
            *chunk_word_size, *class_chunk_word_size));
}

size_t SpaceManager::sum_free_in_chunks_in_use() const {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  size_t free = 0;
  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    Metachunk* chunk = chunks_in_use(i);
    while (chunk != NULL) {
      free += chunk->free_word_size();
      chunk = chunk->next();
    }
  }
  return free;
}

size_t SpaceManager::sum_waste_in_chunks_in_use() const {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  size_t result = 0;
  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
   result += sum_waste_in_chunks_in_use(i);
  }

  return result;
}

size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
  size_t result = 0;
  Metachunk* chunk = chunks_in_use(index);
  // Count the free space in all the chunks but not the
  // current chunk from which allocations are still being done.
  while (chunk != NULL) {
    if (chunk != current_chunk()) {
      result += chunk->free_word_size();
    }
    chunk = chunk->next();
  }
  return result;
}

size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
  // For CMS use "allocated_chunks_words()" which does not need the
  // Metaspace lock.  For the other collectors sum over the
  // chunks-in-use lists.  Keeping both paths lets the cheap
  // allocated_chunks_words() be cross-checked against
  // sum_capacity_in_chunks_in_use(), which walks the lists and is the
  // definitive answer but is too expensive to use in product builds.
  if (UseConcMarkSweepGC) {
    return allocated_chunks_words();
  } else {
    MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
    size_t sum = 0;
    for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
      Metachunk* chunk = chunks_in_use(i);
      while (chunk != NULL) {
        sum += chunk->capacity_word_size();
        chunk = chunk->next();
      }
    }
    return sum;
  }
}

size_t SpaceManager::sum_count_in_chunks_in_use() {
  size_t count = 0;
  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    count = count + sum_count_in_chunks_in_use(i);
  }

  return count;
}

size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
  size_t count = 0;
  Metachunk* chunk = chunks_in_use(i);
  while (chunk != NULL) {
    count++;
    chunk = chunk->next();
  }
  return count;
}


size_t SpaceManager::sum_used_in_chunks_in_use() const {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  size_t used = 0;
  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    Metachunk* chunk = chunks_in_use(i);
    while (chunk != NULL) {
      used += chunk->used_word_size();
      chunk = chunk->next();
    }
  }
  return used;
}

void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {

  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    Metachunk* chunk = chunks_in_use(i);
    st->print("SpaceManager: %s " PTR_FORMAT,
                 chunk_size_name(i), chunk);
    if (chunk != NULL) {
      st->print_cr(" free " SIZE_FORMAT,
                   chunk->free_word_size());
    } else {
      st->print_cr("");
    }
  }

  vs_list()->chunk_manager()->locked_print_free_chunks(st);
  vs_list()->chunk_manager()->locked_print_sum_free_chunks(st);
}

size_t SpaceManager::calc_chunk_size(size_t word_size) {

  // Decide between a small chunk and a medium chunk.  Up to
  // _small_chunk_limit small chunks can be allocated but
  // once a medium chunk has been allocated, no more small
  // chunks will be allocated.
  size_t chunk_word_size;
  if (chunks_in_use(MediumIndex) == NULL &&
      sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
    chunk_word_size = (size_t) small_chunk_size();
    if (word_size + Metachunk::overhead() > small_chunk_size()) {
      chunk_word_size = medium_chunk_size();
    }
  } else {
    chunk_word_size = medium_chunk_size();
  }

  // Might still need a humongous chunk.  Enforce an
  // eight word granularity to facilitate reuse (some
  // wastage but better chance of reuse).
  size_t if_humongous_sized_chunk =
    align_size_up(word_size + Metachunk::overhead(),
                  HumongousChunkGranularity);
  chunk_word_size =
    MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);

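  // Worked example (illustrative, for a non-class SpaceManager with the
  // default sizes above): a 300-word request on a fresh manager fits a
  // small chunk (512 words); a 1000-word request does not, so a medium
  // chunk (8 * K words) is chosen; a 10000-word request is humongous, and
  // the chunk is instead sized to
  // align_size_up(10000 + Metachunk::overhead(), HumongousChunkGranularity).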
  assert(!SpaceManager::is_humongous(word_size) ||
         chunk_word_size == if_humongous_sized_chunk,
         err_msg("Size calculation is wrong, word_size " SIZE_FORMAT
                 " chunk_word_size " SIZE_FORMAT,
                 word_size, chunk_word_size));
  if (TraceMetadataHumongousAllocation &&
      SpaceManager::is_humongous(word_size)) {
    gclog_or_tty->print_cr("Metadata humongous allocation:");
    gclog_or_tty->print_cr("  word_size " PTR_FORMAT, word_size);
    gclog_or_tty->print_cr("  chunk_word_size " PTR_FORMAT,
                           chunk_word_size);
    gclog_or_tty->print_cr("    chunk overhead " PTR_FORMAT,
                           Metachunk::overhead());
  }
  return chunk_word_size;
}

MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
  assert(vs_list()->current_virtual_space() != NULL,
         "Should have been set");
  assert(current_chunk() == NULL ||
         current_chunk()->allocate(word_size) == NULL,
         "Don't need to expand");
  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);

  if (TraceMetadataChunkAllocation && Verbose) {
    size_t words_left = 0;
    size_t words_used = 0;
    if (current_chunk() != NULL) {
      words_left = current_chunk()->free_word_size();
      words_used = current_chunk()->used_word_size();
    }
    gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
                           " words " SIZE_FORMAT " words used " SIZE_FORMAT
                           " words left",
                            word_size, words_used, words_left);
  }

  // Get another chunk out of the virtual space
  size_t grow_chunks_by_words = calc_chunk_size(word_size);
  Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);

  // If a chunk was available, add it to the in-use chunk list
  // and do an allocation from it.
  if (next != NULL) {
    Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
    // Add to this manager's list of chunks in use.
    add_chunk(next, false);
    return next->allocate(word_size);
  }
  return NULL;
}

void SpaceManager::print_on(outputStream* st) const {

  for (ChunkIndex i = ZeroIndex;
       i < NumberOfInUseLists ;
       i = next_chunk_index(i) ) {
    st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT,
                 chunks_in_use(i),
                 chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
  }
  st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
               " Humongous " SIZE_FORMAT,
               sum_waste_in_chunks_in_use(SmallIndex),
               sum_waste_in_chunks_in_use(MediumIndex),
               sum_waste_in_chunks_in_use(HumongousIndex));
  // block free lists
  if (block_freelists() != NULL) {
    st->print_cr("total in block free lists " SIZE_FORMAT,
      block_freelists()->total_size());
  }
}

SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
                           Mutex* lock,
                           VirtualSpaceList* vs_list) :
  _vs_list(vs_list),
  _mdtype(mdtype),
  _allocated_blocks_words(0),
  _allocated_chunks_words(0),
  _allocated_chunks_count(0),
  _lock(lock)
{
  initialize();
}

void SpaceManager::inc_size_metrics(size_t words) {
  assert_lock_strong(SpaceManager::expand_lock());
  // Total of allocated Metachunks and allocated Metachunks count
  // for each SpaceManager
  _allocated_chunks_words = _allocated_chunks_words + words;
  _allocated_chunks_count++;
  // Global total of capacity in allocated Metachunks
  MetaspaceAux::inc_capacity(mdtype(), words);
  // Global total of allocated Metablocks.
  // used_words_slow() includes the overhead in each
  // Metachunk so include it in the used when the
  // Metachunk is first added (so only added once per
  // Metachunk).
  MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
}

void SpaceManager::inc_used_metrics(size_t words) {
  // Add to the per SpaceManager total
  Atomic::add_ptr(words, &_allocated_blocks_words);
  // Add to the global total
  MetaspaceAux::inc_used(mdtype(), words);
}

void SpaceManager::dec_total_from_size_metrics() {
  MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
  MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
  // Also deduct the overhead per Metachunk
  MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
}

void SpaceManager::initialize() {
  Metadebug::init_allocation_fail_alot_count();
  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    _chunks_in_use[i] = NULL;
  }
  _current_chunk = NULL;
  if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this);
  }
}

void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
  if (chunks == NULL) {
    return;
  }
  ChunkList* list = free_chunks(index);
  assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes");
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* cur = chunks;

  // This returns chunks one at a time.  If a new
  // class List can be created that is a base class
  // of FreeList then something like FreeList::prepend()
  // can be used in place of this loop
  while (cur != NULL) {
    assert(cur->container() != NULL, "Container should have been set");
    cur->container()->dec_container_count();
    // Capture the next link before it is changed
    // by the call to return_chunk_at_head();
    Metachunk* next = cur->next();
    cur->set_is_free(true);
    list->return_chunk_at_head(cur);
    cur = next;
  }
}

SpaceManager::~SpaceManager() {
  // This call this->_lock which can't be done while holding expand_lock()
  assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
    err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
            " allocated_chunks_words() " SIZE_FORMAT,
            sum_capacity_in_chunks_in_use(), allocated_chunks_words()));

  MutexLockerEx fcl(SpaceManager::expand_lock(),
                    Mutex::_no_safepoint_check_flag);

  ChunkManager* chunk_manager = vs_list()->chunk_manager();

  chunk_manager->slow_locked_verify();

  dec_total_from_size_metrics();

  if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
    locked_print_chunks_in_use_on(gclog_or_tty);
  }

  // Do not mangle freed Metachunks.  The chunk size inside Metachunks
  // is needed during the freeing of VirtualSpaceNodes.

  // Have to update before the chunks_in_use lists are emptied
  // below.
  chunk_manager->inc_free_chunks_total(allocated_chunks_words(),
                                       sum_count_in_chunks_in_use());

  // Add all the chunks in use by this space manager
  // to the global list of free chunks.

  // Follow each list of chunks-in-use and add them to the
  // free lists.  Each list is NULL terminated.

  for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
    if (TraceMetadataChunkAllocation && Verbose) {
      gclog_or_tty->print_cr("returned %d %s chunks to freelist",
                             sum_count_in_chunks_in_use(i),
                             chunk_size_name(i));
    }
    Metachunk* chunks = chunks_in_use(i);
    chunk_manager->return_chunks(i, chunks);
    set_chunks_in_use(i, NULL);
    if (TraceMetadataChunkAllocation && Verbose) {
      gclog_or_tty->print_cr("updated freelist count %d %s",
                             chunk_manager->free_chunks(i)->count(),
                             chunk_size_name(i));
    }
    assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
  }

  // The medium chunk case may be optimized by passing the head and
  // tail of the medium chunk list to add_at_head().  The tail is often
  // the current chunk but there are probably exceptions.

  // Humongous chunks
  if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print_cr("returned %d %s humongous chunks to dictionary",
                            sum_count_in_chunks_in_use(HumongousIndex),
                            chunk_size_name(HumongousIndex));
    gclog_or_tty->print("Humongous chunk dictionary: ");
  }
  // Humongous chunks are never the current chunk.
  Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);

  while (humongous_chunks != NULL) {
#ifdef ASSERT
    humongous_chunks->set_is_free(true);
#endif
    if (TraceMetadataChunkAllocation && Verbose) {
      gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
                          humongous_chunks,
                          humongous_chunks->word_size());
    }
    assert(humongous_chunks->word_size() == (size_t)
           align_size_up(humongous_chunks->word_size(),
                             HumongousChunkGranularity),
           err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
                   " granularity %d",
                   humongous_chunks->word_size(), HumongousChunkGranularity));
    Metachunk* next_humongous_chunks = humongous_chunks->next();
    humongous_chunks->container()->dec_container_count();
    chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks);
    humongous_chunks = next_humongous_chunks;
  }
  if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print_cr("");
    gclog_or_tty->print_cr("updated dictionary count %d %s",
                     chunk_manager->humongous_dictionary()->total_count(),
                     chunk_size_name(HumongousIndex));
  }
  chunk_manager->slow_locked_verify();
}

const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
  switch (index) {
    case SpecializedIndex:
      return "Specialized";
    case SmallIndex:
      return "Small";
    case MediumIndex:
      return "Medium";
    case HumongousIndex:
      return "Humongous";
    default:
      return NULL;
  }
}

ChunkIndex ChunkManager::list_index(size_t size) {
  switch (size) {
    case SpecializedChunk:
      assert(SpecializedChunk == ClassSpecializedChunk,
             "Need branch for ClassSpecializedChunk");
      return SpecializedIndex;
    case SmallChunk:
    case ClassSmallChunk:
      return SmallIndex;
    case MediumChunk:
    case ClassMediumChunk:
      return MediumIndex;
    default:
      assert(size > MediumChunk || size > ClassMediumChunk,
             "Not a humongous chunk");
      return HumongousIndex;
  }
}
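
// For example: list_index(SpecializedChunk) == SpecializedIndex,
// list_index(SmallChunk) == SmallIndex, list_index(MediumChunk) ==
// MediumIndex, and any other size (say a 10000-word humongous request)
// falls through to HumongousIndex.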

void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
  assert_lock_strong(_lock);
  size_t raw_word_size = get_raw_word_size(word_size);
  size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
  assert(raw_word_size >= min_size,
         err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
  block_freelists()->return_block(p, raw_word_size);
}

// Adds a chunk to the list of chunks in use.
void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {

  assert(new_chunk != NULL, "Should not be NULL");
  assert(new_chunk->next() == NULL, "Should not be on a list");

  new_chunk->reset_empty();

  // Find the correct list and set the current
  // chunk for that list.
  ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());

  if (index != HumongousIndex) {
    set_current_chunk(new_chunk);
    new_chunk->set_next(chunks_in_use(index));
    set_chunks_in_use(index, new_chunk);
  } else {
    // For null class loader data and DumpSharedSpaces, the first chunk isn't
    // small, so small will be null.  Link this first chunk as the current
    // chunk.
    if (make_current) {
      // Set as the current chunk but otherwise treat as a humongous chunk.
      set_current_chunk(new_chunk);
    }
    // Link at head.  The _current_chunk only points to a humongous chunk
    // for the null class loader metaspace (class and data virtual space
    // managers), so it will not point to the tail of the humongous chunks
    // list.
    new_chunk->set_next(chunks_in_use(HumongousIndex));
    set_chunks_in_use(HumongousIndex, new_chunk);

    assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
  }

  // Add to the running sum of capacity
  inc_size_metrics(new_chunk->word_size());

  assert(new_chunk->is_empty(), "Not ready for reuse");
  if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
                        sum_count_in_chunks_in_use());
    new_chunk->print_on(gclog_or_tty);
    if (vs_list() != NULL) {
      vs_list()->chunk_manager()->locked_print_free_chunks(tty);
    }
  }
}

Metachunk* SpaceManager::get_new_chunk(size_t word_size,
                                       size_t grow_chunks_by_words) {

  Metachunk* next = vs_list()->get_new_chunk(word_size,
                                             grow_chunks_by_words,
                                             medium_chunk_bunch());

  if (TraceMetadataHumongousAllocation &&
      SpaceManager::is_humongous(next->word_size())) {
    gclog_or_tty->print_cr("  new humongous chunk word size " PTR_FORMAT,
                           next->word_size());
  }

  return next;
}

MetaWord* SpaceManager::allocate(size_t word_size) {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);

  size_t raw_word_size = get_raw_word_size(word_size);
  BlockFreelist* fl =  block_freelists();
  MetaWord* p = NULL;
  // Allocation from the dictionary is expensive in the sense that
  // the dictionary has to be searched for a size.  Don't allocate
  // from the dictionary until it starts to get fat.  Is this
  // a reasonable policy?  Maybe a skinny dictionary is fast enough
  // for allocations.  Do some profiling.  JJJ
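  //
  // Illustrative numbers for the policy: with allocation_from_dictionary_limit
  // at 64 * K (see the constant near the top of this file), a SpaceManager
  // keeps bump-allocating from its current chunk and only starts searching
  // the deallocated-block dictionary once the blocks returned to it sum to
  // more than that limit.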
  if (fl->total_size() > allocation_from_dictionary_limit) {
    p = fl->get_block(raw_word_size);
  }
  if (p == NULL) {
    p = allocate_work(raw_word_size);
  }
  Metadebug::deallocate_block_a_lot(this, raw_word_size);

  return p;
}

// Returns the address of space allocated for "word_size".
// This method does not know about blocks (Metablocks)
MetaWord* SpaceManager::allocate_work(size_t word_size) {
  assert_lock_strong(_lock);
#ifdef ASSERT
  if (Metadebug::test_metadata_failure()) {
    return NULL;
  }
#endif
  // Is there space in the current chunk?
  MetaWord* result = NULL;

  // For DumpSharedSpaces, only allocate out of the current chunk which is
  // never null because we gave it the size we wanted.   Caller reports out
  // of memory if this returns null.
  if (DumpSharedSpaces) {
    assert(current_chunk() != NULL, "should never happen");
    inc_used_metrics(word_size);
    return current_chunk()->allocate(word_size); // caller handles null result
  }
  if (current_chunk() != NULL) {
    result = current_chunk()->allocate(word_size);
  }

  if (result == NULL) {
    result = grow_and_allocate(word_size);
  }
  if (result != 0) {
    inc_used_metrics(word_size);
    assert(result != (MetaWord*) chunks_in_use(MediumIndex),
           "Head of the list is being allocated");
  }

  return result;
}

void SpaceManager::verify() {
  // If there are blocks in the dictionary, then
  // verification of chunks does not work since
  // being in the dictionary alters a chunk.
  if (block_freelists()->total_size() == 0) {
    for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
      Metachunk* curr = chunks_in_use(i);
      while (curr != NULL) {
        curr->verify();
        verify_chunk_size(curr);
        curr = curr->next();
      }
    }
  }
}

void SpaceManager::verify_chunk_size(Metachunk* chunk) {
  assert(is_humongous(chunk->word_size()) ||
         chunk->word_size() == medium_chunk_size() ||
         chunk->word_size() == small_chunk_size() ||
         chunk->word_size() == specialized_chunk_size(),
         "Chunk size is wrong");
  return;
}

#ifdef ASSERT
void SpaceManager::verify_allocated_blocks_words() {
  // Verification is only guaranteed at a safepoint.
  assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
    "Verification can fail if the applications is running");
  assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
    err_msg("allocation total is not consistent " SIZE_FORMAT
            " vs " SIZE_FORMAT,
            allocated_blocks_words(), sum_used_in_chunks_in_use()));
}

#endif

void SpaceManager::dump(outputStream* const out) const {
  size_t curr_total = 0;
  size_t waste = 0;
  uint i = 0;
  size_t used = 0;
  size_t capacity = 0;

  // Add up statistics for all chunks in this SpaceManager.
  for (ChunkIndex index = ZeroIndex;
       index < NumberOfInUseLists;
       index = next_chunk_index(index)) {
    for (Metachunk* curr = chunks_in_use(index);
         curr != NULL;
         curr = curr->next()) {
      out->print("%d) ", i++);
      curr->print_on(out);
      if (TraceMetadataChunkAllocation && Verbose) {
        block_freelists()->print_on(out);
      }
      curr_total += curr->word_size();
      used += curr->used_word_size();
      capacity += curr->capacity_word_size();
      waste += curr->free_word_size() + curr->overhead();;
    }
  }

  size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
  // Free space isn't wasted.
  waste -= free;

  out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
                " free " SIZE_FORMAT " capacity " SIZE_FORMAT
                " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
}

#ifndef PRODUCT
void SpaceManager::mangle_freed_chunks() {
  for (ChunkIndex index = ZeroIndex;
       index < NumberOfInUseLists;
       index = next_chunk_index(index)) {
    for (Metachunk* curr = chunks_in_use(index);
         curr != NULL;
         curr = curr->next()) {
      curr->mangle();
    }
  }
}
#endif // PRODUCT

// MetaspaceAux


size_t MetaspaceAux::_allocated_capacity_words[] = {0, 0};
size_t MetaspaceAux::_allocated_used_words[] = {0, 0};

size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  return list == NULL ? 0 : list->free_bytes();
}

size_t MetaspaceAux::free_bytes() {
  return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
}

void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
  assert_lock_strong(SpaceManager::expand_lock());
  assert(words <= allocated_capacity_words(mdtype),
    err_msg("About to decrement below 0: words " SIZE_FORMAT
            " is greater than _allocated_capacity_words[%u] " SIZE_FORMAT,
            words, mdtype, allocated_capacity_words(mdtype)));
  _allocated_capacity_words[mdtype] -= words;
}

void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
  assert_lock_strong(SpaceManager::expand_lock());
  // Needs to be atomic
  _allocated_capacity_words[mdtype] += words;
}

void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
  assert(words <= allocated_used_words(mdtype),
    err_msg("About to decrement below 0: words " SIZE_FORMAT
            " is greater than _allocated_used_words[%u] " SIZE_FORMAT,
            words, mdtype, allocated_used_words(mdtype)));
  // For CMS deallocation of the Metaspaces occurs during the
  // sweep which is a concurrent phase.  Protection by the expand_lock()
  // is not enough since allocation is on a per Metaspace basis
  // and protected by the Metaspace lock.
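  // Widen to jlong before negating so the value is passed to
  // Atomic::add_ptr as a signed decrement.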
  jlong minus_words = (jlong) - (jlong) words;
  Atomic::add_ptr(minus_words, &_allocated_used_words[mdtype]);
}

void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
  // _allocated_used_words tracks allocations for
  // each piece of metadata.  Those allocations are
  // generally done concurrently by different application
  // threads so must be done atomically.
  Atomic::add_ptr(words, &_allocated_used_words[mdtype]);
}

size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
  size_t used = 0;
  ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    Metaspace* msp = iter.get_next();
    // Sum allocated_blocks_words for each metaspace
    if (msp != NULL) {
      used += msp->used_words_slow(mdtype);
    }
  }
  return used * BytesPerWord;
}

size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) {
  size_t free = 0;
  ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    Metaspace* msp = iter.get_next();
    if (msp != NULL) {
      free += msp->free_words(mdtype);
    }
  }
  return free * BytesPerWord;
}

size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
  if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
    return 0;
  }
  // Don't count the space in the freelists.  That space will be
  // added to the capacity calculation as needed.
  size_t capacity = 0;
  ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    Metaspace* msp = iter.get_next();
    if (msp != NULL) {
      capacity += msp->capacity_words_slow(mdtype);
    }
  }
  return capacity * BytesPerWord;
}

size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) {
  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  return list == NULL ? 0 : list->virtual_space_total();
}

size_t MetaspaceAux::min_chunk_size() { return Metaspace::first_chunk_word_size(); }

size_t MetaspaceAux::free_chunks_total(Metaspace::MetadataType mdtype) {
  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  if (list == NULL) {
    return 0;
  }
  ChunkManager* chunk = list->chunk_manager();
  chunk->slow_verify();
  return chunk->free_chunks_total();
}

size_t MetaspaceAux::free_chunks_total_in_bytes(Metaspace::MetadataType mdtype) {
  return free_chunks_total(mdtype) * BytesPerWord;
}

size_t MetaspaceAux::free_chunks_total() {
  return free_chunks_total(Metaspace::ClassType) +
         free_chunks_total(Metaspace::NonClassType);
}

size_t MetaspaceAux::free_chunks_total_in_bytes() {
  return free_chunks_total() * BytesPerWord;
}

void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
  gclog_or_tty->print(", [Metaspace:");
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_metadata_used,
                        allocated_used_bytes(),
                        reserved_in_bytes());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_metadata_used / K,
                        allocated_used_bytes() / K,
                        reserved_in_bytes()/ K);
  }

  gclog_or_tty->print("]");
}

// This is printed when PrintGCDetails
void MetaspaceAux::print_on(outputStream* out) {
  Metaspace::MetadataType nct = Metaspace::NonClassType;

  out->print_cr(" Metaspace total "
                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
                " reserved " SIZE_FORMAT "K",
                allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_in_bytes()/K);

  out->print_cr("  data space     "
                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
                " reserved " SIZE_FORMAT "K",
                allocated_capacity_bytes(nct)/K,
                allocated_used_bytes(nct)/K,
                reserved_in_bytes(nct)/K);
  if (Metaspace::using_class_space()) {
    Metaspace::MetadataType ct = Metaspace::ClassType;
    out->print_cr("  class space    "
                  SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
                  " reserved " SIZE_FORMAT "K",
                  allocated_capacity_bytes(ct)/K,
                  allocated_used_bytes(ct)/K,
                  reserved_in_bytes(ct)/K);
  }
}

// Print information for class space and data space separately.
// This is almost the same as above.
void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
  size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype);
  size_t capacity_bytes = capacity_bytes_slow(mdtype);
  size_t used_bytes = used_bytes_slow(mdtype);
  size_t free_bytes = free_in_bytes(mdtype);
  size_t used_and_free = used_bytes + free_bytes +
                           free_chunks_capacity_bytes;
  out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
             "K + unused in chunks " SIZE_FORMAT "K  + "
             " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
             "K  capacity in allocated chunks " SIZE_FORMAT "K",
             used_bytes / K,
             free_bytes / K,
             free_chunks_capacity_bytes / K,
             used_and_free / K,
             capacity_bytes / K);
  // Accounting can only be correct if we got the values during a safepoint
  assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
}

// Print total fragmentation for class metaspaces
void MetaspaceAux::print_class_waste(outputStream* out) {
  assert(Metaspace::using_class_space(), "class metaspace not used");
  size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
  size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
  ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    Metaspace* msp = iter.get_next();
    if (msp != NULL) {
      cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
      cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
      cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
      cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
      cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
      cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
      cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
    }
  }
  out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
                SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
                SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
                "large count " SIZE_FORMAT,
                cls_specialized_count, cls_specialized_waste,
                cls_small_count, cls_small_waste,
                cls_medium_count, cls_medium_waste, cls_humongous_count);
}

// Print total fragmentation for data and class metaspaces separately
void MetaspaceAux::print_waste(outputStream* out) {
  size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
  size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;

  ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    Metaspace* msp = iter.get_next();
    if (msp != NULL) {
      specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
      specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
      small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
      small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
      medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
      medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
      humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
    }
  }
  out->print_cr("Total fragmentation waste (words) doesn't count free space");
  out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
                        SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
                        SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
                        "large count " SIZE_FORMAT,
             specialized_count, specialized_waste, small_count,
             small_waste, medium_count, medium_waste, humongous_count);
  if (Metaspace::using_class_space()) {
    print_class_waste(out);
  }
}

// Dump global metaspace things from the end of ClassLoaderDataGraph
void MetaspaceAux::dump(outputStream* out) {
  out->print_cr("All Metaspace:");
  out->print("data space: "); print_on(out, Metaspace::NonClassType);
  out->print("class space: "); print_on(out, Metaspace::ClassType);
  print_waste(out);
}

void MetaspaceAux::verify_free_chunks() {
  Metaspace::space_list()->chunk_manager()->verify();
  if (Metaspace::using_class_space()) {
    Metaspace::class_space_list()->chunk_manager()->verify();
  }
}

void MetaspaceAux::verify_capacity() {
#ifdef ASSERT
  size_t running_sum_capacity_bytes = allocated_capacity_bytes();
  // For purposes of the running sum of capacity, verify against capacity
  size_t capacity_in_use_bytes = capacity_bytes_slow();
  assert(running_sum_capacity_bytes == capacity_in_use_bytes,
    err_msg("allocated_capacity_words() * BytesPerWord " SIZE_FORMAT
            " capacity_bytes_slow()" SIZE_FORMAT,
            running_sum_capacity_bytes, capacity_in_use_bytes));
  for (Metaspace::MetadataType i = Metaspace::ClassType;
       i < Metaspace:: MetadataTypeCount;
       i = (Metaspace::MetadataType)(i + 1)) {
    size_t capacity_in_use_bytes = capacity_bytes_slow(i);
    assert(allocated_capacity_bytes(i) == capacity_in_use_bytes,
      err_msg("allocated_capacity_bytes(%u) " SIZE_FORMAT
              " capacity_bytes_slow(%u)" SIZE_FORMAT,
              i, allocated_capacity_bytes(i), i, capacity_in_use_bytes));
  }
#endif
}

void MetaspaceAux::verify_used() {
#ifdef ASSERT
  size_t running_sum_used_bytes = allocated_used_bytes();
  // For purposes of the running sum of used, verify against used
  size_t used_in_use_bytes = used_bytes_slow();
  assert(allocated_used_bytes() == used_in_use_bytes,
    err_msg("allocated_used_bytes() " SIZE_FORMAT
            " used_bytes_slow()" SIZE_FORMAT,
            allocated_used_bytes(), used_in_use_bytes));
  for (Metaspace::MetadataType i = Metaspace::ClassType;
       i < Metaspace:: MetadataTypeCount;
       i = (Metaspace::MetadataType)(i + 1)) {
    size_t used_in_use_bytes = used_bytes_slow(i);
    assert(allocated_used_bytes(i) == used_in_use_bytes,
      err_msg("allocated_used_bytes(%u) " SIZE_FORMAT
              " used_bytes_slow(%u)" SIZE_FORMAT,
              i, allocated_used_bytes(i), i, used_in_use_bytes));
  }
#endif
}

void MetaspaceAux::verify_metrics() {
  verify_capacity();
  verify_used();
}


// Metaspace methods

size_t Metaspace::_first_chunk_word_size = 0;
size_t Metaspace::_first_class_chunk_word_size = 0;

Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
  initialize(lock, type);
}

Metaspace::~Metaspace() {
  delete _vsm;
  if (using_class_space()) {
    delete _class_vsm;
  }
}

VirtualSpaceList* Metaspace::_space_list = NULL;
VirtualSpaceList* Metaspace::_class_space_list = NULL;

#define VIRTUALSPACEMULTIPLIER 2

#ifdef _LP64
void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
  // Figure out the narrow_klass_base and the narrow_klass_shift.  The
  // narrow_klass_base is the lower of the metaspace base and the cds base
  // (if cds is enabled).  The narrow_klass_shift depends on the distance
  // between the lower base and higher address.
  address lower_base;
  address higher_address;
  if (UseSharedSpaces) {
    higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
                          (address)(metaspace_base + class_metaspace_size()));
    lower_base = MIN2(metaspace_base, cds_base);
  } else {
    higher_address = metaspace_base + class_metaspace_size();
    lower_base = metaspace_base;
  }
  Universe::set_narrow_klass_base(lower_base);
  if ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint) {
    Universe::set_narrow_klass_shift(0);
  } else {
    assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
    Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
  }
}
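
// A sketch of the two outcomes above (illustrative): if the class metaspace
// (plus any CDS archive) spans less than 4G (max_juint) from lower_base,
// narrow klass pointers can be stored as plain 32-bit offsets (shift 0);
// otherwise the offset is stored right-shifted by LogKlassAlignmentInBytes,
// extending the encodable range at the cost of aligned Klass addresses.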

// Return TRUE if the specified metaspace_base and cds_base are close enough
// to work with compressed klass pointers.
bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
  assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
  assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
  address lower_base = MIN2((address)metaspace_base, cds_base);
  address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
                                (address)(metaspace_base + class_metaspace_size()));
  return ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint);
}

// Try to allocate the metaspace at the requested addr.
void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
  assert(using_class_space(), "called improperly");
  assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
  assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
         "Metaspace size is too big");

  ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(),
                                             os::vm_allocation_granularity(),
                                             false, requested_addr, 0);
  if (!metaspace_rs.is_reserved()) {
    if (UseSharedSpaces) {
      // Keep trying to allocate the metaspace, increasing the requested_addr
      // by 1GB each time, until we reach an address that will no longer allow
      // use of CDS with compressed klass pointers.
      char *addr = requested_addr;
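      // Note: the (addr + 1*G > addr) test below also guards against
      // address wrap-around at the top of the address space.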
      while (!metaspace_rs.is_reserved() && (addr + 1*G > addr) &&
             can_use_cds_with_metaspace_addr(addr + 1*G, cds_base)) {
        addr = addr + 1*G;
        metaspace_rs = ReservedSpace(class_metaspace_size(),
                                     os::vm_allocation_granularity(), false, addr, 0);
      }
    }

    // If no allocation has succeeded so far, try to reserve the space
    // anywhere.  If that also fails, exit with an OOM error: at this point
    // we cannot retry as if UseCompressedKlassPointers were off, because too
    // much initialization that depends on UseCompressedKlassPointers has
    // already happened, so it cannot be turned off anymore.
    if (!metaspace_rs.is_reserved()) {
      metaspace_rs = ReservedSpace(class_metaspace_size(),
                                   os::vm_allocation_granularity(), false);
      if (!metaspace_rs.is_reserved()) {
        vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes",
                                              class_metaspace_size()));
      }
    }
  }

  // If we got here then the metaspace got allocated.
  MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);

  // Verify that we can use shared spaces.  Otherwise, turn off CDS.
  if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
    FileMapInfo::stop_sharing_and_unmap(
        "Could not allocate metaspace at a compatible address");
  }

  set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
                                  UseSharedSpaces ? (address)cds_base : 0);

  initialize_class_space(metaspace_rs);

  if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
    gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
                            Universe::narrow_klass_base(), Universe::narrow_klass_shift());
    gclog_or_tty->print_cr("Metaspace Size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
                           class_metaspace_size(), metaspace_rs.base(), requested_addr);
  }
}

// For UseCompressedKlassPointers the class space is reserved above the top of
// the Java heap.  The argument passed in is at the base of the compressed space.
void Metaspace::initialize_class_space(ReservedSpace rs) {
  // The reserved space size may be bigger because of alignment, esp with UseLargePages
  assert(rs.size() >= ClassMetaspaceSize,
         err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
  assert(using_class_space(), "Must be using class space");
  _class_space_list = new VirtualSpaceList(rs);
}

#endif

void Metaspace::global_initialize() {
  // Initialize the alignment for shared spaces.
  int max_alignment = os::vm_page_size();
  size_t cds_total = 0;

  set_class_metaspace_size(align_size_up(ClassMetaspaceSize,
                                         os::vm_allocation_granularity()));

  MetaspaceShared::set_max_alignment(max_alignment);

  if (DumpSharedSpaces) {
    SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
    SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
    SharedMiscDataSize  = align_size_up(SharedMiscDataSize, max_alignment);
    SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize, max_alignment);

    // Initialize with the sum of the shared space sizes.  The read-only
    // and read-write metaspace chunks will be allocated out of this, and
    // the remainder is the misc code and data chunks.
    cds_total = FileMapInfo::shared_spaces_size();
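    // VirtualSpaceList is sized in words, hence the division by wordSize.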
    _space_list = new VirtualSpaceList(cds_total/wordSize);

#ifdef _LP64
    // Set the compressed klass pointer base so that decoding of these pointers works
    // properly when creating the shared archive.
    assert(UseCompressedOops && UseCompressedKlassPointers,
      "UseCompressedOops and UseCompressedKlassPointers must be set");
    Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
    if (TraceMetavirtualspaceAllocation && Verbose) {
      gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
                             _space_list->current_virtual_space()->bottom());
    }

    // Set the shift to zero.
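    // With a zero shift every compressed klass pointer is a raw 32-bit
    // offset from the base, so the class metaspace plus the CDS region
    // must fit within the 4GB encoding range (asserted below).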
    assert(class_metaspace_size() < (uint64_t)(max_juint) - cds_total,
           "CDS region is too large");
    Universe::set_narrow_klass_shift(0);
#endif

  } else {
    // If using shared space, open the file that contains the shared space
    // and map in the memory before initializing the rest of metaspace (so
    // the addresses don't conflict)
    address cds_address = NULL;
    if (UseSharedSpaces) {
      FileMapInfo* mapinfo = new FileMapInfo();
      memset(mapinfo, 0, sizeof(FileMapInfo));

      // Open the shared archive file, read and validate the header. If
      // initialization fails, shared spaces [UseSharedSpaces] are
      // disabled and the file is closed.
      // Map in spaces now also
      if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
        FileMapInfo::set_current_info(mapinfo);
      } else {
        assert(!mapinfo->is_open() && !UseSharedSpaces,
               "archive file not closed or shared spaces not disabled.");
      }
      cds_total = FileMapInfo::shared_spaces_size();
      cds_address = (address)mapinfo->region_base(0);
    }

#ifdef _LP64
    // If UseCompressedKlassPointers is set then allocate the metaspace area
    // above the heap and above the CDS area (if it exists).
    if (using_class_space()) {
      if (UseSharedSpaces) {
        allocate_metaspace_compressed_klass_ptrs((char *)(cds_address + cds_total), cds_address);
      } else {
        allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0);
      }
    }
#endif

    // Initialize these before initializing the VirtualSpaceList
    _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
    _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
    // Make the first class chunk bigger than a medium chunk so it's not put
    // on the medium chunk list.  The next chunk will be small and progress
    // from there.  This size was determined by running with -version.
    _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
                                        (ClassMetaspaceSize/BytesPerWord)*2);
    _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
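    // With the default 8K-word MediumChunk the cap above is 48K words
    // (before alignment), unless ClassMetaspaceSize is small enough to make
    // the second term the minimum.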
    // Arbitrarily set the initial virtual space to a multiple
    // of the boot class loader size.
    size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
    // Initialize the list of virtual spaces.
    _space_list = new VirtualSpaceList(word_size);
  }
}

void Metaspace::initialize(Mutex* lock, MetaspaceType type) {

  assert(space_list() != NULL,
    "Metadata VirtualSpaceList has not been initialized");

  _vsm = new SpaceManager(NonClassType, lock, space_list());
  if (_vsm == NULL) {
    return;
  }
  size_t word_size;
  size_t class_word_size;
  vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);

  if (using_class_space()) {
    assert(class_space_list() != NULL,
      "Class VirtualSpaceList has not been initialized");

    // Allocate SpaceManager for classes.
    _class_vsm = new SpaceManager(ClassType, lock, class_space_list());
    if (_class_vsm == NULL) {
      return;
    }
  }

  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
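  // The expand lock taken above guards chunk allocation from the shared
  // VirtualSpaceLists below.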

  // Allocate chunk for metadata objects
  Metachunk* new_chunk =
     space_list()->get_initialization_chunk(word_size,
                                            vsm()->medium_chunk_bunch());
  assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
  if (new_chunk != NULL) {
    // Add to this manager's list of chunks in use and current_chunk().
    vsm()->add_chunk(new_chunk, true);
  }

  // Allocate chunk for class metadata objects
  if (using_class_space()) {
    Metachunk* class_chunk =
       class_space_list()->get_initialization_chunk(class_word_size,
                                                    class_vsm()->medium_chunk_bunch());
    if (class_chunk != NULL) {
      class_vsm()->add_chunk(class_chunk, true);
    }
  }

  _alloc_record_head = NULL;
  _alloc_record_tail = NULL;
}

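// Round a word size up to the platform's reserved-space allocation
// alignment.  For example, with a 4K allocation granularity and 8-byte
// words, align_word_size_up(100) returns 512 words (4K bytes).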
size_t Metaspace::align_word_size_up(size_t word_size) {
  size_t byte_size = word_size * wordSize;
  return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
}

MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
  // DumpSharedSpaces doesn't use class metadata area (yet)
  // Also, don't use class_vsm() unless UseCompressedKlassPointers is true.
  if (mdtype == ClassType && using_class_space()) {
    return class_vsm()->allocate(word_size);
  } else {
    return vsm()->allocate(word_size);
  }
}

MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
  MetaWord* result;
  MetaspaceGC::set_expand_after_GC(true);
  size_t before_inc = MetaspaceGC::capacity_until_GC();
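  // Raise the capacity high-water mark before retrying, so the allocation
  // below is allowed to expand the metaspace instead of triggering yet
  // another GC.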
  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord;
  MetaspaceGC::inc_capacity_until_GC(delta_bytes);
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
      " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC());
  }

  result = allocate(word_size, mdtype);

  return result;
}

// Bottom of the metadata allocated from this Metaspace.  Only useful and
// valid when dumping shared spaces, where allocation is contiguous from a
// single chunk.
char* Metaspace::bottom() const {
  assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
  return (char*)vsm()->current_chunk()->bottom();
}

size_t Metaspace::used_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
  }
}

size_t Metaspace::free_words(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_free_in_chunks_in_use();
  }
}

// Space capacity in the Metaspace.  It includes space in the list of
// chunks from which allocations have been made.  It does not include
// space in the global freelist; space available in the dictionary is not
// added separately, since it is already counted in some chunk.
size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_capacity_in_chunks_in_use();
  }
}

size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
  return used_words_slow(mdtype) * BytesPerWord;
}

size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
  return capacity_words_slow(mdtype) * BytesPerWord;
}

void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
  if (SafepointSynchronize::is_at_safepoint()) {
    assert(Thread::current()->is_VM_thread(), "should be the VM thread");
    // Don't take Heap_lock
    MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
    if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
      // Dark matter.  Too small for dictionary.
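      // The block is intentionally leaked; in debug builds it is filled
      // with a 0xf5 poison pattern so stale references are easier to spot.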
#ifdef ASSERT
      Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
#endif
      return;
    }
    if (is_class && using_class_space()) {
      class_vsm()->deallocate(ptr, word_size);
    } else {
      vsm()->deallocate(ptr, word_size);
    }
  } else {
    MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);

    if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
      // Dark matter.  Too small for dictionary.
#ifdef ASSERT
      Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
#endif
      return;
    }
    if (is_class && using_class_space()) {
      class_vsm()->deallocate(ptr, word_size);
    } else {
      vsm()->deallocate(ptr, word_size);
    }
  }
}

Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              bool read_only, MetaspaceObj::Type type, TRAPS) {
  if (HAS_PENDING_EXCEPTION) {
    assert(false, "Should not allocate with exception pending");
    return NULL;  // caller does a CHECK_NULL too
  }

  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;

  // SSS: Should we align the allocations and make sure the sizes are aligned?
  MetaWord* result = NULL;

  assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
        "ClassLoaderData::the_null_class_loader_data() should have been used.");
  // Allocate in metaspaces without taking out a lock, because it deadlocks
  // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
  // to revisit this for application class data sharing.
  if (DumpSharedSpaces) {
    assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
    Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
    result = space->allocate(word_size, NonClassType);
    if (result == NULL) {
      report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
    } else {
      space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
    }
    return Metablock::initialize(result, word_size);
  }

  result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);

  if (result == NULL) {
    // Try to clean out some memory and retry.
    result =
      Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
        loader_data, word_size, mdtype);

    // If result is still null, we are out of memory.
    if (result == NULL) {
      if (Verbose && TraceMetadataChunkAllocation) {
        gclog_or_tty->print_cr("Metaspace allocation failed for size "
          SIZE_FORMAT, word_size);
        if (loader_data->metaspace_or_null() != NULL) loader_data->dump(gclog_or_tty);
        MetaspaceAux::dump(gclog_or_tty);
      }
      // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
      const char* space_string = (mdtype == ClassType) ? "Class Metadata space" :
                                                         "Metadata space";
      report_java_out_of_memory(space_string);

      if (JvmtiExport::should_post_resource_exhausted()) {
        JvmtiExport::post_resource_exhausted(
            JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
            space_string);
      }
      if (mdtype == ClassType) {
        THROW_OOP_0(Universe::out_of_memory_error_class_metaspace());
      } else {
        THROW_OOP_0(Universe::out_of_memory_error_metaspace());
      }
    }
  }
  return Metablock::initialize(result, word_size);
}

void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
  assert(DumpSharedSpaces, "sanity");

  AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize);
  if (_alloc_record_head == NULL) {
    _alloc_record_head = _alloc_record_tail = rec;
  } else {
    _alloc_record_tail->_next = rec;
    _alloc_record_tail = rec;
  }
}

void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
  assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");

  address last_addr = (address)bottom();
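  // The records were appended in allocation order, which for the dump space
  // is also address order, so any gap between consecutive records is
  // reported to the closure as UnknownType below.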

  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
    address ptr = rec->_ptr;
    if (last_addr < ptr) {
      closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
    }
    closure->doit(ptr, rec->_type, rec->_byte_size);
    last_addr = ptr + rec->_byte_size;
  }

  address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
  if (last_addr < top) {
    closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
  }
}

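// Remove and release virtual spaces that have become completely empty
// (e.g. after class unloading), for both the non-class list and, when in
// use, the class list.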
void Metaspace::purge() {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  space_list()->purge();
  if (using_class_space()) {
    class_space_list()->purge();
  }
}

void Metaspace::print_on(outputStream* out) const {
  // Print both class virtual space counts and metaspace.
  if (Verbose) {
    vsm()->print_on(out);
    if (using_class_space()) {
      class_vsm()->print_on(out);
    }
  }
}

bool Metaspace::contains(const void * ptr) {
  if (MetaspaceShared::is_in_shared_space(ptr)) {
    return true;
  }
  // This is checked while unlocked.  As long as the virtualspaces are added
  // at the end, the pointer will be in one of them.  The virtual spaces
  // aren't deleted presently.  When they are, some sort of locking might
  // be needed.  Note that locking here can cause inversion problems with
  // the caller, MetaspaceObj::is_metadata().
  return space_list()->contains(ptr) ||
         (using_class_space() && class_space_list()->contains(ptr));
}

void Metaspace::verify() {
  vsm()->verify();
  if (using_class_space()) {
    class_vsm()->verify();
  }
}

void Metaspace::dump(outputStream* const out) const {
  out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
  vsm()->dump(out);
  if (using_class_space()) {
    out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
    class_vsm()->dump(out);
  }
}