allocation.hpp 26.6 KB
Newer Older
D
duke 已提交
1
/*
2
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
D
duke 已提交
3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
19 20 21
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
D
duke 已提交
22 23 24
 *
 */

25 26 27 28 29
#ifndef SHARE_VM_MEMORY_ALLOCATION_HPP
#define SHARE_VM_MEMORY_ALLOCATION_HPP

#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"
30
#include "utilities/macros.hpp"
31 32 33 34 35 36 37
#ifdef COMPILER1
#include "c1/c1_globals.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2_globals.hpp"
#endif

38 39
#include <new>

D
duke 已提交
40 41 42 43
// Arena alignment helpers: ARENA_ALIGN(x) rounds x up to the next
// multiple of ARENA_AMALLOC_ALIGNMENT (assumed to be a power of two).
#define ARENA_ALIGN_M1 (((size_t)(ARENA_AMALLOC_ALIGNMENT)) - 1)
#define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1))
#define ARENA_ALIGN(x) ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK)

Z
zgu 已提交
44 45 46 47 48 49 50 51 52 53 54 55

// noinline attribute
// _NOINLINE_ prevents the compiler from inlining a function.  It is used on
// allocation entry points (see CHeapObj below) that take a caller_pc, so the
// stack depth seen by caller-pc lookups stays predictable.
#ifdef _WINDOWS
  #define _NOINLINE_  __declspec(noinline)
#else
  #if __GNUC__ < 3    // gcc 2.x does not support noinline attribute
    #define _NOINLINE_
  #else
    #define _NOINLINE_ __attribute__ ((noinline))
  #endif
#endif

56 57 58 59 60 61
// Policy for handling allocation failure: either abort the VM with an
// out-of-memory error (EXIT_OOM) or return NULL to the caller (RETURN_NULL).
class AllocFailStrategy {
public:
  enum AllocFailEnum { EXIT_OOM, RETURN_NULL };
};
typedef AllocFailStrategy::AllocFailEnum AllocFailType;

D
duke 已提交
62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79
// All classes in the virtual machine must be subclassed
// by one of the following allocation classes:
//
// For objects allocated in the resource area (see resourceArea.hpp).
// - ResourceObj
//
// For objects allocated in the C-heap (managed by: free & malloc).
// - CHeapObj
//
// For objects allocated on the stack.
// - StackObj
//
// For embedded objects.
// - ValueObj
//
// For classes used as name spaces.
// - AllStatic
//
80 81 82
// For classes in Metaspace (class data)
// - MetaspaceObj
//
D
duke 已提交
83 84 85 86 87 88
// The printable subclasses are used for debugging and define virtual
// member functions for printing. Classes that avoid allocating the
// vtbl entries in the objects should therefore not be the printable
// subclasses.
//
// The following macros and function should be used to allocate memory
89 90 91 92 93
// directly in the resource area or in the C-heap. The _OBJ variants
// of the NEW/FREE_C_HEAP macros are used to alloc/dealloc simple
// objects which are not inherited from CHeapObj; note that constructor
// and destructor are not called. The preferable way to allocate objects
// is using the new operator.
D
duke 已提交
94
//
95 96 97 98 99 100
// WARNING: The array variant must only be used for a homogeneous array
// where all objects are of the exact type specified. If subtypes are
// stored in the array then you must pay attention to calling destructors
// as needed.
//
//   NEW_RESOURCE_ARRAY(type, size)
D
duke 已提交
101
//   NEW_RESOURCE_OBJ(type)
102 103 104 105
//   NEW_C_HEAP_ARRAY(type, size, memflags)
//   NEW_C_HEAP_OBJ(type, memflags)
//   FREE_C_HEAP_ARRAY(type, old, memflags)
//   FREE_C_HEAP_OBJ(objname, memflags)
D
duke 已提交
106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133
//   char* AllocateHeap(size_t size, const char* name);
//   void  FreeHeap(void* p);
//
// C-heap allocation can be traced using +PrintHeapAllocation.
// malloc and free should therefore never be called directly.

// Base class for objects allocated in the C-heap.

// In non product mode we introduce a super class for all allocation classes
// that supports printing.
// We avoid the superclass in product mode since some C++ compilers add
// a word overhead for empty super classes.

#ifdef PRODUCT
#define ALLOCATION_SUPER_CLASS_SPEC
#else
#define ALLOCATION_SUPER_CLASS_SPEC : public AllocatedObj
// Debug-only super class of all allocation classes; provides virtual print
// methods.  Excluded in PRODUCT builds to avoid the vtable word overhead.
class AllocatedObj {
 public:
  // Printing support
  void print() const;
  void print_value() const;

  virtual void print_on(outputStream* st) const;
  virtual void print_value_on(outputStream* st) const;
};
#endif

Z
zgu 已提交
134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157

/*
 * MemoryType bitmap layout:
 * | 16 15 14 13 12 11 10 09 | 08 07 06 05 | 04 03 02 01 |
 * |      memory type        |   object    | reserved    |
 * |                         |     type    |             |
 */
enum MemoryType {
  // Memory type by sub systems. It occupies lower byte.
  mtNone              = 0x0000,  // undefined
  mtClass             = 0x0100,  // memory class for Java classes
  mtThread            = 0x0200,  // memory for thread objects
  mtThreadStack       = 0x0300,
  mtCode              = 0x0400,  // memory for generated code
  mtGC                = 0x0500,  // memory for GC
  mtCompiler          = 0x0600,  // memory for compiler
  mtInternal          = 0x0700,  // memory used by VM, but does not belong to
                                 // any of above categories, and not used for
                                 // native memory tracking
  mtOther             = 0x0800,  // memory not used by VM
  mtSymbol            = 0x0900,  // symbol
  mtNMT               = 0x0A00,  // memory used by native memory tracking
  mtChunk             = 0x0B00,  // chunk that holds content of arenas
  mtJavaHeap          = 0x0C00,  // Java heap
  mtClassShared       = 0x0D00,  // class data sharing
  mtTest              = 0x0E00,  // Test type for verifying NMT
  mtTracing           = 0x0F00,  // memory used for Tracing
  mt_number_of_types  = 0x000F,  // number of memory types (mtDontTrack
                                 // is not included as validate type)
  mtDontTrack         = 0x0F00,  // memory we do not or cannot track
                                 // NOTE(review): mtDontTrack shares the value of
                                 // mtTracing, so NMT_CAN_TRACK() is false for
                                 // mtTracing memory -- confirm this is intended.
  mt_masks            = 0x7F00,

  // object type mask
  otArena             = 0x0010, // an arena object
  otNMTRecorder       = 0x0020, // memory recorder object
  ot_masks            = 0x00F0
};

// Helpers over a MEMFLAGS value.  Arguments are parenthesized so compound
// expressions (e.g. "mtNone|otArena") expand safely.
#define IS_MEMORY_TYPE(flags, type) ((((flags)) & mt_masks) == (type))
#define HAS_VALID_MEMORY_TYPE(flags)((((flags)) & mt_masks) != mtNone)
#define FLAGS_TO_MEMORY_TYPE(flags) (((flags)) & mt_masks)

#define IS_ARENA_OBJ(flags)         ((((flags)) & ot_masks) == otArena)
#define IS_NMT_RECORDER(flags)      ((((flags)) & ot_masks) == otNMTRecorder)
#define NMT_CAN_TRACK(flags)        (!IS_NMT_RECORDER(flags) && !(IS_MEMORY_TYPE(flags, mtDontTrack)))

// Combined memory-type + object-type bit set (layout pictured above).
typedef unsigned short MEMFLAGS;

182 183
#if INCLUDE_NMT

// Whether native memory tracking should record allocation call sites;
// defined elsewhere (set at VM startup).  Guards the os::get_caller_pc()
// lookups in the CURRENT_PC/CALLER_PC macros below.
extern bool NMT_track_callsite;

#else

// Without NMT support the flag is a compile-time constant, so the
// call-site lookup code folds away.
const bool NMT_track_callsite = false;

#endif // INCLUDE_NMT

Z
zgu 已提交
192
// debug build does not inline
// With _NMT_NOINLINE_ (debug builds) one extra stack frame separates the
// allocation site from os::get_caller_pc(), hence the +1 frame offsets.
#if defined(_NMT_NOINLINE_)
  #define CURRENT_PC       (NMT_track_callsite ? os::get_caller_pc(1) : 0)
  #define CALLER_PC        (NMT_track_callsite ? os::get_caller_pc(2) : 0)
  #define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(3) : 0)
#else
  #define CURRENT_PC       (NMT_track_callsite ? os::get_caller_pc(0) : 0)
  #define CALLER_PC        (NMT_track_callsite ? os::get_caller_pc(1) : 0)
  #define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(2) : 0)
#endif



template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
D
duke 已提交
206
 public:
Z
zgu 已提交
207 208 209
  _NOINLINE_ void* operator new(size_t size, address caller_pc = 0);
  _NOINLINE_ void* operator new (size_t size, const std::nothrow_t&  nothrow_constant,
                               address caller_pc = 0);
210 211 212
  _NOINLINE_ void* operator new [](size_t size, address caller_pc = 0);
  _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t&  nothrow_constant,
                               address caller_pc = 0);
D
duke 已提交
213
  void  operator delete(void* p);
214
  void  operator delete [] (void* p);
D
duke 已提交
215 216 217 218 219 220
};

// Base class for objects allocated on the stack only.
// Calling new or delete will result in fatal error.

class StackObj ALLOCATION_SUPER_CLASS_SPEC {
221
 private:
D
duke 已提交
222 223
  void* operator new(size_t size);
  void  operator delete(void* p);
224 225
  void* operator new [](size_t size);
  void  operator delete [](void* p);
D
duke 已提交
226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246
};

// Base class for objects used as value objects.
// Calling new or delete will result in fatal error.
//
// Portability note: Certain compilers (e.g. gcc) will
// always make classes bigger if it has a superclass, even
// if the superclass does not have any virtual methods or
// instance fields. The HotSpot implementation relies on this
// not to happen. So never make a ValueObj class a direct subclass
// of this object, but use the VALUE_OBJ_CLASS_SPEC class instead, e.g.,
// like this:
//
//   class A VALUE_OBJ_CLASS_SPEC {
//     ...
//   }
//
// With gcc and possible other compilers the VALUE_OBJ_CLASS_SPEC can
// be defined as a an empty string "".
//
class _ValueObj {
 private:
  // Value objects must be embedded or stack-allocated: new/delete are
  // declared private and left undefined so any use fails to compile/link.
  void* operator new(size_t size);
  void  operator delete(void* p);
  void* operator new [](size_t size);
  void  operator delete [](void* p);
};

254 255 256 257 258 259 260 261 262 263 264 265 266

// Base class for objects stored in Metaspace.
// Calling delete will result in fatal error.
//
// Do not inherit from something with a vptr because this class does
// not introduce one.  This class is used to allocate both shared read-only
// and shared read-write classes.
//

class ClassLoaderData;

class MetaspaceObj {
 public:
267
  bool is_metaspace_object() const;  // more specific test but slower
268 269 270
  bool is_shared() const;
  void print_address_on(outputStream* st) const;  // nonvirtual address printing

271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316
#define METASPACE_OBJ_TYPES_DO(f) \
  f(Unknown) \
  f(Class) \
  f(Symbol) \
  f(TypeArrayU1) \
  f(TypeArrayU2) \
  f(TypeArrayU4) \
  f(TypeArrayU8) \
  f(TypeArrayOther) \
  f(Method) \
  f(ConstMethod) \
  f(MethodData) \
  f(ConstantPool) \
  f(ConstantPoolCache) \
  f(Annotation) \
  f(MethodCounters)

#define METASPACE_OBJ_TYPE_DECLARE(name) name ## Type,
#define METASPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name;

  enum Type {
    // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
    METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
    _number_of_types
  };

  static const char * type_name(Type type) {
    switch(type) {
    METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
    default:
      ShouldNotReachHere();
      return NULL;
    }
  }

  static MetaspaceObj::Type array_type(size_t elem_size) {
    switch (elem_size) {
    case 1: return TypeArrayU1Type;
    case 2: return TypeArrayU2Type;
    case 4: return TypeArrayU4Type;
    case 8: return TypeArrayU8Type;
    default:
      return TypeArrayOtherType;
    }
  }

317
  void* operator new(size_t size, ClassLoaderData* loader_data,
318 319
                     size_t word_size, bool read_only,
                     Type type, Thread* thread);
320 321 322 323
                     // can't use TRAPS from this header file.
  void operator delete(void* p) { ShouldNotCallThis(); }
};

D
duke 已提交
324 325 326 327 328 329 330 331 332 333 334
// Base class for classes that constitute name spaces.

class AllStatic {
 public:
  // Instantiation is a programming error: subclasses of AllStatic are pure
  // name spaces and must contain only static members.
  AllStatic()  { ShouldNotCallThis(); }
  ~AllStatic() { ShouldNotCallThis(); }
};


//------------------------------Chunk------------------------------------------
// Linked list of raw memory chunks
Z
zgu 已提交
335
// A Chunk is one raw memory block in an Arena's linked list; the usable
// data area starts immediately after the (alignment-padded) header.
class Chunk: CHeapObj<mtChunk> {
  friend class VMStructs;

 protected:
  Chunk*       _next;     // Next Chunk in list
  const size_t _len;      // Size of this Chunk
 public:
  void* operator new(size_t size, AllocFailType alloc_failmode, size_t length);
  void  operator delete(void* p);
  Chunk(size_t length);

  enum {
    // default sizes; make them slightly smaller than 2**k to guard against
    // buddy-system style malloc implementations
#ifdef _LP64
    slack      = 40,            // [RGV] Not sure if this is right, but make it
                                //       a multiple of 8.
#else
    slack      = 20,            // suspected sizeof(Chunk) + internal malloc headers
#endif

    tiny_size  =  256  - slack, // Size of first chunk (tiny)
    init_size  =  1*K  - slack, // Size of first chunk (normal aka small)
    medium_size= 10*K  - slack, // Size of medium-sized chunk
    size       = 32*K  - slack, // Default size of an Arena chunk (following the first)
    non_pool_size = init_size + 32 // An initial size which is not one of above
  };

  void chop();                  // Chop this chunk
  void next_chop();             // Chop next chunk
  static size_t aligned_overhead_size(void) { return ARENA_ALIGN(sizeof(Chunk)); }
  static size_t aligned_overhead_size(size_t byte_size) { return ARENA_ALIGN(byte_size); }

  size_t length() const         { return _len;  }
  Chunk* next() const           { return _next;  }
  void set_next(Chunk* n)       { _next = n;  }
  // Boundaries of data area (possibly unused)
  char* bottom() const          { return ((char*) this) + aligned_overhead_size();  }
  char* top()    const          { return bottom() + _len; }
  bool contains(char* p) const  { return bottom() <= p && p <= top(); }

  // Start the chunk_pool cleaner task
  static void start_chunk_pool_cleaner_task();

  static void clean_chunk_pool();
};
D
duke 已提交
381 382 383

//------------------------------Arena------------------------------------------
// Fast allocation of memory
Z
zgu 已提交
384
// Bump-pointer allocator over a linked list of Chunks.  Amalloc is the
// common fast path (pointer test + increment); grow() appends a new chunk
// when the current one is exhausted.
class Arena : public CHeapObj<mtNone|otArena> {
protected:
  friend class ResourceMark;
  friend class HandleMark;
  friend class NoHandleMark;
  friend class VMStructs;

  Chunk *_first;                // First chunk
  Chunk *_chunk;                // current chunk
  char *_hwm, *_max;            // High water mark and max in current chunk
  // Get a new Chunk of at least size x
  void* grow(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
  size_t _size_in_bytes;        // Size of arena (used for native memory tracking)

  NOT_PRODUCT(static julong _bytes_allocated;) // total #bytes allocated since start
  friend class AllocStats;
  debug_only(void* malloc(size_t size);)
  debug_only(void* internal_malloc_4(size_t x);)
  NOT_PRODUCT(void inc_bytes_allocated(size_t x);)

  void signal_out_of_memory(size_t request, const char* whence) const;

  // Guard against _hwm + request wrapping around; either reports OOM or
  // returns false (RETURN_NULL mode) so the caller can bail out with NULL.
  bool check_for_overflow(size_t request, const char* whence,
      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) const {
    if (UINTPTR_MAX - request < (uintptr_t)_hwm) {
      if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
        return false;
      }
      signal_out_of_memory(request, whence);
    }
    return true;
 }

 public:
  Arena();
  Arena(size_t init_size);
  ~Arena();
  void  destruct_contents();
  char* hwm() const             { return _hwm; }

  // new operators
  void* operator new (size_t size);
  void* operator new (size_t size, const std::nothrow_t& nothrow_constant);

  // dynamic memory type tagging
  void* operator new(size_t size, MEMFLAGS flags);
  void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags);
  void  operator delete(void* p);

  // Fast allocate in the arena.  Common case is: pointer test + increment.
  void* Amalloc(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT) , "should be a power of 2");
    x = ARENA_ALIGN(x);
    debug_only(if (UseMallocOnly) return malloc(x);)
    if (!check_for_overflow(x, "Arena::Amalloc", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode);
    } else {
      char *old = _hwm;
      _hwm += x;
      return old;
    }
  }
  // Further assume size is padded out to words
  void *Amalloc_4(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
    debug_only(if (UseMallocOnly) return malloc(x);)
    if (!check_for_overflow(x, "Arena::Amalloc_4", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode);
    } else {
      char *old = _hwm;
      _hwm += x;
      return old;
    }
  }

  // Allocate with 'double' alignment. It is 8 bytes on sparc.
  // In other cases Amalloc_D() should be the same as Amalloc_4().
  void* Amalloc_D(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
    debug_only(if (UseMallocOnly) return malloc(x);)
#if defined(SPARC) && !defined(_LP64)
#define DALIGN_M1 7
    size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm;
    x += delta;
#endif
    if (!check_for_overflow(x, "Arena::Amalloc_D", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode); // grow() returns a result aligned >= 8 bytes.
    } else {
      char *old = _hwm;
      _hwm += x;
#if defined(SPARC) && !defined(_LP64)
      old += delta; // align to 8-bytes
#endif
      return old;
    }
  }

  // Fast delete in area.  Common case is: NOP (except for storage reclaimed)
  void Afree(void *ptr, size_t size) {
#ifdef ASSERT
    if (ZapResourceArea) memset(ptr, badResourceValue, size); // zap freed memory
    if (UseMallocOnly) return;
#endif
    if (((char*)ptr) + size == _hwm) _hwm = (char*)ptr;
  }

  void *Arealloc( void *old_ptr, size_t old_size, size_t new_size,
      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);

  // Move contents of this arena into an empty arena
  Arena *move_contents(Arena *empty_arena);

  // Determine if pointer belongs to this Arena or not.
  bool contains( const void *ptr ) const;

  // Total of all chunks in use (not thread-safe)
  size_t used() const;

  // Total # of bytes used
  size_t size_in_bytes() const         {  return _size_in_bytes; };
  void set_size_in_bytes(size_t size);

  static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2)  PRODUCT_RETURN;
  static void free_all(char** start, char** end)                                     PRODUCT_RETURN;

  // how many arena instances
  NOT_PRODUCT(static volatile jint _instance_count;)
private:
  // Reset this Arena to empty, access will trigger grow if necessary
  void   reset(void) {
    _first = _chunk = NULL;
    _hwm = _max = NULL;
    set_size_in_bytes(0);
  }
};

// One of the following macros must be used when allocating
// an array or object from an arena
#define NEW_ARENA_ARRAY(arena, type, size) \
  (type*) (arena)->Amalloc((size) * sizeof(type))

#define REALLOC_ARENA_ARRAY(arena, type, old, old_size, new_size)    \
  (type*) (arena)->Arealloc((char*)(old), (old_size) * sizeof(type), \
                            (new_size) * sizeof(type) )

#define FREE_ARENA_ARRAY(arena, type, old, size) \
  (arena)->Afree((char*)(old), (size) * sizeof(type))

#define NEW_ARENA_OBJ(arena, type) \
  NEW_ARENA_ARRAY(arena, type, 1)


//%note allocation_1
546 547 548 549 550 551
extern char* resource_allocate_bytes(size_t size,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
extern char* resource_allocate_bytes(Thread* thread, size_t size,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
extern char* resource_reallocate_bytes( char *old, size_t old_size, size_t new_size,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
D
duke 已提交
552 553 554 555 556 557 558 559 560 561 562
extern void resource_free_bytes( char *old, size_t size );

//----------------------------------------------------------------------
// Base class for objects allocated in the resource area per default.
// Optionally, objects may be allocated on the C heap with
// new(ResourceObj::C_HEAP) Foo(...) or in an Arena with new (&arena)
// ResourceObj's can be allocated within other objects, but don't use
// new or delete (allocation_type is unknown).  If new is used to allocate,
// use delete to deallocate.
class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
 public:
563
  enum allocation_type { STACK_OR_EMBEDDED = 0, RESOURCE_AREA, C_HEAP, ARENA, allocation_mask = 0x3 };
564
  static void set_allocation_type(address res, allocation_type type) NOT_DEBUG_RETURN;
D
duke 已提交
565 566
#ifdef ASSERT
 private:
567 568 569
  // When this object is allocated on stack the new() operator is not
  // called but garbage on stack may look like a valid allocation_type.
  // Store negated 'this' pointer when new() is called to distinguish cases.
570 571 572
  // Use second array's element for verification value to distinguish garbage.
  uintptr_t _allocation_t[2];
  bool is_type_set() const;
D
duke 已提交
573
 public:
574 575 576 577 578
  allocation_type get_allocation_type() const;
  bool allocated_on_stack()    const { return get_allocation_type() == STACK_OR_EMBEDDED; }
  bool allocated_on_res_area() const { return get_allocation_type() == RESOURCE_AREA; }
  bool allocated_on_C_heap()   const { return get_allocation_type() == C_HEAP; }
  bool allocated_on_arena()    const { return get_allocation_type() == ARENA; }
579 580 581 582
  ResourceObj(); // default construtor
  ResourceObj(const ResourceObj& r); // default copy construtor
  ResourceObj& operator=(const ResourceObj& r); // default copy assignment
  ~ResourceObj();
D
duke 已提交
583 584 585
#endif // ASSERT

 public:
Z
zgu 已提交
586
  void* operator new(size_t size, allocation_type type, MEMFLAGS flags);
587
  void* operator new [](size_t size, allocation_type type, MEMFLAGS flags);
588 589
  void* operator new(size_t size, const std::nothrow_t&  nothrow_constant,
      allocation_type type, MEMFLAGS flags);
590 591 592
  void* operator new [](size_t size, const std::nothrow_t&  nothrow_constant,
      allocation_type type, MEMFLAGS flags);

D
duke 已提交
593 594
  void* operator new(size_t size, Arena *arena) {
      address res = (address)arena->Amalloc(size);
595
      DEBUG_ONLY(set_allocation_type(res, ARENA);)
D
duke 已提交
596 597
      return res;
  }
598 599 600 601 602 603 604

  void* operator new [](size_t size, Arena *arena) {
      address res = (address)arena->Amalloc(size);
      DEBUG_ONLY(set_allocation_type(res, ARENA);)
      return res;
  }

D
duke 已提交
605 606
  void* operator new(size_t size) {
      address res = (address)resource_allocate_bytes(size);
607
      DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
D
duke 已提交
608 609
      return res;
  }
610 611 612 613 614 615 616

  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) {
      address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
      DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
      return res;
  }

617 618 619 620 621 622 623 624 625 626 627 628
  void* operator new [](size_t size) {
      address res = (address)resource_allocate_bytes(size);
      DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
      return res;
  }

  void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) {
      address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
      DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
      return res;
  }

D
duke 已提交
629
  void  operator delete(void* p);
630
  void  operator delete [](void* p);
D
duke 已提交
631 632 633 634 635 636 637 638 639
};

// One of the following macros must be used when allocating an array
// or object to determine whether it should reside in the C heap on in
// the resource area.

#define NEW_RESOURCE_ARRAY(type, size)\
  (type*) resource_allocate_bytes((size) * sizeof(type))

#define NEW_RESOURCE_ARRAY_RETURN_NULL(type, size)\
  (type*) resource_allocate_bytes((size) * sizeof(type), AllocFailStrategy::RETURN_NULL)

#define NEW_RESOURCE_ARRAY_IN_THREAD(thread, type, size)\
  (type*) resource_allocate_bytes(thread, (size) * sizeof(type))

#define NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(thread, type, size)\
  (type*) resource_allocate_bytes(thread, (size) * sizeof(type), AllocFailStrategy::RETURN_NULL)

#define REALLOC_RESOURCE_ARRAY(type, old, old_size, new_size)\
  (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type))

#define REALLOC_RESOURCE_ARRAY_RETURN_NULL(type, old, old_size, new_size)\
  (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type),\
                                    (new_size) * sizeof(type), AllocFailStrategy::RETURN_NULL)

#define FREE_RESOURCE_ARRAY(type, old, size)\
  resource_free_bytes((char*)(old), (size) * sizeof(type))

#define FREE_FAST(old)\
    /* nop */

#define NEW_RESOURCE_OBJ(type)\
  NEW_RESOURCE_ARRAY(type, 1)

#define NEW_RESOURCE_OBJ_RETURN_NULL(type)\
  NEW_RESOURCE_ARRAY_RETURN_NULL(type, 1)

#define NEW_C_HEAP_ARRAY3(type, size, memflags, pc, allocfail)\
  (type*) AllocateHeap((size) * sizeof(type), memflags, pc, allocfail)

#define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\
  (type*) (AllocateHeap((size) * sizeof(type), memflags, pc))

#define NEW_C_HEAP_ARRAY(type, size, memflags)\
  (type*) (AllocateHeap((size) * sizeof(type), memflags))

#define NEW_C_HEAP_ARRAY2_RETURN_NULL(type, size, memflags, pc)\
  NEW_C_HEAP_ARRAY3(type, (size), memflags, pc, AllocFailStrategy::RETURN_NULL)

#define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\
  NEW_C_HEAP_ARRAY3(type, (size), memflags, (address)0, AllocFailStrategy::RETURN_NULL)

#define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
  (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags))

#define REALLOC_C_HEAP_ARRAY_RETURN_NULL(type, old, size, memflags)\
  (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags, AllocFailStrategy::RETURN_NULL))

#define FREE_C_HEAP_ARRAY(type, old, memflags) \
  FreeHeap((char*)(old), memflags)

// allocate type in heap without calling ctor
#define NEW_C_HEAP_OBJ(type, memflags)\
  NEW_C_HEAP_ARRAY(type, 1, memflags)

#define NEW_C_HEAP_OBJ_RETURN_NULL(type, memflags)\
  NEW_C_HEAP_ARRAY_RETURN_NULL(type, 1, memflags)

// deallocate obj of type in heap without calling dtor
#define FREE_C_HEAP_OBJ(objname, memflags)\
  FreeHeap((char*)objname, memflags);
D
duke 已提交
702 703 704 705

// for statistics
#ifndef PRODUCT
class AllocStats : StackObj {
  // Counter values captured when the AllocStats was constructed; the
  // accessors below report deltas relative to these snapshots.
  julong start_mallocs, start_frees;
  julong start_malloc_bytes, start_mfree_bytes, start_res_bytes;
 public:
  AllocStats();

  julong num_mallocs();    // since creation of receiver
  julong alloc_bytes();
  julong num_frees();
  julong free_bytes();
  julong resource_bytes();
  void   print();
};
#endif


//------------------------------ReallocMark---------------------------------
// Code which uses REALLOC_RESOURCE_ARRAY should check an associated
// ReallocMark, which is declared in the same scope as the reallocated
// pointer.  Any operation that could __potentially__ cause a reallocation
// should check the ReallocMark.
class ReallocMark: public StackObj {
protected:
  // NOTE(review): presumably the resource-area nesting level recorded at
  // construction time -- confirm against the definition in allocation.cpp.
  NOT_PRODUCT(int _nesting;)

public:
  ReallocMark()   PRODUCT_RETURN;
  // Verifies (non-product only) that a REALLOC_RESOURCE_ARRAY on the guarded
  // pointer would still be valid at this point.
  void check()    PRODUCT_RETURN;
};
734

735 736 737 738 739 740 741 742
// Helper class to allocate arrays that may become large.
// Uses the OS malloc for allocations smaller than ArrayAllocatorMallocLimit
// and uses mapped memory for larger allocations.
// Most OS mallocs do something similar but Solaris malloc does not revert
// to mapped memory for large allocations. By default ArrayAllocatorMallocLimit
// is set so that we always use malloc except for Solaris where we set the
// limit to get mapped memory.
template <class E, MEMFLAGS F>
743
class ArrayAllocator VALUE_OBJ_CLASS_SPEC {
744 745 746
  char* _addr;
  bool _use_malloc;
  size_t _size;
747
  bool _free_in_destructor;
748
 public:
749 750 751 752 753 754 755 756 757
  ArrayAllocator(bool free_in_destructor = true) :
    _addr(NULL), _use_malloc(false), _size(0), _free_in_destructor(free_in_destructor) { }

  ~ArrayAllocator() {
    if (_free_in_destructor) {
      free();
    }
  }

758 759 760 761
  E* allocate(size_t length);
  void free();
};

762
#endif // SHARE_VM_MEMORY_ALLOCATION_HPP