/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "compiler/compileBroker.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "runtime/atomic.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vm_operations.hpp"
#include "trace/tracing.hpp"
#include "utilities/events.hpp"
#include "utilities/ticks.inline.hpp"
#include "utilities/xmlstream.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

#ifdef ASSERT

#define SWEEP(nm) record_sweep(nm, __LINE__)
// Sweeper logging code
class SweeperRecord {
 public:
  int traversal;
  int invocation;
  int compile_id;
  long traversal_mark;
  int state;
  const char* kind;
  address vep;
  address uep;
  int line;

  void print() {
      tty->print_cr("traversal = %d invocation = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
                    PTR_FORMAT " state = %d traversal_mark %d line = %d",
                    traversal,
                    invocation,
                    compile_id,
                    kind == NULL ? "" : kind,
                    uep,
                    vep,
                    state,
                    traversal_mark,
                    line);
  }
};

static int _sweep_index = 0;
static SweeperRecord* _records = NULL;

void NMethodSweeper::report_events(int id, address entry) {
  if (_records != NULL) {
    for (int i = _sweep_index; i < SweeperLogEntries; i++) {
      if (_records[i].uep == entry ||
          _records[i].vep == entry ||
          _records[i].compile_id == id) {
        _records[i].print();
      }
    }
    for (int i = 0; i < _sweep_index; i++) {
      if (_records[i].uep == entry ||
          _records[i].vep == entry ||
          _records[i].compile_id == id) {
        _records[i].print();
      }
    }
  }
}

void NMethodSweeper::report_events() {
  if (_records != NULL) {
    for (int i = _sweep_index; i < SweeperLogEntries; i++) {
      // skip empty records
      if (_records[i].vep == NULL) continue;
      _records[i].print();
    }
    for (int i = 0; i < _sweep_index; i++) {
      // skip empty records
      if (_records[i].vep == NULL) continue;
      _records[i].print();
    }
  }
}

void NMethodSweeper::record_sweep(nmethod* nm, int line) {
  if (_records != NULL) {
    _records[_sweep_index].traversal = _traversals;
    _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
    _records[_sweep_index].invocation = _sweep_fractions_left;
    _records[_sweep_index].compile_id = nm->compile_id();
    _records[_sweep_index].kind = nm->compile_kind();
    _records[_sweep_index].state = nm->_state;
    _records[_sweep_index].vep = nm->verified_entry_point();
    _records[_sweep_index].uep = nm->entry_point();
    _records[_sweep_index].line = line;
    _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
  }
}
#else
#define SWEEP(nm)
#endif

nmethod* NMethodSweeper::_current                      = NULL; // Current nmethod
long     NMethodSweeper::_traversals                   = 0;    // Stack scan count, also sweep ID.
long     NMethodSweeper::_total_nof_code_cache_sweeps  = 0;    // Total number of full sweeps of the code cache
long     NMethodSweeper::_time_counter                 = 0;    // Virtual time used to periodically invoke sweeper
long     NMethodSweeper::_last_sweep                   = 0;    // Value of _time_counter when the last sweep happened
int      NMethodSweeper::_seen                         = 0;    // Nof. nmethods we have currently processed in the current pass of the CodeCache
int      NMethodSweeper::_flushed_count                = 0;    // Nof. nmethods flushed in current sweep
int      NMethodSweeper::_zombified_count              = 0;    // Nof. nmethods made zombie in current sweep
int      NMethodSweeper::_marked_for_reclamation_count = 0;    // Nof. nmethods marked for reclaim in current sweep

volatile bool NMethodSweeper::_should_sweep            = true; // Indicates if we should invoke the sweeper
volatile int  NMethodSweeper::_sweep_fractions_left    = 0;    // Nof. invocations left until this pass over the code cache is complete
volatile int  NMethodSweeper::_sweep_started           = 0;    // Flag to control conc sweeper
volatile int  NMethodSweeper::_bytes_changed           = 0;    // Counts the total nmethod size if the nmethod changed from:
                                                               //   1) alive       -> not_entrant
                                                               //   2) not_entrant -> zombie
                                                               //   3) zombie      -> marked_for_reclamation
int    NMethodSweeper::_hotness_counter_reset_val       = 0;

long   NMethodSweeper::_total_nof_methods_reclaimed     = 0;    // Accumulated nof methods flushed
long   NMethodSweeper::_total_nof_c2_methods_reclaimed  = 0;    // Accumulated nof C2 methods flushed
size_t NMethodSweeper::_total_flushed_size              = 0;    // Total number of bytes flushed from the code cache
Tickspan  NMethodSweeper::_total_time_sweeping;                 // Accumulated time sweeping
Tickspan  NMethodSweeper::_total_time_this_sweep;               // Total time this sweep
Tickspan  NMethodSweeper::_peak_sweep_time;                     // Peak time for a full sweep
Tickspan  NMethodSweeper::_peak_sweep_fraction_time;            // Peak time sweeping one fraction



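// Closure applied to the nmethods found in the frames of Java threads during a stack
// scan: it resets each nmethod's hotness counter and, for not-entrant nmethods, records
// that an activation is still on the stack (see mark_active_nmethods() below).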
class MarkActivationClosure: public CodeBlobClosure {
public:
  virtual void do_code_blob(CodeBlob* cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
      // If we see an activation belonging to a non_entrant nmethod, we mark it.
      if (nm->is_not_entrant()) {
        nm->mark_as_seen_on_stack();
      }
    }
  }
};
static MarkActivationClosure mark_activation_closure;

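// Variant used while a sweep is already in progress: it only refreshes the hotness
// counter of nmethods found on thread stacks, without marking not-entrant nmethods
// as seen on stack.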
class SetHotnessClosure: public CodeBlobClosure {
public:
  virtual void do_code_blob(CodeBlob* cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
    }
  }
};
static SetHotnessClosure set_hotness_closure;


int NMethodSweeper::hotness_counter_reset_val() {
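  // Lazily compute the reset value for the nmethod hotness counters: roughly two units
  // per MB of ReservedCodeCacheSize, with a minimum of 1 (e.g., a 48M code cache yields 96).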
  if (_hotness_counter_reset_val == 0) {
    _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2;
  }
  return _hotness_counter_reset_val;
}
bool NMethodSweeper::sweep_in_progress() {
  return (_current != NULL);
}

// Scans the stacks of all Java threads and marks activations of not-entrant methods.
// No need to synchronize access, since 'mark_active_nmethods' is always executed at a
// safepoint.
void NMethodSweeper::mark_active_nmethods() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
  // If we do not want to reclaim not-entrant or zombie methods there is no need
  // to scan stacks
  if (!MethodFlushing) {
    return;
  }

  // Increase time so that we can estimate when to invoke the sweeper again.
  _time_counter++;

  // Check for restart
  assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
  if (!sweep_in_progress()) {
    _seen = 0;
    _sweep_fractions_left = NmethodSweepFraction;
    _current = CodeCache::first_nmethod();
    _traversals += 1;
    _total_time_this_sweep = Tickspan();

    if (PrintMethodFlushing) {
      tty->print_cr("### Sweep: stack traversal %d", _traversals);
    }
    Threads::nmethods_do(&mark_activation_closure);

  } else {
    // Only set hotness counter
    Threads::nmethods_do(&set_hotness_closure);
  }

  OrderAccess::storestore();
}
/**
 * This function invokes the sweeper if at least one of the three conditions is met:
 *    (1) The code cache is getting full
 *    (2) There are sufficient state changes in/since the last sweep.
 *    (3) We have not been sweeping for 'some time'
 */
void NMethodSweeper::possibly_sweep() {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
  // Only compiler threads are allowed to sweep
  if (!MethodFlushing || !sweep_in_progress() || !Thread::current()->is_Compiler_thread()) {
    return;
  }

  // If there was no state change while nmethod sweeping, 'should_sweep' will be false.
  // This is one of the two places where should_sweep can be set to true. The general
  // idea is as follows: If there is enough free space in the code cache, there is no
  // need to invoke the sweeper. The following formula (which determines whether to invoke
  // the sweeper or not) depends on the assumption that for larger ReservedCodeCacheSizes
  // we need less frequent sweeps than for smaller ReservedCodeCacheSizes. Furthermore,
  // the formula considers how much space in the code cache is currently used. Here are
  // some examples that will (hopefully) help in understanding.
  //
  // Small ReservedCodeCacheSizes:  (e.g., < 16M) We invoke the sweeper every time, since
  //                                              the result of the division is 0. This
  //                                              keeps the used code cache size small
  //                                              (important for embedded Java)
  // Large ReservedCodeCacheSize :  (e.g., 256M + code cache is 10% full). The formula
  //                                              computes: (256 / 16) - 1 = 15
  //                                              As a result, we invoke the sweeper after
  //                                              15 invocations of 'mark_active_nmethods'.
  // Large ReservedCodeCacheSize:   (e.g., 256M + code cache is 90% full). The formula
  //                                              computes: (256 / 16) - 10 = 6.
  if (!_should_sweep) {
    const int time_since_last_sweep = _time_counter - _last_sweep;
    // ReservedCodeCacheSize has an 'unsigned' type. We need a 'signed' type for max_wait_time,
    // since 'time_since_last_sweep' can be larger than 'max_wait_time'. If that happens using
    // an unsigned type would cause an underflow (wait_until_next_sweep becomes a large positive
    // value) that disables the intended periodic sweeps.
    const int max_wait_time = ReservedCodeCacheSize / (16 * M);
    double wait_until_next_sweep = max_wait_time - time_since_last_sweep - CodeCache::reverse_free_ratio();
    assert(wait_until_next_sweep <= (double)max_wait_time, "Calculation of code cache sweeper interval is incorrect");
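    // Tying the code to the examples above: with ReservedCodeCacheSize = 256M we get
    // max_wait_time = 16; since reverse_free_ratio() grows as the code cache fills up,
    // an almost empty code cache triggers a sweep roughly every 15 increments of
    // _time_counter, whereas a 90% full code cache waits only about 6.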

    if ((wait_until_next_sweep <= 0.0) || !CompileBroker::should_compile_new_jobs()) {
      _should_sweep = true;
    }
  }

  if (_should_sweep && _sweep_fractions_left > 0) {
    // Only one thread at a time will sweep
    jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
    if (old != 0) {
      return;
    }
#ifdef ASSERT
    if (LogSweeper && _records == NULL) {
      // Create the ring buffer for the logging code
      _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
      memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
    }
#endif

    if (_sweep_fractions_left > 0) {
      sweep_code_cache();
      _sweep_fractions_left--;
    }

    // We are done with sweeping the code cache once.
    if (_sweep_fractions_left == 0) {
      _total_nof_code_cache_sweeps++;
      _last_sweep = _time_counter;
      // Reset flag; temporarily disables sweeper
      _should_sweep = false;
      // If there was enough state change, 'possibly_enable_sweeper()'
      // sets '_should_sweep' to true
      possibly_enable_sweeper();
      // Reset _bytes_changed only if there was enough state change. _bytes_changed
      // can further increase by calls to 'report_state_change'.
      if (_should_sweep) {
        _bytes_changed = 0;
      }
    }
    // Release work, because another compiler thread could continue.
    OrderAccess::release_store((int*)&_sweep_started, 0);
  }
}

void NMethodSweeper::sweep_code_cache() {
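  // Sweep one fraction of the code cache: visit roughly (nof_nmethods - _seen) /
  // _sweep_fractions_left nmethods starting at _current, yield to pending safepoints,
  // and record timing and tracing statistics for this fraction.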
  Ticks sweep_start_counter = Ticks::now();

  _flushed_count                = 0;
  _zombified_count              = 0;
  _marked_for_reclamation_count = 0;

  if (PrintMethodFlushing && Verbose) {
    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
  }

  if (!CompileBroker::should_compile_new_jobs()) {
    // If we have turned off compilations we might as well do full sweeps
    // in order to reach the clean state faster. Otherwise the sleeping compiler
    // threads will slow down sweeping.
    _sweep_fractions_left = 1;
  }

  // We want to visit all nmethods after NmethodSweepFraction
  // invocations so divide the remaining number of nmethods by the
  // remaining number of invocations.  This is only an estimate since
  // the number of nmethods changes during the sweep so the final
  // stage must iterate until there are no more nmethods.
  int todo = (CodeCache::nof_nmethods() - _seen) / _sweep_fractions_left;
  int swept_count = 0;


  assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  int freed_memory = 0;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

    // The last invocation iterates until there are no more nmethods
    for (int i = 0; (i < todo || _sweep_fractions_left == 1) && _current != NULL; i++) {
      swept_count++;
      if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
        if (PrintMethodFlushing && Verbose) {
          tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
        }
        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

        assert(Thread::current()->is_Java_thread(), "should be java thread");
        JavaThread* thread = (JavaThread*)Thread::current();
        ThreadBlockInVM tbivm(thread);
        thread->java_suspend_self();
      }
      // Since we will give up the CodeCache_lock, always skip ahead
      // to the next nmethod.  Other blobs can be deleted by other
      // threads but nmethods are only reclaimed by the sweeper.
      nmethod* next = CodeCache::next_nmethod(_current);

      // Now ready to process nmethod and give up CodeCache_lock
      {
        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        freed_memory += process_nmethod(_current);
      }
      _seen++;
      _current = next;
    }
  }

  assert(_sweep_fractions_left > 1 || _current == NULL, "must have scanned the whole cache");

  const Ticks sweep_end_counter = Ticks::now();
  const Tickspan sweep_time = sweep_end_counter - sweep_start_counter;
  _total_time_sweeping  += sweep_time;
  _total_time_this_sweep += sweep_time;
  _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
  _total_flushed_size += freed_memory;
  _total_nof_methods_reclaimed += _flushed_count;

  EventSweepCodeCache event(UNTIMED);
  if (event.should_commit()) {
    event.set_starttime(sweep_start_counter);
    event.set_endtime(sweep_end_counter);
    event.set_sweepIndex(_traversals);
    event.set_sweepFractionIndex(NmethodSweepFraction - _sweep_fractions_left + 1);
    event.set_sweptCount(swept_count);
    event.set_flushedCount(_flushed_count);
    event.set_markedCount(_marked_for_reclamation_count);
    event.set_zombifiedCount(_zombified_count);
    event.commit();
  }

#ifdef ASSERT
  if (PrintMethodFlushing) {
    tty->print_cr("### sweeper:      sweep time(%d): "
      INT64_FORMAT, _sweep_fractions_left, (jlong)sweep_time.value());
  }
#endif

  if (_sweep_fractions_left == 1) {
    _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
    log_sweep("finished");
  }

  // Sweeper is the only case where memory is released; check here if it
  // is time to restart the compiler. Only checking if there is a certain
  // amount of free memory in the code cache might lead to re-enabling
  // compilation although no memory has been released. For example, there are
  // cases when compilation was disabled although there is 4MB (or more) free
  // memory in the code cache. The reason is code cache fragmentation. Therefore,
  // it only makes sense to re-enable compilation if we have actually freed memory.
  // Note that typically several kB are released for sweeping 16MB of the code
  // cache. As a result, we require 'freed_memory' > 0 to restart the compiler.
  if (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0)) {
    CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
    log_sweep("restart_compiler");
  }
}

/**
 * This function updates the sweeper statistics that keep track of nmethods
 * state changes. If there is 'enough' state change, the sweeper is invoked
 * as soon as possible. There can be data races on _bytes_changed. The data
 * races are benign, since it does not matter if we lose a couple of bytes.
 * In the worst case we call the sweeper a little later. Also, we are guaranteed
 * to invoke the sweeper if the code cache gets full.
 */
void NMethodSweeper::report_state_change(nmethod* nm) {
  _bytes_changed += nm->total_size();
  possibly_enable_sweeper();
}

/**
 * Function determines if there was 'enough' state change in the code cache to invoke
 * the sweeper again. Currently, we determine 'enough' as more than 1% state change in
 * the code cache since the last sweep.
 */
void NMethodSweeper::possibly_enable_sweeper() {
  double percent_changed = ((double)_bytes_changed / (double)ReservedCodeCacheSize) * 100;
  if (percent_changed > 1.0) {
    _should_sweep = true;
  }
}

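// RAII helper used by the sweeper: it registers the nmethod currently being processed
// with the current CompilerThread (set_scanned_nmethod) so the nmethod is not unloaded
// while the sweeper works on it, and clears the registration again on destruction.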
class NMethodMarker: public StackObj {
 private:
  CompilerThread* _thread;
 public:
  NMethodMarker(nmethod* nm) {
    _thread = CompilerThread::current();
    if (!nm->is_zombie() && !nm->is_unloaded()) {
      // Only expose live nmethods for scanning
      _thread->set_scanned_nmethod(nm);
    }
  }
  ~NMethodMarker() {
    _thread->set_scanned_nmethod(NULL);
  }
};

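// Flushes the given nmethod from the code cache, after first cleaning up the call sites
// of its virtual-call inline caches so that their CompiledICHolders are not leaked.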
void NMethodSweeper::release_nmethod(nmethod *nm) {
  // Clean up any CompiledICHolders
  {
    ResourceMark rm;
    MutexLocker ml_patch(CompiledIC_lock);
    RelocIterator iter(nm);
    while (iter.next()) {
      if (iter.type() == relocInfo::virtual_call_type) {
        CompiledIC::cleanup_call_site(iter.virtual_call_reloc());
      }
    }
  }

  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  nm->flush();
}

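// Inspects a single nmethod and advances it one step through the state machine
// (alive -> not-entrant -> zombie -> marked-for-reclamation -> flushed), cleaning
// inline caches along the way. Returns the number of bytes freed, which is non-zero
// only if the nmethod was actually flushed from the code cache.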
int NMethodSweeper::process_nmethod(nmethod *nm) {
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  int freed_memory = 0;
  // Make sure this nmethod doesn't get unloaded during the scan,
  // since safepoints may happen while acquiring the locks below.
  NMethodMarker nmm(nm);
  SWEEP(nm);

  // Skip methods that are currently referenced by the VM
  if (nm->is_locked_by_vm()) {
    // But still remember to clean-up inline caches for alive nmethods
    if (nm->is_alive()) {
      // Clean inline caches that point to zombie/non-entrant methods
      MutexLocker cl(CompiledIC_lock);
      nm->cleanup_inline_caches();
      SWEEP(nm);
    }
    return freed_memory;
  }

  if (nm->is_zombie()) {
    // If it is the first time we see this nmethod, we mark it. Otherwise,
    // we reclaim it. When we have seen a zombie method twice, we know that
    // there are no inline caches that refer to it.
    if (nm->is_marked_for_reclamation()) {
      assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
      }
      freed_memory = nm->total_size();
      if (nm->is_compiled_by_c2()) {
        _total_nof_c2_methods_reclaimed++;
      }
      release_nmethod(nm);
      _flushed_count++;
    } else {
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
      }
      nm->mark_for_reclamation();
      // Keep track of code cache state change
      _bytes_changed += nm->total_size();
      _marked_for_reclamation_count++;
      SWEEP(nm);
    }
  } else if (nm->is_not_entrant()) {
    // If there are no current activations of this method on the
    // stack we can safely convert it to a zombie method
    if (nm->can_convert_to_zombie()) {
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
      }
      // Clear ICStubs to prevent back patching stubs of zombie or unloaded
      // nmethods during the next safepoint (see ICStub::finalize).
      MutexLocker cl(CompiledIC_lock);
      nm->clear_ic_stubs();
      // Code cache state change is tracked in make_zombie()
      nm->make_zombie();
      _zombified_count++;
      SWEEP(nm);
    } else {
      // Still alive, clean up its inline caches
      MutexLocker cl(CompiledIC_lock);
      nm->cleanup_inline_caches();
      SWEEP(nm);
    }
  } else if (nm->is_unloaded()) {
    // Unloaded code, just make it a zombie
    if (PrintMethodFlushing && Verbose) {
      tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
    }
    if (nm->is_osr_method()) {
      SWEEP(nm);
      // No inline caches will ever point to osr methods, so we can just remove it
      freed_memory = nm->total_size();
      if (nm->is_compiled_by_c2()) {
        _total_nof_c2_methods_reclaimed++;
      }
      release_nmethod(nm);
      _flushed_count++;
    } else {
      {
        // Clean ICs of unloaded nmethods as well because they may reference other
        // unloaded nmethods that may be flushed earlier in the sweeper cycle.
        MutexLocker cl(CompiledIC_lock);
        nm->cleanup_inline_caches();
      }
      // Code cache state change is tracked in make_zombie()
      nm->make_zombie();
      _zombified_count++;
      SWEEP(nm);
    }
  } else {
    if (UseCodeCacheFlushing) {
      if (!nm->is_locked_by_vm() && !nm->is_osr_method() && !nm->is_native_method()) {
        // Do not make native methods and OSR-methods not-entrant
        nm->dec_hotness_counter();
        // Get the initial value of the hotness counter. This value depends on the
        // ReservedCodeCacheSize
        int reset_val = hotness_counter_reset_val();
        int time_since_reset = reset_val - nm->hotness_counter();
        double threshold = -reset_val + (CodeCache::reverse_free_ratio() * NmethodSweepActivity);
        // The less free space in the code cache we have, the bigger reverse_free_ratio() is.
        // I.e., 'threshold' increases with lower available space in the code cache and a higher
        // NmethodSweepActivity. If the current hotness counter - which decreases from its initial
        // value until it is reset by stack walking - is smaller than the computed threshold, the
        // corresponding nmethod is considered for removal.
        if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > 10)) {
          // A method is marked as not-entrant if the method is
          // 1) 'old enough': nm->hotness_counter() < threshold
          // 2) The method was in_use for a minimum amount of time: (time_since_reset > 10)
          //    The second condition is necessary if we are dealing with very small code cache
          //    sizes (e.g., <10m) and the code cache size is too small to hold all hot methods.
          //    The second condition ensures that methods are not immediately made not-entrant
          //    after compilation.
          nm->make_not_entrant();
          // Code cache state change is tracked in make_not_entrant()
          if (PrintMethodFlushing && Verbose) {
            tty->print_cr("### Nmethod %d/" PTR_FORMAT "made not-entrant: hotness counter %d/%d threshold %f",
                          nm->compile_id(), nm, nm->hotness_counter(), reset_val, threshold);
          }
        }
      }
    }
    // Clean up all inline caches that point to zombie/non-entrant methods
    MutexLocker cl(CompiledIC_lock);
    nm->cleanup_inline_caches();
    SWEEP(nm);
  }
  return freed_memory;
}

// Print out some state information about the current sweep and the
// state of the code cache if it's requested.
void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
  if (PrintMethodFlushing) {
    stringStream s;
    // Dump code cache state into a buffer before locking the tty,
    // because log_state() will use locks causing lock conflicts.
    CodeCache::log_state(&s);

    ttyLocker ttyl;
    tty->print("### sweeper: %s ", msg);
    if (format != NULL) {
      va_list ap;
      va_start(ap, format);
      tty->vprint(format, ap);
      va_end(ap);
    }
    tty->print_cr("%s", s.as_string());
  }

  if (LogCompilation && (xtty != NULL)) {
    stringStream s;
    // Dump code cache state into a buffer before locking the tty,
    // because log_state() will use locks causing lock conflicts.
    CodeCache::log_state(&s);

    ttyLocker ttyl;
    xtty->begin_elem("sweeper state='%s' traversals='" INTX_FORMAT "' ", msg, (intx)traversal_count());
    if (format != NULL) {
      va_list ap;
      va_start(ap, format);
      xtty->vprint(format, ap);
      va_end(ap);
    }
    xtty->print("%s", s.as_string());
    xtty->stamp();
    xtty->end_elem();
  }
}

void NMethodSweeper::print() {
  ttyLocker ttyl;
  tty->print_cr("Code cache sweeper statistics:");
  tty->print_cr("  Total sweep time:                %1.0lfms", (double)_total_time_sweeping.value()/1000000);
  tty->print_cr("  Total number of full sweeps:     %ld", _total_nof_code_cache_sweeps);
  tty->print_cr("  Total number of flushed methods: %ld(%ld C2 methods)", _total_nof_methods_reclaimed,
                                                    _total_nof_c2_methods_reclaimed);
  tty->print_cr("  Total size of flushed methods:   " SIZE_FORMAT "kB", _total_flushed_size/K);
}