/*
 * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_sweeper.cpp.incl"

long      NMethodSweeper::_traversals = 0;   // No. of stack traversals performed
CodeBlob* NMethodSweeper::_current = NULL;   // Current nmethod
int       NMethodSweeper::_seen = 0;         // No. of blobs we have currently processed in current pass of CodeCache
int       NMethodSweeper::_invocations = 0;  // No. of invocations left until we are completed with this pass

jint      NMethodSweeper::_locked_seen = 0;
jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
bool      NMethodSweeper::_rescan = false;
bool      NMethodSweeper::_do_sweep = false;
jint      NMethodSweeper::_sweep_started = 0;
bool      NMethodSweeper::_was_full = false;
jint      NMethodSweeper::_advise_to_sweep = 0;
jlong     NMethodSweeper::_last_was_full = 0;
uint      NMethodSweeper::_highest_marked = 0;
long      NMethodSweeper::_was_full_traversal = 0;

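// Closure applied to each code blob found on a Java thread's stack during the
// safepoint stack walk below (Threads::nmethods_do). Marking a not-entrant
// nmethod as seen on stack keeps the sweeper from turning it into a zombie
// while activations of it may still be running.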
class MarkActivationClosure: public CodeBlobClosure {
public:
  virtual void do_code_blob(CodeBlob* cb) {
    // If we see an activation belonging to a non_entrant nmethod, we mark it.
    if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
      ((nmethod*)cb)->mark_as_seen_on_stack();
    }
  }
};
static MarkActivationClosure mark_activation_closure;

void NMethodSweeper::scan_stacks() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
  if (!MethodFlushing) return;
  _do_sweep = true;

  // No need to synchronize access, since this is always executed at a
  // safepoint.  If we aren't in the middle of a scan and a rescan
  // hasn't been requested then just return. If UseCodeCacheFlushing is on and
  // code cache flushing is in progress, don't skip sweeping, to help make
  // progress clearing space in the code cache.
  if ((_current == NULL && !_rescan) && !(UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs())) {
    _do_sweep = false;
    return;
  }

  // Make sure the CompiledIC_lock is unlocked, since we might update some
  // inline caches. If it is locked, we just bail out and try later.
  if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return;

  // Check for restart
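  // _current == NULL means the previous pass finished walking the whole code
  // cache, so start a new traversal and spread the next full walk over
  // NmethodSweepFraction sweeper invocations.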
  assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
  if (_current == NULL) {
    _seen        = 0;
    _invocations = NmethodSweepFraction;
    _current     = CodeCache::first_nmethod();
    _traversals  += 1;
    if (PrintMethodFlushing) {
      tty->print_cr("### Sweep: stack traversal %d", _traversals);
    }
    Threads::nmethods_do(&mark_activation_closure);

    // reset the flags since we started a scan from the beginning.
    _rescan = false;
    _locked_seen = 0;
    _not_entrant_seen_on_stack = 0;
  }

  if (UseCodeCacheFlushing) {
    if (!CodeCache::needs_flushing()) {
      // scan_stacks() runs during a safepoint, no race with setters
      _advise_to_sweep = 0;
    }

    if (was_full()) {
      // There was some progress so attempt to restart the compiler
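      // Restart only if the code cache no longer needs flushing and at least
      // MinCodeCacheFlushingInterval seconds have passed since it last filled up.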
      jlong now           = os::javaTimeMillis();
      jlong max_interval  = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
      jlong curr_interval = now - _last_was_full;
      if ((!CodeCache::needs_flushing()) && (curr_interval > max_interval)) {
        CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
        set_was_full(false);

        // Update the _last_was_full time so we can tell how fast the
        // code cache is filling up
        _last_was_full = os::javaTimeMillis();

        if (PrintMethodFlushing) {
          tty->print_cr("### sweeper: Live blobs:" UINT32_FORMAT "/Free code cache:" SIZE_FORMAT " bytes, restarting compiler",
            CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
        }
        if (LogCompilation && (xtty != NULL)) {
          ttyLocker ttyl;
          xtty->begin_elem("restart_compiler live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
                           CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
          xtty->stamp();
          xtty->end_elem();
        }
      }
    }
  }
}

void NMethodSweeper::possibly_sweep() {
  if ((!MethodFlushing) || (!_do_sweep)) return;

  if (_invocations > 0) {
    // Only one thread at a time will sweep
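    // Atomic::cmpxchg returns the previous value of _sweep_started; only the
    // caller that observes 0 wins the race and sweeps, the rest simply return.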
    jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
    if (old != 0) {
      return;
    }
    sweep_code_cache();
  }
  _sweep_started = 0;
}

void NMethodSweeper::sweep_code_cache() {
#ifdef ASSERT
  jlong sweep_start;
  if (PrintMethodFlushing) {
    sweep_start = os::javaTimeMillis();
  }
#endif
  if (PrintMethodFlushing && Verbose) {
    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_blobs(), _invocations);
  }

  // We want to visit all nmethods after NmethodSweepFraction invocations.
  // If _invocations is 1 we do the rest in this pass.
  int todo = CodeCache::nof_blobs();
  if (_invocations > 1) {
    todo = (CodeCache::nof_blobs() - _seen) / _invocations;
  }
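  // For example, with NmethodSweepFraction == 4 and 4000 live blobs, each of
  // the four sweeps in a pass visits roughly 1000 blobs (the quota is
  // recomputed from what is left, so the last sweep picks up any remainder).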

  // Compilers may check whether to sweep more often than stack scans happen;
  // don't keep trying once everything has been scanned.
  _invocations--;

  assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

    for(int i = 0; i < todo && _current != NULL; i++) {

      // Since we will give up the CodeCache_lock, always skip ahead to an nmethod.
      // Other blobs can be deleted by other threads
      // Read next before we potentially delete current
      CodeBlob* next = CodeCache::next_nmethod(_current);

      // Now ready to process nmethod and give up CodeCache_lock
      {
        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        process_nmethod((nmethod *)_current);
      }
      _seen++;
      _current = next;
    }

    // Skip forward to the next nmethod (if any). Code blobs other than nmethods
    // can be freed async to us and make _current invalid while we sleep.
    _current = CodeCache::next_nmethod(_current);
  }

  if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
    // we've completed a scan without making progress but there were
    // nmethods we were unable to process either because they were
    // locked or were still on stack.  We don't have to aggressively
    // clean them up so just stop scanning.  We could scan once more
    // but that complicates the control logic and it's unlikely to
    // matter much.
    if (PrintMethodFlushing) {
      tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
    }
  }

#ifdef ASSERT
  if (PrintMethodFlushing) {
    jlong sweep_end             = os::javaTimeMillis();
    tty->print_cr("### sweeper:      sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start);
  }
#endif
}

void NMethodSweeper::process_nmethod(nmethod *nm) {
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  // Skip methods that are currently referenced by the VM
  if (nm->is_locked_by_vm()) {
    // But still remember to clean up inline caches for alive nmethods
    if (nm->is_alive()) {
      // Clean up all inline caches that point to zombie/not-entrant methods
      MutexLocker cl(CompiledIC_lock);
      nm->cleanup_inline_caches();
    } else {
      _locked_seen++;
    }
    return;
  }

  if (nm->is_zombie()) {
    // The first time we see a zombie nmethod we mark it. Otherwise, we
    // reclaim it. Once we have seen a zombie method twice, we know that
    // there are no inline caches that refer to it.
    if (nm->is_marked_for_reclamation()) {
      assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
      }
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      nm->flush();
    } else {
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
      }
      nm->mark_for_reclamation();
      _rescan = true;
    }
  } else if (nm->is_not_entrant()) {
    // If there are no current activations of this method on the
    // stack, we can safely convert it to a zombie method
    if (nm->can_not_entrant_be_converted()) {
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
      }
      nm->make_zombie();
      _rescan = true;
    } else {
      // Still alive, clean up its inline caches
      MutexLocker cl(CompiledIC_lock);
      nm->cleanup_inline_caches();
      // We couldn't transition this nmethod, so don't immediately
      // request a rescan.  If this method stays on the stack for a
      // long time we don't want to keep rescanning the code cache.
      _not_entrant_seen_on_stack++;
    }
  } else if (nm->is_unloaded()) {
    // Unloaded code, just make it a zombie
    if (PrintMethodFlushing && Verbose)
      tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
    if (nm->is_osr_method()) {
      // No inline caches will ever point to osr methods, so we can just remove it
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      nm->flush();
    } else {
      nm->make_zombie();
      _rescan = true;
    }
  } else {
    assert(nm->is_alive(), "should be alive");

    if (UseCodeCacheFlushing) {
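      // A speculatively disconnected nmethod (method()->code() != nm) that has
      // not been re-attached after more than two stack traversals since the
      // forced cleanup, and that was compiled before the cleanup mark, is made
      // not-entrant (while the cache still needs flushing) so normal sweeping
      // can reclaim it.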
      if ((nm->method()->code() != nm) && !(nm->is_locked_by_vm()) && !(nm->is_osr_method()) &&
          (_traversals > _was_full_traversal+2) && (((uint)nm->compile_id()) < _highest_marked) &&
          CodeCache::needs_flushing()) {
        // This method has not been called since the forced cleanup happened
        nm->make_not_entrant();
      }
    }

    // Clean up all inline caches that point to zombie/not-entrant methods
    MutexLocker cl(CompiledIC_lock);
    nm->cleanup_inline_caches();
  }
}

// Code cache unloading: when compilers notice the code cache is getting full,
// they will call a vm op that comes here. This code attempts to speculatively
// unload the oldest half of the nmethods (based on the compile job id) by
// saving the old code in a list in the CodeCache. Then
// execution resumes. If a method so marked is not called by the second sweeper
// stack traversal after the current one, the nmethod will be marked not-entrant
// and removed by normal sweeping. If the method is called, the methodOop's
// _code field is restored and the methodOop/nmethod
// go back to their normal state.
void NMethodSweeper::handle_full_code_cache(bool is_full) {
  // Only the first one to notice can advise us to start early cleaning
  if (!is_full) {
    jint old = Atomic::cmpxchg( 1, &_advise_to_sweep, 0 );
    if (old != 0) {
      return;
    }
  }

  if (is_full) {
    // Since code cache is full, immediately stop new compiles
    bool did_set = CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
    if (!did_set) {
      // only the first to notice can start the cleaning,
      // others will go back and block
      return;
    }
    set_was_full(true);

    // If we run out within MinCodeCacheFlushingInterval of the last unload time, give up
    jlong now = os::javaTimeMillis();
    jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
    jlong curr_interval = now - _last_was_full;
    if (curr_interval < max_interval) {
      _rescan = true;
      if (PrintMethodFlushing) {
        tty->print_cr("### handle full too often, turning off compiler");
      }
      if (LogCompilation && (xtty != NULL)) {
        ttyLocker ttyl;
        xtty->begin_elem("disable_compiler flushing_interval='" UINT64_FORMAT "' live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
                         curr_interval/1000, CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
        xtty->stamp();
        xtty->end_elem();
      }
      return;
    }
  }

  VM_HandleFullCodeCache op(is_full);
  VMThread::execute(&op);

  // rescan again as soon as possible
  _rescan = true;
}

void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
  // If there was a race in detecting full code cache, only run
  // one vm op for it or keep the compiler shut off

  debug_only(jlong start = os::javaTimeMillis();)

  if ((!was_full()) && (is_full)) {
    if (!CodeCache::needs_flushing()) {
      if (PrintMethodFlushing) {
        tty->print_cr("### sweeper: Live blobs:" UINT32_FORMAT "/Free code cache:" SIZE_FORMAT " bytes, restarting compiler",
          CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
      }
      if (LogCompilation && (xtty != NULL)) {
        ttyLocker ttyl;
        xtty->begin_elem("restart_compiler live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
                         CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
        xtty->stamp();
        xtty->end_elem();
      }
      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
      return;
    }
  }

  // Traverse the code cache trying to dump the oldest nmethods
  uint curr_max_comp_id = CompileBroker::get_compilation_id();
  uint flush_target = ((curr_max_comp_id - _highest_marked) >> 1) + _highest_marked;
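  // flush_target is the midpoint between the last cleanup's high-water compile
  // id and the current maximum, i.e. roughly the older half of the nmethods
  // compiled since the last cleaning are candidates for disconnection.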
  if (PrintMethodFlushing && Verbose) {
    tty->print_cr("### Cleaning code cache: Live blobs:" UINT32_FORMAT "/Free code cache:" SIZE_FORMAT " bytes",
        CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
  }
  if (LogCompilation && (xtty != NULL)) {
    ttyLocker ttyl;
    xtty->begin_elem("start_cleaning_code_cache live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
                      CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
    xtty->stamp();
    xtty->end_elem();
  }

  nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
  jint disconnected = 0;
  jint made_not_entrant  = 0;
  while (nm != NULL) {
    uint curr_comp_id = nm->compile_id();

    // OSR methods cannot be flushed like this. Also, don't flush native methods
    // since they are part of the JDK in most cases
    if (nm->is_in_use() && (!nm->is_osr_method()) && (!nm->is_locked_by_vm()) &&
        (!nm->is_native_method()) && ((curr_comp_id < flush_target))) {

      if (nm->method()->code() == nm) {
        // This method has not been previously considered for
        // unloading or it was restored already
        CodeCache::speculatively_disconnect(nm);
        disconnected++;
      } else if (nm->is_speculatively_disconnected()) {
        // This method was previously considered for preemptive unloading and was not called since then
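        // Decay the profile counters so the method has to become hot again
        // before it is recompiled once this nmethod goes away.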
        nm->method()->invocation_counter()->decay();
        nm->method()->backedge_counter()->decay();
        nm->make_not_entrant();
        made_not_entrant++;
      }

      if (curr_comp_id > _highest_marked) {
        _highest_marked = curr_comp_id;
      }
    }
    nm = CodeCache::alive_nmethod(CodeCache::next(nm));
  }

  if (LogCompilation && (xtty != NULL)) {
    ttyLocker ttyl;
    xtty->begin_elem("stop_cleaning_code_cache disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "' live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
                      disconnected, made_not_entrant, CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
    xtty->stamp();
    xtty->end_elem();
  }

  // Shut off compiler. Sweeper will start over with a new stack scan and
  // traversal cycle and turn it back on if it clears enough space.
  if (was_full()) {
    _last_was_full = os::javaTimeMillis();
    CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
  }

  // After two more traversals the sweeper will get rid of unrestored nmethods
  _was_full_traversal = _traversals;
#ifdef ASSERT
  jlong end = os::javaTimeMillis();
  if (PrintMethodFlushing && Verbose) {
    tty->print_cr("### sweeper: unload time: " INT64_FORMAT, end-start);
  }
#endif
}