/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "memory/allocation.hpp"

void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
34 35 36 37 38 39
                               G1RegionToSpaceMapper* prev_bitmap,
                               G1RegionToSpaceMapper* next_bitmap,
                               G1RegionToSpaceMapper* bot,
                               G1RegionToSpaceMapper* cardtable,
                               G1RegionToSpaceMapper* card_counts) {
  _allocated_heapregions_length = 0;
40

41
  _heap_mapper = heap_storage;
42

43 44 45 46 47 48 49
  _prev_bitmap_mapper = prev_bitmap;
  _next_bitmap_mapper = next_bitmap;

  _bot_mapper = bot;
  _cardtable_mapper = cardtable;

  _card_counts_mapper = card_counts;
50

51 52 53 54 55
  MemRegion reserved = heap_storage->reserved();
  _regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes);

  _available_map.resize(_regions.length(), false);
  _available_map.clear();
56 57
}

58
bool HeapRegionManager::is_available(uint region) const {
59
  return _available_map.at(region);
60 61 62
}

#ifdef ASSERT
// Debug-only helper: a region is free iff it currently sits on the free list.
bool HeapRegionManager::is_free(HeapRegion* hr) const {
  const bool on_free_list = _free_list.contains(hr);
  return on_free_list;
}
#endif

68
HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
S
sjohanss 已提交
69 70
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* bottom = g1h->bottom_addr_for_region(hrm_index);
71 72
  MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
  assert(reserved().contains(mr), "invariant");
S
sjohanss 已提交
73
  return g1h->allocator()->new_heap_region(hrm_index, g1h->bot_shared(), mr);
74 75
}

76
void HeapRegionManager::commit_regions(uint index, size_t num_regions) {
77 78 79
  guarantee(num_regions > 0, "Must commit more than zero regions");
  guarantee(_num_committed + num_regions <= max_length(), "Cannot commit more than the maximum amount of regions");

80 81 82 83 84 85 86 87 88 89 90 91
  _num_committed += (uint)num_regions;

  _heap_mapper->commit_regions(index, num_regions);

  // Also commit auxiliary data
  _prev_bitmap_mapper->commit_regions(index, num_regions);
  _next_bitmap_mapper->commit_regions(index, num_regions);

  _bot_mapper->commit_regions(index, num_regions);
  _cardtable_mapper->commit_regions(index, num_regions);

  _card_counts_mapper->commit_regions(index, num_regions);
92 93
}

94
void HeapRegionManager::uncommit_regions(uint start, size_t num_regions) {
95
  guarantee(num_regions >= 1, err_msg("Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start));
96 97 98 99 100 101 102
  guarantee(_num_committed >= num_regions, "pre-condition");

  // Print before uncommitting.
  if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
    for (uint i = start; i < start + num_regions; i++) {
      HeapRegion* hr = at(i);
      G1CollectedHeap::heap()->hr_printer()->uncommit(hr->bottom(), hr->end());
103 104
    }
  }
105 106 107

  _num_committed -= (uint)num_regions;

108 109 110 111 112 113 114 115 116 117 118
  _available_map.par_clear_range(start, start + num_regions, BitMap::unknown_range);
  _heap_mapper->uncommit_regions(start, num_regions);

  // Also uncommit auxiliary data
  _prev_bitmap_mapper->uncommit_regions(start, num_regions);
  _next_bitmap_mapper->uncommit_regions(start, num_regions);

  _bot_mapper->uncommit_regions(start, num_regions);
  _cardtable_mapper->uncommit_regions(start, num_regions);

  _card_counts_mapper->uncommit_regions(start, num_regions);
119 120
}

121
void HeapRegionManager::make_regions_available(uint start, uint num_regions) {
122 123 124 125 126 127 128
  guarantee(num_regions > 0, "No point in calling this for zero regions");
  commit_regions(start, num_regions);
  for (uint i = start; i < start + num_regions; i++) {
    if (_regions.get_by_index(i) == NULL) {
      HeapRegion* new_hr = new_heap_region(i);
      _regions.set_by_index(i, new_hr);
      _allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1);
129 130
    }
  }
131

132
  _available_map.par_set_range(start, start + num_regions, BitMap::unknown_range);
133 134 135 136 137 138 139 140 141 142 143 144 145 146 147

  for (uint i = start; i < start + num_regions; i++) {
    assert(is_available(i), err_msg("Just made region %u available but is apparently not.", i));
    HeapRegion* hr = at(i);
    if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
      G1CollectedHeap::heap()->hr_printer()->commit(hr->bottom(), hr->end());
    }
    HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(i);
    MemRegion mr(bottom, bottom + HeapRegion::GrainWords);

    hr->initialize(mr);
    insert_into_free_list(at(i));
  }
}

148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165
MemoryUsage HeapRegionManager::get_auxiliary_data_memory_usage() const {
  size_t used_sz =
    _prev_bitmap_mapper->committed_size() +
    _next_bitmap_mapper->committed_size() +
    _bot_mapper->committed_size() +
    _cardtable_mapper->committed_size() +
    _card_counts_mapper->committed_size();

  size_t committed_sz =
    _prev_bitmap_mapper->reserved_size() +
    _next_bitmap_mapper->reserved_size() +
    _bot_mapper->reserved_size() +
    _cardtable_mapper->reserved_size() +
    _card_counts_mapper->reserved_size();

  return MemoryUsage(0, used_sz, committed_sz, committed_sz);
}

// Make up to num_regions uncommitted regions available, searching from the
// bottom of the heap. Returns the number of regions actually expanded.
uint HeapRegionManager::expand_by(uint num_regions) {
  return expand_at(0, num_regions);
}

170
uint HeapRegionManager::expand_at(uint start, uint num_regions) {
171 172 173 174 175 176 177
  if (num_regions == 0) {
    return 0;
  }

  uint cur = start;
  uint idx_last_found = 0;
  uint num_last_found = 0;
178

179
  uint expanded = 0;
180

181 182 183 184 185 186 187
  while (expanded < num_regions &&
         (num_last_found = find_unavailable_from_idx(cur, &idx_last_found)) > 0) {
    uint to_expand = MIN2(num_regions - expanded, num_last_found);
    make_regions_available(idx_last_found, to_expand);
    expanded += to_expand;
    cur = idx_last_found + num_last_found + 1;
  }
188

189 190
  verify_optional();
  return expanded;
191
}
192

// Search from the bottom of the heap for num contiguous candidate regions and
// return the index of the first one, or G1_NO_HRM_INDEX if no such run
// exists. A region is a candidate if it is committed and empty; when
// empty_only is false, uncommitted regions also qualify.
uint HeapRegionManager::find_contiguous(size_t num, bool empty_only) {
  uint found = 0;          // start index of the current candidate run
  size_t length_found = 0; // length of the current candidate run
  uint cur = 0;

  while (length_found < num && cur < max_length()) {
    HeapRegion* hr = _regions.get_by_index(cur);
    if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) {
      // This region is a potential candidate for allocation into.
      length_found++;
    } else {
      // This region is not a candidate. The next region is the next possible one.
      found = cur + 1;
      length_found = 0;
    }
    cur++;
  }

  if (length_found == num) {
    // Re-check every region of the run before handing it out.
    for (uint i = found; i < (found + num); i++) {
      HeapRegion* hr = _regions.get_by_index(i);
      // sanity check
      guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()),
                err_msg("Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT
                        " that is not empty at " UINT32_FORMAT ". Hr is " PTR_FORMAT, found, num, i, p2i(hr)));
    }
    return found;
  } else {
    return G1_NO_HRM_INDEX;
  }
}

225
HeapRegion* HeapRegionManager::next_region_in_heap(const HeapRegion* r) const {
226
  guarantee(r != NULL, "Start region must be a valid region");
227 228
  guarantee(is_available(r->hrm_index()), err_msg("Trying to iterate starting from region %u which is not in the heap", r->hrm_index()));
  for (uint i = r->hrm_index() + 1; i < _allocated_heapregions_length; i++) {
229 230 231 232
    HeapRegion* hr = _regions.get_by_index(i);
    if (is_available(i)) {
      return hr;
    }
233
  }
234
  return NULL;
235 236
}

237
void HeapRegionManager::iterate(HeapRegionClosure* blk) const {
238 239 240 241 242 243 244 245 246 247 248
  uint len = max_length();

  for (uint i = 0; i < len; i++) {
    if (!is_available(i)) {
      continue;
    }
    guarantee(at(i) != NULL, err_msg("Tried to access region %u that has a NULL HeapRegion*", i));
    bool res = blk->doHeapRegion(at(i));
    if (res) {
      blk->incomplete();
      return;
249 250
    }
  }
251 252
}

// Find the first run of contiguous unavailable (uncommitted) regions at or
// after start_idx. Stores the run's first index in *res_idx and returns its
// length; returns 0 (leaving *res_idx untouched) if every region from
// start_idx to the end of the heap is available.
uint HeapRegionManager::find_unavailable_from_idx(uint start_idx, uint* res_idx) const {
  guarantee(res_idx != NULL, "checking");
  guarantee(start_idx <= (max_length() + 1), "checking");

  uint num_regions = 0;

  uint cur = start_idx;
  // Skip the leading run of available regions.
  while (cur < max_length() && is_available(cur)) {
    cur++;
  }
  if (cur == max_length()) {
    // Reached the end of the heap without finding an unavailable region.
    return num_regions;
  }
  *res_idx = cur;
  // Measure the length of the unavailable run.
  while (cur < max_length() && !is_available(cur)) {
    cur++;
  }
  num_regions = cur - *res_idx;
#ifdef ASSERT
  for (uint i = *res_idx; i < (*res_idx + num_regions); i++) {
    assert(!is_available(i), "just checking");
  }
  assert(cur == max_length() || num_regions == 0 || is_available(cur),
         err_msg("The region at the current position %u must be available or at the end of the heap.", cur));
#endif
  return num_regions;
}

281
uint HeapRegionManager::start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const {
282
  return num_regions * worker_i / num_workers;
283 284
}

// Parallel region iteration. Each worker starts at its own offset (see
// start_region_for_worker) and walks the entire region table modulo its
// length; regions are handed out through the claim_value mechanism so each
// region's closure runs exactly once across all workers. "Continues
// humongous" regions are never claimed directly — they are processed right
// after their corresponding "starts humongous" region. Iteration for this
// worker stops as soon as a closure invocation returns true.
void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, uint num_workers, jint claim_value) const {
  const uint start_index = start_region_for_worker(worker_id, num_workers, _allocated_heapregions_length);

  // Every worker will actually look at all regions, skipping over regions that
  // are currently not committed.
  // This also (potentially) iterates over regions newly allocated during GC. This
  // is no problem except for some extra work.
  for (uint count = 0; count < _allocated_heapregions_length; count++) {
    const uint index = (start_index + count) % _allocated_heapregions_length;
    assert(0 <= index && index < _allocated_heapregions_length, "sanity");
    // Skip over unavailable regions
    if (!is_available(index)) {
      continue;
    }
    HeapRegion* r = _regions.get_by_index(index);
    // We'll ignore "continues humongous" regions (we'll process them
    // when we come across their corresponding "start humongous"
    // region) and regions already claimed.
    if (r->claim_value() == claim_value || r->continuesHumongous()) {
      continue;
    }
    // OK, try to claim it
    if (!r->claimHeapRegion(claim_value)) {
      continue;
    }
    // Success!
    if (r->startsHumongous()) {
      // If the region is "starts humongous" we'll iterate over its
      // "continues humongous" first; in fact we'll do them
      // first. The order is important. In one case, calling the
      // closure on the "starts humongous" region might de-allocate
      // and clear all its "continues humongous" regions and, as a
      // result, we might end up processing them twice. So, we'll do
      // them first (note: most closures will ignore them anyway) and
      // then we'll do the "starts humongous" region.
      for (uint ch_index = index + 1; ch_index < index + r->region_num(); ch_index++) {
        HeapRegion* chr = _regions.get_by_index(ch_index);

        assert(chr->continuesHumongous(), "Must be humongous region");
        assert(chr->humongous_start_region() == r,
               err_msg("Must work on humongous continuation of the original start region "
                       PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr)));
        assert(chr->claim_value() != claim_value,
               "Must not have been claimed yet because claiming of humongous continuation first claims the start region");

        bool claim_result = chr->claimHeapRegion(claim_value);
        // We should always be able to claim it; no one else should
        // be trying to claim this region.
        guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object");

        bool res2 = blk->doHeapRegion(chr);
        if (res2) {
          return;
        }

        // Right now, this holds (i.e., no closure that actually
        // does something with "continues humongous" regions
        // clears them). We might have to weaken it in the future,
        // but let's leave these two asserts here for extra safety.
        assert(chr->continuesHumongous(), "should still be the case");
        assert(chr->humongous_start_region() == r, "sanity");
      }
    }

    bool res = blk->doHeapRegion(r);
    if (res) {
      return;
    }
  }
}

// Uncommit up to num_regions_to_remove committed, empty regions, scanning
// downwards from the highest allocated region. Returns the number of regions
// actually removed (possibly fewer if not enough empty regions are found).
uint HeapRegionManager::shrink_by(uint num_regions_to_remove) {
  assert(length() > 0, "the region sequence should not be empty");
  assert(length() <= _allocated_heapregions_length, "invariant");
  assert(_allocated_heapregions_length > 0, "we should have at least one region committed");
  assert(num_regions_to_remove < length(), "We should never remove all regions");

  if (num_regions_to_remove == 0) {
    return 0;
  }

  uint removed = 0;
  uint cur = _allocated_heapregions_length - 1;
  uint idx_last_found = 0;
  uint num_last_found = 0;

  // Repeatedly locate the next (highest) run of empty regions and uncommit
  // as many of them as are still needed.
  while ((removed < num_regions_to_remove) &&
      (num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
    uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found);

    // Only the top to_remove regions of the found run are uncommitted, i.e.
    // removal stays biased towards the high end of the heap.
    uncommit_regions(idx_last_found + num_last_found - to_remove, to_remove);

    // Continue the backwards search below the run just examined.
    cur -= num_last_found;
    removed += to_remove;
  }

  verify_optional();

  return removed;
}

// Scan backwards from start_idx for a run of contiguous regions that are
// committed and empty. Stores the run's first (lowest) index in *res_idx and
// returns its length; returns 0 (leaving *res_idx untouched) when no such
// region exists at or below start_idx.
uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
  guarantee(start_idx < _allocated_heapregions_length, "checking");
  guarantee(res_idx != NULL, "checking");

  uint num_regions_found = 0;

  // Signed cursor so the scan can run off the low end of the heap (to -1).
  jlong cur = start_idx;
  while (cur != -1 && !(is_available(cur) && at(cur)->is_empty())) {
    cur--;
  }
  if (cur == -1) {
    return num_regions_found;
  }
  jlong old_cur = cur;
  // cur indexes the first empty region
  while (cur != -1 && is_available(cur) && at(cur)->is_empty()) {
    cur--;
  }
  *res_idx = cur + 1;
  num_regions_found = old_cur - cur;

#ifdef ASSERT
  for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) {
    assert(at(i)->is_empty(), "just checking");
  }
#endif
  return num_regions_found;
}

// Consistency check over the region table: the committed-region count, that
// every allocated region knows its own index, that addresses map back to
// their region, that committed regions are contiguous in memory (except
// across uncommitted gaps), that no HeapRegion object exists beyond
// _allocated_heapregions_length, and finally that the free list is sane.
void HeapRegionManager::verify() {
  guarantee(length() <= _allocated_heapregions_length,
            err_msg("invariant: _length: %u _allocated_length: %u",
                    length(), _allocated_heapregions_length));
  guarantee(_allocated_heapregions_length <= max_length(),
            err_msg("invariant: _allocated_length: %u _max_length: %u",
                    _allocated_heapregions_length, max_length()));

  // prev_committed tracks whether the previous index was available, so the
  // bottom-equals-previous-end check is only applied within a committed run.
  bool prev_committed = true;
  uint num_committed = 0;
  HeapWord* prev_end = heap_bottom();
  for (uint i = 0; i < _allocated_heapregions_length; i++) {
    if (!is_available(i)) {
      prev_committed = false;
      continue;
    }
    num_committed++;
    HeapRegion* hr = _regions.get_by_index(i);
    guarantee(hr != NULL, err_msg("invariant: i: %u", i));
    guarantee(!prev_committed || hr->bottom() == prev_end,
              err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
                      i, HR_FORMAT_PARAMS(hr), p2i(prev_end)));
    guarantee(hr->hrm_index() == i,
              err_msg("invariant: i: %u hrm_index(): %u", i, hr->hrm_index()));
    // Asserts will fire if i is >= _length
    HeapWord* addr = hr->bottom();
    guarantee(addr_to_region(addr) == hr, "sanity");
    // We cannot check whether the region is part of a particular set: at the time
    // this method may be called, we have only completed allocation of the regions,
    // but not put into a region set.
    prev_committed = true;
    if (hr->startsHumongous()) {
      prev_end = hr->orig_end();
    } else {
      prev_end = hr->end();
    }
  }
  // Nothing beyond the allocated length may have a HeapRegion object.
  for (uint i = _allocated_heapregions_length; i < max_length(); i++) {
    guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i));
  }

  guarantee(num_committed == _num_committed, err_msg("Found %u committed regions, but should be %u", num_committed, _num_committed));
  _free_list.verify();
}

#ifndef PRODUCT
// Debug-build-only wrapper around the full consistency check; callers invoke
// this so verification compiles away in product builds.
void HeapRegionManager::verify_optional() {
  verify();
}
#endif // PRODUCT