Commit 8ca5c0e0, authored by: I iveresov

6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")

6723229: NUMA allocator: assert(lgrp_num > 0, "There should be at least one locality group")
Summary: The fix takes care of the assertion triggered during TLAB resizing after reconfiguration. Also it now handles a defect in the topology graph, in which a single leaf node doesn't have memory.
Reviewed-by: jmasa
Parent: c3850a84
@@ -2658,6 +2658,12 @@ size_t os::numa_get_leaf_groups(int *ids, size_t size) {
     top += r;
     cur++;
   }
+  if (bottom == 0) {
+    // Handle a situation, when the OS reports no memory available.
+    // Assume UMA architecture.
+    ids[0] = 0;
+    return 1;
+  }
   return bottom;
 }
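
For illustration, the fallback above can be read in isolation as the following self-contained sketch (not the HotSpot code; NodeInfo and leaf_groups_with_memory are hypothetical names): scan the reported leaf nodes, keep only those that actually have memory, and if none qualify, assume a UMA machine and report a single group with id 0.

#include <cstddef>

// Hypothetical stand-in for the data an OS topology query returns.
struct NodeInfo { int id; bool has_memory; };

size_t leaf_groups_with_memory(const NodeInfo* nodes, size_t n,
                               int* ids, size_t capacity) {
  size_t count = 0;
  for (size_t i = 0; i < n && count < capacity; i++) {
    if (nodes[i].has_memory) {
      ids[count++] = nodes[i].id;   // a leaf node that can back allocations
    }
  }
  if (count == 0) {
    // Defective topology graph: no leaf node reports memory.
    // Fall back to a single (UMA) locality group with id 0.
    ids[0] = 0;
    return 1;
  }
  return count;
}
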
@@ -58,6 +58,12 @@ class AdaptiveWeightedAverage : public CHeapObj {
     _average(0.0), _sample_count(0), _weight(weight), _last_sample(0.0) {
   }
+  void clear() {
+    _average = 0;
+    _sample_count = 0;
+    _last_sample = 0;
+  }
   // Accessors
   float average() const { return _average; }
   unsigned weight() const { return _weight; }
@@ -115,6 +121,12 @@ class AdaptivePaddedAverage : public AdaptiveWeightedAverage {
   float deviation() const { return _deviation; }
   unsigned padding() const { return _padding; }
+  void clear() {
+    AdaptiveWeightedAverage::clear();
+    _padded_avg = 0;
+    _deviation = 0;
+  }
   // Override
   void sample(float new_sample);
 };
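
AdaptiveWeightedAverage keeps an exponentially decaying average of its samples, and the new clear() methods simply forget that history so statistics gathered before a topology change cannot skew later sizing decisions. A minimal sketch of the idea (an illustration only, not the HotSpot class, which additionally scales the weight while the sample count is still small):

class SimpleWeightedAverage {
  float    _average;       // running exponentially weighted average
  unsigned _sample_count;  // samples taken since construction or clear()
  unsigned _weight;        // percentage of a new sample that enters the average

 public:
  explicit SimpleWeightedAverage(unsigned weight)
    : _average(0.0f), _sample_count(0), _weight(weight) {}

  void sample(float new_sample) {
    _sample_count++;
    // new average = (100 - weight)% of the old average + weight% of the sample
    _average = (100.0f - _weight) * _average / 100.0f
             + _weight * new_sample / 100.0f;
  }

  float average() const { return _average; }

  // Forget all accumulated history, e.g. after the NUMA topology changed
  // and the old allocation-rate samples are no longer meaningful.
  void clear() {
    _average = 0.0f;
    _sample_count = 0;
  }
};
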
@@ -141,7 +141,20 @@ size_t MutableNUMASpace::free_in_words() const {
 size_t MutableNUMASpace::tlab_capacity(Thread *thr) const {
   guarantee(thr != NULL, "No thread");
   int lgrp_id = thr->lgrp_id();
-  assert(lgrp_id != -1, "No lgrp_id set");
+  if (lgrp_id == -1) {
+    // This case can occur after the topology of the system has
+    // changed. Thread can change their location, the new home
+    // group will be determined during the first allocation
+    // attempt. For now we can safely assume that all spaces
+    // have equal size because the whole space will be reinitialized.
+    if (lgrp_spaces()->length() > 0) {
+      return capacity_in_bytes() / lgrp_spaces()->length();
+    } else {
+      assert(false, "There should be at least one locality group");
+      return 0;
+    }
+  }
+  // That's the normal case, where we know the locality group of the thread.
   int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
   if (i == -1) {
     return 0;
@@ -150,9 +163,17 @@ size_t MutableNUMASpace::tlab_capacity(Thread *thr) const {
 }
 
 size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
+  // Please see the comments for tlab_capacity().
   guarantee(thr != NULL, "No thread");
   int lgrp_id = thr->lgrp_id();
-  assert(lgrp_id != -1, "No lgrp_id set");
+  if (lgrp_id == -1) {
+    if (lgrp_spaces()->length() > 0) {
+      return free_in_bytes() / lgrp_spaces()->length();
+    } else {
+      assert(false, "There should be at least one locality group");
+      return 0;
+    }
+  }
   int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
   if (i == -1) {
     return 0;
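
Both methods now fall back to the same estimate when the calling thread has no locality group yet: treat the per-group spaces as equal in size and hand back a 1/N share of the whole space. A standalone sketch of that fallback (illustrative names, not the MutableNUMASpace declarations; total_bytes stands for capacity_in_bytes() or free_in_bytes(), group_count for lgrp_spaces()->length()):

#include <cstddef>

size_t equal_share(size_t total_bytes, int group_count) {
  if (group_count <= 0) {
    // Mirrors the assert(false, "There should be at least one locality group")
    // branch: report nothing rather than divide by zero.
    return 0;
  }
  // All per-group spaces are assumed equal; the whole space will be
  // reinitialized anyway, so this estimate only has to be safe, not exact.
  return total_bytes / group_count;
}
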
@@ -250,10 +271,15 @@ void MutableNUMASpace::free_region(MemRegion mr) {
 void MutableNUMASpace::update() {
   if (update_layout(false)) {
     // If the topology has changed, make all chunks zero-sized.
+    // And clear the alloc-rate statistics.
+    // In future we may want to handle this more gracefully in order
+    // to avoid the reallocation of the pages as much as possible.
     for (int i = 0; i < lgrp_spaces()->length(); i++) {
-      MutableSpace *s = lgrp_spaces()->at(i)->space();
+      LGRPSpace *ls = lgrp_spaces()->at(i);
+      MutableSpace *s = ls->space();
       s->set_end(s->bottom());
       s->set_top(s->bottom());
+      ls->clear_alloc_rate();
     }
     // A NUMA space is never mangled
     initialize(region(),
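
The reconfiguration path in update() therefore resets every locality group before reinitializing the space: the chunk is shrunk to zero size (top and end back to bottom) and the allocation-rate history is dropped through the new clear_alloc_rate() hook declared in the header hunk below. A simplified sketch of that reset loop, with the HotSpot types reduced to the members used here (illustrative stand-ins, not the real declarations):

#include <vector>

// Reduced stand-ins for MutableSpace and LGRPSpace, just enough to show
// the per-group reset performed after a topology change.
struct Space {
  char* _bottom; char* _top; char* _end;
  char* bottom() const  { return _bottom; }
  void set_top(char* p) { _top = p; }
  void set_end(char* p) { _end = p; }
};

struct GroupSpace {
  Space _space;
  float _alloc_rate;
  Space* space()          { return &_space; }
  void clear_alloc_rate() { _alloc_rate = 0.0f; }
};

// Every per-group chunk becomes zero-sized and its allocation-rate
// statistics are discarded, so the subsequent reinitialization starts
// from a clean slate.
void reset_after_topology_change(std::vector<GroupSpace>& groups) {
  for (GroupSpace& g : groups) {
    Space* s = g.space();
    s->set_end(s->bottom());
    s->set_top(s->bottom());
    g.clear_alloc_rate();
  }
}
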
@@ -112,6 +112,7 @@ class MutableNUMASpace : public MutableSpace {
     int lgrp_id() const { return _lgrp_id; }
     MutableSpace* space() const { return _space; }
     AdaptiveWeightedAverage* alloc_rate() const { return _alloc_rate; }
+    void clear_alloc_rate() { _alloc_rate->clear(); }
     SpaceStats* space_stats() { return &_space_stats; }
     void clear_space_stats() { _space_stats = SpaceStats(); }