Index: runtime/vm/class_table.cc |
diff --git a/runtime/vm/class_table.cc b/runtime/vm/class_table.cc |
index 9432a1beeeca0f7e8c84c3fc73ea41bd68379715..b04f77c7b09c8d7d0370e365ba93035b10d7b257 100644 |
--- a/runtime/vm/class_table.cc |
+++ b/runtime/vm/class_table.cc |
@@ -180,6 +180,42 @@ void ClassTable::RegisterAt(intptr_t index, const Class& cls) { |
} |
+void ClassTable::RegisterAt(intptr_t index, RawClass* cls) { |
+ ASSERT(Thread::Current()->IsMutatorThread()); |
+ ASSERT(index != kIllegalCid); |
+ ASSERT(index >= kNumPredefinedCids); |
+ if (index >= capacity_) { |
+ // Grow the capacity of the class table. |
+ // TODO(koda): Add ClassTable::Grow to share code. |
+ intptr_t new_capacity = index + capacity_increment_; |
+ if (!Class::is_valid_id(index) || new_capacity < capacity_) { |
+ FATAL1("Fatal error in ClassTable::Register: invalid index %" Pd "\n", |
+ index); |
+ } |
+ RawClass** new_table = reinterpret_cast<RawClass**>( |
+ malloc(new_capacity * sizeof(RawClass*))); // NOLINT |
+ memmove(new_table, table_, capacity_ * sizeof(RawClass*)); |
+ ClassHeapStats* new_stats_table = reinterpret_cast<ClassHeapStats*>( |
+ realloc(class_heap_stats_table_, |
+ new_capacity * sizeof(ClassHeapStats))); // NOLINT |
+ for (intptr_t i = capacity_; i < new_capacity; i++) { |
+ new_table[i] = NULL; |
+ new_stats_table[i].Initialize(); |
+ } |
+ capacity_ = new_capacity; |
+ old_tables_->Add(table_); |
+ table_ = new_table; // TODO(koda): This should use atomics. |
+ class_heap_stats_table_ = new_stats_table; |
+ ASSERT(capacity_increment_ >= 1); |
+ } |
+ ASSERT(table_[index] == 0); |
[Review comment — siva, 2016/06/21 23:51:43]
This piece of code is duplicated here and above; maybe factor it out into a shared helper?

[Reply — rmacnak, 2016/06/22 19:42:17]
Factored out as AllocateIndex, which ensures capacity.
|
+ table_[index] = cls; |
+ if (index >= top_) { |
+ top_ = index + 1; |
+ } |
+} |
+ |
+ |
#if defined(DEBUG) |
void ClassTable::Unregister(intptr_t index) { |
table_[index] = 0; |