| OLD | NEW |
| 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 25 matching lines...) |
| 36 | 36 |
| 37 // The stub cache is used for megamorphic calls and property accesses. | 37 // The stub cache is used for megamorphic calls and property accesses. |
| 38 // It maps (map, name, type)->Code* | 38 // It maps (map, name, type)->Code* |
| 39 | 39 |
| 40 // The design of the table uses the inline cache stubs used for | 40 // The design of the table uses the inline cache stubs used for |
| 41 // monomorphic calls. The beauty of this is that we do not have to | 41 // monomorphic calls. The beauty of this is that we do not have to |
| 42 // invalidate the cache whenever a prototype map is changed. The stub | 42 // invalidate the cache whenever a prototype map is changed. The stub |
| 43 // validates the map chain as in the monomorphic case. | 43 // validates the map chain as in the monomorphic case. |
| 44 | 44 |
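A minimal sketch of the two-level probe described in the comment above, using simplified stand-in types rather than V8's real Map*, String*, and Code* (the hash mixing below is illustrative only):

```cpp
#include <cstdint>

// Illustrative stand-ins, not V8's real declarations.
struct Map {};
struct Name {};
struct Code {};

struct Entry { Name* key; Code* value; };

constexpr int kPrimaryTableSize = 2048;   // table sizes taken from this patch
constexpr int kSecondaryTableSize = 512;

Entry primary[kPrimaryTableSize];
Entry secondary[kSecondaryTableSize];

// Probe the primary table first, then the secondary one.  A hit returns a
// compiled stub; a double miss falls back to the runtime, which compiles a
// stub and inserts it.  Matching on the name alone is safe only because the
// returned stub re-checks the receiver's map chain when it runs, which is
// why nothing here has to be invalidated when a prototype map changes.
Code* Probe(Map* map, Name* name, uint32_t flags) {
  uintptr_t m = reinterpret_cast<uintptr_t>(map);
  uintptr_t n = reinterpret_cast<uintptr_t>(name);
  uint32_t primary_index =
      static_cast<uint32_t>((m + n) ^ flags) & (kPrimaryTableSize - 1);
  if (primary[primary_index].key == name) return primary[primary_index].value;
  uint32_t secondary_index =
      static_cast<uint32_t>(primary_index - n + flags) & (kSecondaryTableSize - 1);
  if (secondary[secondary_index].key == name) return secondary[secondary_index].value;
  return nullptr;  // miss on both tables: let the runtime compile and insert
}
```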
| 45 class SCTableReference; | 45 class SCTableReference; |
| 46 | 46 class StubCacheTypes { |
| 47 class StubCache : public AllStatic { | |
| 48 public: | 47 public: |
| 49 struct Entry { | 48 struct Entry { |
| 50 String* key; | 49 String* key; |
| 51 Code* value; | 50 Code* value; |
| 52 }; | 51 }; |
| 53 | 52 |
| 53 static const int kPrimaryTableSize = 2048; |
| 54 static const int kSecondaryTableSize = 512; |
| 55 }; |
| 54 | 56 |
| 57 class StubCacheData : public StubCacheTypes { |
| 58 Entry primary_[kPrimaryTableSize]; |
| 59 Entry secondary_[kSecondaryTableSize]; |
| 60 |
| 61 StubCacheData(); |
| 62 |
| 63 friend class StubCache; |
| 64 friend class V8Context; |
| 65 friend class SCTableReference; |
| 66 |
| 67 DISALLOW_COPY_AND_ASSIGN(StubCacheData); |
| 68 }; |
| 69 |
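The point of the new StubCacheTypes/StubCacheData split is to move the two tables out of StubCache's static storage into per-context state. Below is a minimal sketch, under assumptions, of how the pieces presumably fit together; V8Context and v8_context() are defined elsewhere in this patch series, so their shapes here are illustrative only:

```cpp
#include <cstring>

// Simplified shapes; the real Entry stores String*/Code* pointers.
class StubCacheTypes {
 public:
  struct Entry { const char* key; void* value; };
  static const int kPrimaryTableSize = 2048;
  static const int kSecondaryTableSize = 512;
};

class StubCacheData : public StubCacheTypes {
 public:
  StubCacheData() {
    std::memset(primary_, 0, sizeof(primary_));
    std::memset(secondary_, 0, sizeof(secondary_));
  }
  Entry primary_[kPrimaryTableSize];
  Entry secondary_[kSecondaryTableSize];
};

struct V8Context {                 // assumed shape of the per-context holder
  StubCacheData stub_cache_data_;
};

V8Context* v8_context() {          // stand-in for the real accessor
  static V8Context context;        // one instance per process in this sketch
  return &context;
}

// The old statics StubCache::primary_/secondary_ become lookups like this:
StubCacheTypes::Entry* first_entry(bool primary) {
  StubCacheData& data = v8_context()->stub_cache_data_;
  return primary ? data.primary_ : data.secondary_;
}
```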
| 70 class StubCache : public AllStatic, public StubCacheTypes { |
| 71 public: |
| 55 static void Initialize(bool create_heap_objects); | 72 static void Initialize(bool create_heap_objects); |
| 56 | 73 |
| 57 // Computes the right stub matching. Inserts the result in the | 74 // Computes the right stub matching. Inserts the result in the |
| 58 // cache before returning. This might compile a stub if needed. | 75 // cache before returning. This might compile a stub if needed. |
| 59 static Object* ComputeLoadField(String* name, | 76 static Object* ComputeLoadField(String* name, |
| 60 JSObject* receiver, | 77 JSObject* receiver, |
| 61 JSObject* holder, | 78 JSObject* holder, |
| 62 int field_index); | 79 int field_index); |
| 63 | 80 |
| 64 static Object* ComputeLoadCallback(String* name, | 81 static Object* ComputeLoadCallback(String* name, |
| (...skipping 141 matching lines...) |
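For the Compute* family above ("computes the right stub matching, inserts the result in the cache before returning, possibly compiling a stub"), the control flow looks roughly like the sketch below. All types and helpers here are hypothetical stand-ins; only the probe-compile-insert-return flow is the point:

```cpp
#include <map>
#include <utility>

struct Map {};
struct Code {};

// Stands in for the primary/secondary tables; keyed by (receiver map, name).
std::map<std::pair<const Map*, const char*>, Code*> cache;

Code* CompileLoadField(const Map* receiver_map, int field_index) {
  (void)receiver_map;
  (void)field_index;
  return new Code();   // the real code generator emits a LoadField IC stub
}

Code* ComputeLoadField(const char* name, const Map* receiver_map,
                       int field_index) {
  auto key = std::make_pair(receiver_map, name);
  auto it = cache.find(key);
  if (it != cache.end()) return it->second;                  // already cached
  Code* stub = CompileLoadField(receiver_map, field_index);  // miss: compile
  cache.emplace(key, stub);                                  // insert first
  return stub;                                               // then return
}
```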
| 206 Register name, | 223 Register name, |
| 207 Register scratch, | 224 Register scratch, |
| 208 Register extra); | 225 Register extra); |
| 209 | 226 |
| 210 enum Table { | 227 enum Table { |
| 211 kPrimary, | 228 kPrimary, |
| 212 kSecondary | 229 kSecondary |
| 213 }; | 230 }; |
| 214 | 231 |
| 215 private: | 232 private: |
| 216 friend class SCTableReference; | |
| 217 static const int kPrimaryTableSize = 2048; | |
| 218 static const int kSecondaryTableSize = 512; | |
| 219 static Entry primary_[]; | |
| 220 static Entry secondary_[]; | |
| 221 | |
| 222 // Computes the hashed offsets for primary and secondary caches. | 233 // Computes the hashed offsets for primary and secondary caches. |
| 223 static int PrimaryOffset(String* name, Code::Flags flags, Map* map) { | 234 static int PrimaryOffset(String* name, Code::Flags flags, Map* map) { |
| 224 // This works well because the heap object tag size and the hash | 235 // This works well because the heap object tag size and the hash |
| 225 // shift are equal. Shifting down the length field to get the | 236 // shift are equal. Shifting down the length field to get the |
| 226 // hash code would effectively throw away two bits of the hash | 237 // hash code would effectively throw away two bits of the hash |
| 227 // code. | 238 // code. |
| 228 ASSERT(kHeapObjectTagSize == String::kHashShift); | 239 ASSERT(kHeapObjectTagSize == String::kHashShift); |
| 229 // Compute the hash of the name (use entire hash field). | 240 // Compute the hash of the name (use entire hash field). |
| 230 ASSERT(name->HasHashCode()); | 241 ASSERT(name->HasHashCode()); |
| 231 uint32_t field = name->hash_field(); | 242 uint32_t field = name->hash_field(); |
| (...skipping 49 matching lines...) |
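A worked sketch of the offset computation that PrimaryOffset's comments describe. The constants and the mixing function below are illustrative assumptions, not V8's exact code; what it shows is why the hash field is used unshifted: since kHeapObjectTagSize equals String::kHashShift, shifting the field down first would throw away two bits of hash that the masked offset could otherwise use:

```cpp
#include <cstdint>

// Illustrative values; the real constants come from V8's headers.
constexpr int kHeapObjectTagSize = 2;
constexpr int kHashShift = 2;            // stands in for String::kHashShift
constexpr int kPrimaryTableSize = 2048;
static_assert(kHeapObjectTagSize == kHashShift,
              "the offset scheme relies on these shifts being equal");

uint32_t PrimaryOffsetSketch(uint32_t hash_field, uintptr_t map_ptr,
                             uint32_t flags) {
  uint32_t map_low = static_cast<uint32_t>(map_ptr);  // low 32 bits suffice
  uint32_t key = (hash_field + map_low) ^ flags;      // simple mix
  // Mask the mixed key down to an in-table, tag-aligned offset that the
  // generated probe code can add directly to the table's base address.
  return key & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
}
```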
| 281 reinterpret_cast<Address>(&first_entry(table)->value)); | 292 reinterpret_cast<Address>(&first_entry(table)->value)); |
| 282 } | 293 } |
| 283 | 294 |
| 284 Address address() const { return address_; } | 295 Address address() const { return address_; } |
| 285 | 296 |
| 286 private: | 297 private: |
| 287 explicit SCTableReference(Address address) : address_(address) {} | 298 explicit SCTableReference(Address address) : address_(address) {} |
| 288 | 299 |
| 289 static StubCache::Entry* first_entry(StubCache::Table table) { | 300 static StubCache::Entry* first_entry(StubCache::Table table) { |
| 290 switch (table) { | 301 switch (table) { |
| 291 case StubCache::kPrimary: return StubCache::primary_; | 302 case StubCache::kPrimary: |
| 292 case StubCache::kSecondary: return StubCache::secondary_; | 303 return v8_context()->stub_cache_data_.primary_; |
| 304 case StubCache::kSecondary: |
| 305 return v8_context()->stub_cache_data_.secondary_; |
| 293 } | 306 } |
| 294 UNREACHABLE(); | 307 UNREACHABLE(); |
| 295 return NULL; | 308 return NULL; |
| 296 } | 309 } |
| 297 | 310 |
| 298 Address address_; | 311 Address address_; |
| 299 }; | 312 }; |
| 300 | 313 |
| 301 // ------------------------------------------------------------------------ | 314 // ------------------------------------------------------------------------ |
| 302 | 315 |
| (...skipping 265 matching lines...) |
| 568 Object* CompileConstructStub(SharedFunctionInfo* shared); | 581 Object* CompileConstructStub(SharedFunctionInfo* shared); |
| 569 | 582 |
| 570 private: | 583 private: |
| 571 Object* GetCode(); | 584 Object* GetCode(); |
| 572 }; | 585 }; |
| 573 | 586 |
| 574 | 587 |
| 575 } } // namespace v8::internal | 588 } } // namespace v8::internal |
| 576 | 589 |
| 577 #endif // V8_STUB_CACHE_H_ | 590 #endif // V8_STUB_CACHE_H_ |