OLD | NEW |
1 // Copyright 2006-2010 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 346 matching lines...)
357 | 357 |
358 ASSERT(top_ == max_nof_chunks_); // all chunks are free | 358 ASSERT(top_ == max_nof_chunks_); // all chunks are free |
359 top_ = 0; | 359 top_ = 0; |
360 capacity_ = 0; | 360 capacity_ = 0; |
361 capacity_executable_ = 0; | 361 capacity_executable_ = 0; |
362 size_ = 0; | 362 size_ = 0; |
363 max_nof_chunks_ = 0; | 363 max_nof_chunks_ = 0; |
364 } | 364 } |
365 | 365 |
366 | 366 |
367 void MemoryAllocator::FreeChunkTables(AtomicWord* array, int len, int level) { | 367 void MemoryAllocator::FreeChunkTables(uintptr_t* array, int len, int level) { |
368 for (int i = 0; i < len; i++) { | 368 for (int i = 0; i < len; i++) { |
369 if (array[i] != kUnusedChunkTableEntry) { | 369 if (array[i] != kUnusedChunkTableEntry) { |
370 AtomicWord* subarray = reinterpret_cast<AtomicWord*>(array[i]); | 370 uintptr_t* subarray = reinterpret_cast<uintptr_t*>(array[i]); |
371 if (level > 1) { | 371 if (level > 1) { |
372 Release_Store(&array[i], kUnusedChunkTableEntry); | 372 array[i] = kUnusedChunkTableEntry; |
373 FreeChunkTables(subarray, 1 << kChunkTableBitsPerLevel, level - 1); | 373 FreeChunkTables(subarray, 1 << kChunkTableBitsPerLevel, level - 1); |
374 } else { | 374 } else { |
375 Release_Store(&array[i], kUnusedChunkTableEntry); | 375 array[i] = kUnusedChunkTableEntry; |
376 } | 376 } |
377 delete[] subarray; | 377 delete[] subarray; |
378 } | 378 } |
379 } | 379 } |
380 } | 380 } |
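
FreeChunkTables above tears down a multi-level trie of heap-allocated uintptr_t arrays, now clearing each slot with a plain store (no more Release_Store) before deleting the subtable it pointed to. A minimal self-contained sketch of the same clear-then-delete teardown on a two-level table; kBitsPerLevel and the zero sentinel are assumed stand-ins for V8's kChunkTableBitsPerLevel and kUnusedChunkTableEntry, not the real values:

    // Sketch only: two-level teardown mirroring FreeChunkTables.
    #include <stdint.h>

    static const int kBitsPerLevel = 4;         // assumed stand-in
    static const uintptr_t kUnusedEntry = 0;    // assumed sentinel

    static uintptr_t* NewTable(int len) {
      uintptr_t* t = new uintptr_t[len];
      for (int i = 0; i < len; i++) t[i] = kUnusedEntry;
      return t;
    }

    int main() {
      const int len = 1 << kBitsPerLevel;
      uintptr_t* top = NewTable(len);
      top[3] = reinterpret_cast<uintptr_t>(NewTable(len));  // one child table
      // Teardown, as in FreeChunkTables: clear the slot with a plain
      // store, then delete the subtable it pointed to.
      uintptr_t* child = reinterpret_cast<uintptr_t*>(top[3]);
      top[3] = kUnusedEntry;
      delete[] child;
      delete[] top;
      return 0;
    }
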
381 | 381 |
382 | 382 |
383 void* MemoryAllocator::AllocateRawMemory(const size_t requested, | 383 void* MemoryAllocator::AllocateRawMemory(const size_t requested, |
384 size_t* allocated, | 384 size_t* allocated, |
385 Executability executable) { | 385 Executability executable) { |
(...skipping 429 matching lines...)
815 void MemoryAllocator::AddToAllocatedChunks(Address addr, intptr_t size) { | 815 void MemoryAllocator::AddToAllocatedChunks(Address addr, intptr_t size) { |
816 ASSERT(size == kChunkSize); | 816 ASSERT(size == kChunkSize); |
817 uintptr_t int_address = reinterpret_cast<uintptr_t>(addr); | 817 uintptr_t int_address = reinterpret_cast<uintptr_t>(addr); |
818 AddChunkUsingAddress(int_address, int_address); | 818 AddChunkUsingAddress(int_address, int_address); |
819 AddChunkUsingAddress(int_address, int_address + size - 1); | 819 AddChunkUsingAddress(int_address, int_address + size - 1); |
820 } | 820 } |
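
Registering the chunk under both its first and its last byte is what makes the lookup work for unaligned chunks: a chunk of exactly kChunkSize bytes overlaps at most two kChunkSize-aligned regions, and the two AddChunkUsingAddress calls cover both. A worked sketch of that arithmetic, assuming an illustrative kChunkSizeLog2 of 20 (the real constant may differ):

    // Sketch: why start- and end-registration cover every aligned
    // region an unaligned chunk can touch.
    #include <assert.h>
    #include <stdint.h>

    static const int kChunkSizeLog2 = 20;  // assumed stand-in value
    static const uintptr_t kChunkSize =
        static_cast<uintptr_t>(1) << kChunkSizeLog2;

    static uintptr_t RegionOf(uintptr_t addr) { return addr >> kChunkSizeLog2; }

    int main() {
      uintptr_t chunk_start = 0x2A80000;  // deliberately not region-aligned
      uintptr_t chunk_end = chunk_start + kChunkSize - 1;
      // A kChunkSize-sized chunk spans at most two aligned regions, so
      // indexing it under chunk_start and chunk_end reaches both.
      assert(RegionOf(chunk_end) - RegionOf(chunk_start) <= 1);
      return 0;
    }
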
821 | 821 |
822 | 822 |
823 void MemoryAllocator::AddChunkUsingAddress(uintptr_t chunk_start, | 823 void MemoryAllocator::AddChunkUsingAddress(uintptr_t chunk_start, |
824 uintptr_t chunk_index_base) { | 824 uintptr_t chunk_index_base) { |
825 AtomicWord* fine_grained = AllocatedChunksFinder( | 825 uintptr_t* fine_grained = AllocatedChunksFinder( |
826 chunk_table_, | 826 chunk_table_, |
827 chunk_index_base, | 827 chunk_index_base, |
828 kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel, | 828 kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel, |
829 kCreateTablesAsNeeded); | 829 kCreateTablesAsNeeded); |
830 int index = FineGrainedIndexForAddress(chunk_index_base); | 830 int index = FineGrainedIndexForAddress(chunk_index_base); |
831 if (fine_grained[index] != kUnusedChunkTableEntry) index++; | 831 if (fine_grained[index] != kUnusedChunkTableEntry) index++; |
832 ASSERT(fine_grained[index] == kUnusedChunkTableEntry); | 832 ASSERT(fine_grained[index] == kUnusedChunkTableEntry); |
833 Release_Store(&fine_grained[index], chunk_start); | 833 fine_grained[index] = chunk_start; |
834 } | 834 } |
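
The fine-grained entry holds two words precisely because two chunks can overlap one aligned region, and AddChunkUsingAddress fills the first free word of the pair. A hypothetical helper showing the same first-free-slot rule (the zero sentinel is an assumption):

    // Hypothetical helper mirroring the slot choice above: use the
    // second word only when the first is already occupied.
    #include <assert.h>
    #include <stdint.h>

    static const uintptr_t kUnusedEntry = 0;  // assumed sentinel

    void AddToPair(uintptr_t pair[2], uintptr_t chunk_start) {
      int index = (pair[0] != kUnusedEntry) ? 1 : 0;
      assert(pair[index] == kUnusedEntry);  // never more than two chunks
      pair[index] = chunk_start;
    }
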
835 | 835 |
836 | 836 |
837 void MemoryAllocator::RemoveFromAllocatedChunks(Address addr, intptr_t size) { | 837 void MemoryAllocator::RemoveFromAllocatedChunks(Address addr, intptr_t size) { |
838 ASSERT(size == kChunkSize); | 838 ASSERT(size == kChunkSize); |
839 uintptr_t int_address = reinterpret_cast<uintptr_t>(addr); | 839 uintptr_t int_address = reinterpret_cast<uintptr_t>(addr); |
840 RemoveChunkFoundUsingAddress(int_address, int_address); | 840 RemoveChunkFoundUsingAddress(int_address, int_address); |
841 RemoveChunkFoundUsingAddress(int_address, int_address + size - 1); | 841 RemoveChunkFoundUsingAddress(int_address, int_address + size - 1); |
842 } | 842 } |
843 | 843 |
844 | 844 |
845 void MemoryAllocator::RemoveChunkFoundUsingAddress( | 845 void MemoryAllocator::RemoveChunkFoundUsingAddress( |
846 uintptr_t chunk_start, | 846 uintptr_t chunk_start, |
847 uintptr_t chunk_index_base) { | 847 uintptr_t chunk_index_base) { |
848 AtomicWord* fine_grained = AllocatedChunksFinder( | 848 uintptr_t* fine_grained = AllocatedChunksFinder( |
849 chunk_table_, | 849 chunk_table_, |
850 chunk_index_base, | 850 chunk_index_base, |
851 kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel, | 851 kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel, |
852 kDontCreateTables); | 852 kDontCreateTables); |
853 // Can't remove an entry that's not there. | 853 // Can't remove an entry that's not there. |
854 ASSERT(fine_grained != kUnusedChunkTableEntry); | 854 ASSERT(fine_grained != kUnusedChunkTableEntry); |
855 int index = FineGrainedIndexForAddress(chunk_index_base); | 855 int index = FineGrainedIndexForAddress(chunk_index_base); |
856 ASSERT(fine_grained[index] != kUnusedChunkTableEntry); | 856 ASSERT(fine_grained[index] != kUnusedChunkTableEntry); |
857 if (fine_grained[index] != static_cast<AtomicWord>(chunk_start)) { | 857 if (fine_grained[index] != chunk_start) { |
858 index++; | 858 index++; |
859 ASSERT(fine_grained[index] == static_cast<AtomicWord>(chunk_start)); | 859 ASSERT(fine_grained[index] == chunk_start); |
860 Release_Store(&fine_grained[index], kUnusedChunkTableEntry); | 860 fine_grained[index] = kUnusedChunkTableEntry; |
861 } else { | 861 } else { |
862 Release_Store(&fine_grained[index], fine_grained[index + 1]); | 862 // If only one of the entries is used it must be the first, since |
863 // Here for a moment the two entries are duplicates, but the reader can | 863 // InAllocatedChunks relies on that. Move things around so that this is |
864 // handle that. | 864 // the case. |
865 NoBarrier_Store(&fine_grained[index + 1], kUnusedChunkTableEntry); | 865 fine_grained[index] = fine_grained[index + 1]; |
| 866 fine_grained[index + 1] = kUnusedChunkTableEntry; |
866 } | 867 } |
867 } | 868 } |
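
The rewritten else-branch is the heart of this change: the old code briefly left both words holding duplicates so a barrier-free reader could never see a torn pair, while the new code simply compacts the pair, preserving the invariant that a used word never follows an unused one (which InAllocatedChunks relies on to stop probing at the first unused slot). A sketch of removal under that invariant, with the same assumed sentinel:

    // Sketch of pair removal maintaining the packed-first invariant
    // described in the comment above.
    #include <stdint.h>

    static const uintptr_t kUnusedEntry = 0;  // assumed sentinel

    void RemoveFromPair(uintptr_t pair[2], uintptr_t chunk_start) {
      if (pair[0] != chunk_start) {
        pair[1] = kUnusedEntry;     // removing the second word: just clear it
      } else {
        pair[0] = pair[1];          // shift any survivor into the first word
        pair[1] = kUnusedEntry;
      }
    }
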
868 | 869 |
869 | 870 |
870 bool MemoryAllocator::InAllocatedChunks(Address addr) { | 871 bool MemoryAllocator::InAllocatedChunks(Address addr) { |
871 uintptr_t int_address = reinterpret_cast<uintptr_t>(addr); | 872 uintptr_t int_address = reinterpret_cast<uintptr_t>(addr); |
872 AtomicWord* fine_grained = AllocatedChunksFinder( | 873 uintptr_t* fine_grained = AllocatedChunksFinder( |
873 chunk_table_, | 874 chunk_table_, |
874 int_address, | 875 int_address, |
875 kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel, | 876 kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel, |
876 kDontCreateTables); | 877 kDontCreateTables); |
877 if (fine_grained == NULL) return false; | 878 if (fine_grained == NULL) return false; |
878 int index = FineGrainedIndexForAddress(int_address); | 879 int index = FineGrainedIndexForAddress(int_address); |
879 if (fine_grained[index] == kUnusedChunkTableEntry) return false; | 880 if (fine_grained[index] == kUnusedChunkTableEntry) return false; |
880 uintptr_t entry = static_cast<uintptr_t>(fine_grained[index]); | 881 uintptr_t entry = fine_grained[index]; |
881 if (entry <= int_address && entry + kChunkSize > int_address) return true; | 882 if (entry <= int_address && entry + kChunkSize > int_address) return true; |
882 index++; | 883 index++; |
883 if (fine_grained[index] == kUnusedChunkTableEntry) return false; | 884 if (fine_grained[index] == kUnusedChunkTableEntry) return false; |
884 entry = static_cast<uintptr_t>(fine_grained[index]); | 885 entry = fine_grained[index]; |
885 // At this point it would seem that we must have a hit, but there is a small | |
886 // window during RemoveChunkFoundUsingAddress where the two entries are | |
887 // duplicates and we have to handle that. | |
888 if (entry <= int_address && entry + kChunkSize > int_address) return true; | 886 if (entry <= int_address && entry + kChunkSize > int_address) return true; |
889 return false; | 887 return false; |
890 } | 888 } |
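
With the transient-duplicate window gone, the lookup reduces to a straight two-slot probe plus a half-open containment test on each candidate. A compact sketch of that probe, under the same assumed constants as above:

    // Sketch of the two-slot probe: slots are packed first, so an
    // unused word ends the search.
    #include <stdint.h>

    static const int kChunkSizeLog2 = 20;       // assumed stand-in
    static const uintptr_t kChunkSize =
        static_cast<uintptr_t>(1) << kChunkSizeLog2;
    static const uintptr_t kUnusedEntry = 0;    // assumed sentinel

    bool PairContains(const uintptr_t pair[2], uintptr_t addr) {
      for (int i = 0; i < 2; i++) {
        if (pair[i] == kUnusedEntry) return false;  // packed-first invariant
        if (pair[i] <= addr && addr < pair[i] + kChunkSize) return true;
      }
      return false;
    }
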
891 | 889 |
892 | 890 |
893 AtomicWord* MemoryAllocator::AllocatedChunksFinder( | 891 uintptr_t* MemoryAllocator::AllocatedChunksFinder( |
894 AtomicWord* table, | 892 uintptr_t* table, |
895 uintptr_t address, | 893 uintptr_t address, |
896 int bit_position, | 894 int bit_position, |
897 CreateTables create_as_needed) { | 895 CreateTables create_as_needed) { |
898 if (bit_position == kChunkSizeLog2) { | 896 if (bit_position == kChunkSizeLog2) { |
899 return table; | 897 return table; |
900 } | 898 } |
901 ASSERT(bit_position >= kChunkSizeLog2 + kChunkTableBitsPerLevel); | 899 ASSERT(bit_position >= kChunkSizeLog2 + kChunkTableBitsPerLevel); |
902 int index = | 900 int index = |
903 ((address >> bit_position) & | 901 ((address >> bit_position) & |
904 ((V8_INTPTR_C(1) << kChunkTableBitsPerLevel) - 1)); | 902 ((V8_INTPTR_C(1) << kChunkTableBitsPerLevel) - 1)); |
905 uintptr_t more_fine_grained_address = | 903 uintptr_t more_fine_grained_address = |
906 address & ((V8_INTPTR_C(1) << bit_position) - 1); | 904 address & ((V8_INTPTR_C(1) << bit_position) - 1); |
907 ASSERT((table == chunk_table_ && index < kChunkTableTopLevelEntries) || | 905 ASSERT((table == chunk_table_ && index < kChunkTableTopLevelEntries) || |
908 (table != chunk_table_ && index < 1 << kChunkTableBitsPerLevel)); | 906 (table != chunk_table_ && index < 1 << kChunkTableBitsPerLevel)); |
909 AtomicWord* more_fine_grained_table = | 907 uintptr_t* more_fine_grained_table = |
910 reinterpret_cast<AtomicWord*>(table[index]); | 908 reinterpret_cast<uintptr_t*>(table[index]); |
911 if (more_fine_grained_table == kUnusedChunkTableEntry) { | 909 if (more_fine_grained_table == kUnusedChunkTableEntry) { |
912 if (create_as_needed == kDontCreateTables) return NULL; | 910 if (create_as_needed == kDontCreateTables) return NULL; |
913 int words_needed = 1 << kChunkTableBitsPerLevel; | 911 int words_needed = 1 << kChunkTableBitsPerLevel; |
914 if (bit_position == kChunkTableBitsPerLevel + kChunkSizeLog2) { | 912 if (bit_position == kChunkTableBitsPerLevel + kChunkSizeLog2) { |
915 words_needed = | 913 words_needed = |
916 (1 << kChunkTableBitsPerLevel) * kChunkTableFineGrainedWordsPerEntry; | 914 (1 << kChunkTableBitsPerLevel) * kChunkTableFineGrainedWordsPerEntry; |
917 } | 915 } |
918 more_fine_grained_table = new AtomicWord[words_needed]; | 916 more_fine_grained_table = new uintptr_t[words_needed]; |
919 for (int i = 0; i < words_needed; i++) { | 917 for (int i = 0; i < words_needed; i++) { |
920 more_fine_grained_table[i] = kUnusedChunkTableEntry; | 918 more_fine_grained_table[i] = kUnusedChunkTableEntry; |
921 } | 919 } |
922 Release_Store(&table[index], | 920 table[index] = reinterpret_cast<uintptr_t>(more_fine_grained_table); |
923 reinterpret_cast<AtomicWord>(more_fine_grained_table)); | |
924 } | 921 } |
925 return AllocatedChunksFinder( | 922 return AllocatedChunksFinder( |
926 more_fine_grained_table, | 923 more_fine_grained_table, |
927 more_fine_grained_address, | 924 more_fine_grained_address, |
928 bit_position - kChunkTableBitsPerLevel, | 925 bit_position - kChunkTableBitsPerLevel, |
929 create_as_needed); | 926 create_as_needed); |
930 } | 927 } |
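
Each recursion level of AllocatedChunksFinder peels kChunkTableBitsPerLevel bits off the address, starting just above kChunkSizeLog2, and masks the consumed high bits away before descending. A worked example of the index arithmetic with assumed values (kChunkSizeLog2 = 20, kChunkTableBitsPerLevel = 4; the real constants may differ):

    // Worked example of the per-level index computation, with assumed
    // stand-in constants.
    #include <stdint.h>

    static const int kChunkSizeLog2 = 20;  // assumed
    static const int kBitsPerLevel = 4;    // assumed

    int IndexAtLevel(uintptr_t address, int bit_position) {
      return static_cast<int>(
          (address >> bit_position) &
          ((static_cast<uintptr_t>(1) << kBitsPerLevel) - 1));
    }

    // For address 0x2A80000 with bit_position = 24:
    //   index     = (0x2A80000 >> 24) & 0xF = 2
    //   remainder = 0x2A80000 & 0xFFFFFF    = 0xA80000
    // The next level recurses on the remainder with bit_position = 20:
    //   index     = (0xA80000 >> 20) & 0xF  = 0xA
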
931 | 928 |
932 | 929 |
933 AtomicWord MemoryAllocator::chunk_table_[kChunkTableTopLevelEntries]; | 930 uintptr_t MemoryAllocator::chunk_table_[kChunkTableTopLevelEntries]; |
934 | 931 |
935 | 932 |
936 // ----------------------------------------------------------------------------- | 933 // ----------------------------------------------------------------------------- |
937 // PagedSpace implementation | 934 // PagedSpace implementation |
938 | 935 |
939 PagedSpace::PagedSpace(intptr_t max_capacity, | 936 PagedSpace::PagedSpace(intptr_t max_capacity, |
940 AllocationSpace id, | 937 AllocationSpace id, |
941 Executability executable) | 938 Executability executable) |
942 : Space(id, executable) { | 939 : Space(id, executable) { |
943 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) | 940 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) |
(...skipping 2301 matching lines...)
3245 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { | 3242 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { |
3246 if (obj->IsCode()) { | 3243 if (obj->IsCode()) { |
3247 Code* code = Code::cast(obj); | 3244 Code* code = Code::cast(obj); |
3248 code_kind_statistics[code->kind()] += code->Size(); | 3245 code_kind_statistics[code->kind()] += code->Size(); |
3249 } | 3246 } |
3250 } | 3247 } |
3251 } | 3248 } |
3252 #endif // DEBUG | 3249 #endif // DEBUG |
3253 | 3250 |
3254 } } // namespace v8::internal | 3251 } } // namespace v8::internal |