| OLD | NEW |
| 1 // Copyright (c) 2005, Google Inc. | 1 // Copyright (c) 2005, Google Inc. |
| 2 // All rights reserved. | 2 // All rights reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
| 6 // met: | 6 // met: |
| 7 // | 7 // |
| 8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
| 9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
| 10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
| (...skipping 1112 matching lines...) |
| 1123 } | 1123 } |
| 1124 | 1124 |
| 1125 // Helper for do_malloc(). | 1125 // Helper for do_malloc(). |
| 1126 inline void* do_malloc_pages(ThreadCache* heap, size_t size) { | 1126 inline void* do_malloc_pages(ThreadCache* heap, size_t size) { |
| 1127 void* result; | 1127 void* result; |
| 1128 bool report_large; | 1128 bool report_large; |
| 1129 | 1129 |
| 1130 Length num_pages = tcmalloc::pages(size); | 1130 Length num_pages = tcmalloc::pages(size); |
| 1131 size = num_pages << kPageShift; | 1131 size = num_pages << kPageShift; |
| 1132 | 1132 |
| 1133 heap->AddToByteAllocatedTotal(size); // Chromium profiling. |
| 1134 |
| 1133 if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) { | 1135 if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) { |
| 1134 result = DoSampledAllocation(size); | 1136 result = DoSampledAllocation(size); |
| 1135 | 1137 |
| 1136 SpinLockHolder h(Static::pageheap_lock()); | 1138 SpinLockHolder h(Static::pageheap_lock()); |
| 1137 report_large = should_report_large(num_pages); | 1139 report_large = should_report_large(num_pages); |
| 1138 } else { | 1140 } else { |
| 1139 SpinLockHolder h(Static::pageheap_lock()); | 1141 SpinLockHolder h(Static::pageheap_lock()); |
| 1140 Span* span = Static::pageheap()->New(num_pages); | 1142 Span* span = Static::pageheap()->New(num_pages); |
| 1141 result = (span == NULL ? NULL : SpanToMallocResult(span)); | 1143 result = (span == NULL ? NULL : SpanToMallocResult(span)); |
| 1142 report_large = should_report_large(num_pages); | 1144 report_large = should_report_large(num_pages); |
| 1143 } | 1145 } |
| 1144 | 1146 |
| 1145 if (report_large) { | 1147 if (report_large) { |
| 1146 ReportLargeAlloc(num_pages, result); | 1148 ReportLargeAlloc(num_pages, result); |
| 1147 } | 1149 } |
| 1148 return result; | 1150 return result; |
| 1149 } | 1151 } |
| 1150 | 1152 |
| 1151 inline void* do_malloc(size_t size) { | 1153 inline void* do_malloc(size_t size) { |
| 1152 AddRoomForMark(&size); | 1154 AddRoomForMark(&size); |
| 1153 | 1155 |
| 1154 void* ret = NULL; | 1156 void* ret = NULL; |
| 1155 | 1157 |
| 1156 // The following call forces module initialization | 1158 // The following call forces module initialization |
| 1157 ThreadCache* heap = ThreadCache::GetCache(); | 1159 ThreadCache* heap = ThreadCache::GetCache(); |
| 1158 if (size <= kMaxSize) { | 1160 if (size <= kMaxSize) { |
| 1159 size_t cl = Static::sizemap()->SizeClass(size); | 1161 size_t cl = Static::sizemap()->SizeClass(size); |
| 1160 size = Static::sizemap()->class_to_size(cl); | 1162 size = Static::sizemap()->class_to_size(cl); |
| 1161 | 1163 |
| 1164 // TODO(jar): If this has any detectable performance impact, it can be |
| 1165 // optimized by only tallying sizes if the profiler was activated to recall |
| 1166 // these tallies. I don't think this is performance critical, but we really |
| 1167 // should measure it. |
| 1168 heap->AddToByteAllocatedTotal(size); // Chromium profiling. |
| 1169 |
| 1162 if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) { | 1170 if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) { |
| 1163 ret = DoSampledAllocation(size); | 1171 ret = DoSampledAllocation(size); |
| 1164 MarkAllocatedRegion(ret); | 1172 MarkAllocatedRegion(ret); |
| 1165 } else { | 1173 } else { |
| 1166 // The common case, and also the simplest. This just pops the | 1174 // The common case, and also the simplest. This just pops the |
| 1167 // size-appropriate freelist, after replenishing it if it's empty. | 1175 // size-appropriate freelist, after replenishing it if it's empty. |
| 1168 ret = CheckedMallocResult(heap->Allocate(size, cl)); | 1176 ret = CheckedMallocResult(heap->Allocate(size, cl)); |
| 1169 } | 1177 } |
| 1170 } else { | 1178 } else { |
| 1171 ret = do_malloc_pages(heap, size); | 1179 ret = do_malloc_pages(heap, size); |
| (...skipping 756 matching lines...) |
| 1928 *mark = ~allocated_mark; // Distinctively not allocated. | 1936 *mark = ~allocated_mark; // Distinctively not allocated. |
| 1929 } | 1937 } |
| 1930 | 1938 |
| 1931 static void MarkAllocatedRegion(void* ptr) { | 1939 static void MarkAllocatedRegion(void* ptr) { |
| 1932 if (ptr == NULL) return; | 1940 if (ptr == NULL) return; |
| 1933 MarkType* mark = GetMarkLocation(ptr); | 1941 MarkType* mark = GetMarkLocation(ptr); |
| 1934 *mark = GetMarkValue(ptr, mark); | 1942 *mark = GetMarkValue(ptr, mark); |
| 1935 } | 1943 } |
| 1936 | 1944 |
| 1937 #endif // TCMALLOC_VALIDATION | 1945 #endif // TCMALLOC_VALIDATION |
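
For context on this hunk: the patch tallies the rounded-up allocation size on both the large-object path (after page rounding in `do_malloc_pages`) and the small-object path (after size-class rounding in `do_malloc`) via `heap->AddToByteAllocatedTotal(size)`. The ThreadCache-side accounting is not shown in this diff, so the sketch below is only a minimal stand-in for what such a per-thread byte counter could look like, including the "tally only when profiling" gate suggested by the TODO. The member names other than `AddToByteAllocatedTotal`, and the `profiling_active_` flag, are assumptions for illustration, not the actual Chromium implementation.

```cpp
// Minimal sketch of a per-thread byte-allocation tally, assuming a counter
// like the one AddToByteAllocatedTotal() presumably updates. The gated
// variant reflects the optimization floated in the TODO; the flag is
// hypothetical and not part of the actual patch.
#include <cstddef>
#include <cstdint>

class ThreadCacheSketch {
 public:
  // Called on every allocation with the rounded-up request size
  // (size-class size for small objects, page-rounded size for large ones).
  void AddToByteAllocatedTotal(size_t size) {
    total_bytes_allocated_ += size;
  }

  // Possible optimization from the TODO: skip the tally unless a profiler
  // has asked for these totals.
  void AddToByteAllocatedTotalGated(size_t size) {
    if (profiling_active_) {
      total_bytes_allocated_ += size;
    }
  }

  uint64_t total_bytes_allocated() const { return total_bytes_allocated_; }

 private:
  uint64_t total_bytes_allocated_ = 0;  // Monotonic per-thread tally.
  bool profiling_active_ = false;       // Hypothetical gate, off by default.
};
```

Because the tally is per-thread and only touched under the calling thread's own cache, it adds no locking to either allocation path; the cost is a single add on each malloc, which is why the TODO treats gating it as an optional follow-up rather than a requirement.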