OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 107 matching lines...)
118 inline Vector< Handle<Object> > HandleVector(v8::internal::Handle<T>* elms, | 118 inline Vector< Handle<Object> > HandleVector(v8::internal::Handle<T>* elms, |
119 int length) { | 119 int length) { |
120 return Vector< Handle<Object> >( | 120 return Vector< Handle<Object> >( |
121 reinterpret_cast<v8::internal::Handle<Object>*>(elms), length); | 121 reinterpret_cast<v8::internal::Handle<Object>*>(elms), length); |
122 } | 122 } |
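HandleVector's reinterpret_cast is layout-safe because a Handle<T> is a single location slot regardless of T, so arrays of Handle<T> and Handle<Object> have identical layout. A hypothetical call site, assuming |str| is an existing Handle<String>:

    Handle<String> args[1] = { str };
    Vector< Handle<Object> > as_objects = HandleVector(args, 1);
    // |as_objects| views the same slots, retyped as Handle<Object>.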
123 | 123 |
124 | 124 |
125 // ---------------------------------------------------------------------------- | 125 // ---------------------------------------------------------------------------- |
126 // Memory | 126 // Memory |
127 | 127 |
128 // Copies data from |src| to |dst|. The data spans must not overlap. | 128 // Copies words from |src| to |dst|. The data spans must not overlap. |
129 template <typename T> | 129 template <typename T> |
130 inline void CopyWords(T* dst, T* src, int num_words) { | 130 inline void CopyWords(T* dst, const T* src, size_t num_words) { |
131 STATIC_ASSERT(sizeof(T) == kPointerSize); | 131 STATIC_ASSERT(sizeof(T) == kPointerSize); |
132 ASSERT(Min(dst, src) + num_words <= Max(dst, src)); | 132 ASSERT(Min(dst, const_cast<T*>(src)) + num_words <= |
| 133 Max(dst, const_cast<T*>(src))); |
133 ASSERT(num_words > 0); | 134 ASSERT(num_words > 0); |
134 | 135 |
135 // Use block copying OS::MemCopy if the segment we're copying is | 136 // Use block copying OS::MemCopy if the segment we're copying is |
136 // big enough to justify the extra call/setup overhead. | 137 // big enough to justify the extra call/setup overhead. |
137 static const int kBlockCopyLimit = 16; | 138 static const size_t kBlockCopyLimit = 16; |
138 STATIC_ASSERT(kBlockCopyLimit * kPointerSize >= OS::kMinComplexMemCopy); | |
139 | 139 |
140 if (num_words >= kBlockCopyLimit) { | 140 if (num_words < kBlockCopyLimit) { |
| 141 do { |
| 142 num_words--; |
| 143 *dst++ = *src++; |
| 144 } while (num_words > 0); |
| 145 } else { |
141 OS::MemCopy(dst, src, num_words * kPointerSize); | 146 OS::MemCopy(dst, src, num_words * kPointerSize); |
| 147 } |
| 148 } |
| 149 |
| 150 |
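The CopyWords rewrite above keeps the long-standing dispatch: below a word-count threshold, an inline loop avoids the call/setup overhead of a block copy; at or above it, OS::MemCopy wins. A minimal standalone sketch of the same pattern, with standard memcpy standing in for OS::MemCopy (only the threshold value 16 is taken from this patch; the rest is illustrative, not V8 code):

    #include <cstddef>
    #include <cstring>

    template <typename T>
    inline void CopyWordsSketch(T* dst, const T* src, size_t num_words) {
      static const size_t kBlockCopyLimit = 16;  // words, as in the patch
      if (num_words < kBlockCopyLimit) {
        // Short copy: a simple loop beats memcpy's call/setup cost.
        for (size_t i = 0; i < num_words; i++) dst[i] = src[i];
      } else {
        // Long copy: hand off to the optimized block routine.
        memcpy(dst, src, num_words * sizeof(T));
      }
    }

Like the real CopyWords, this assumes the ranges do not overlap; memcpy carries the same precondition.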
| 151 // Copies words from |src| to |dst|. No restrictions. |
| 152 template <typename T> |
| 153 inline void MoveWords(T* dst, const T* src, size_t num_words) { |
| 154 STATIC_ASSERT(sizeof(T) == kPointerSize); |
| 155 ASSERT(num_words > 0); |
| 156 |
| 157 // Use block copying OS::MemMove if the segment we're copying is |
| 158 // big enough to justify the extra call/setup overhead. |
| 159 static const size_t kBlockCopyLimit = 16; |
| 160 |
| 161 if (num_words < kBlockCopyLimit && |
| 162 ((dst < src) || (dst >= (src + num_words)))) { |
| 163 // Forward word-by-word copy is safe here (no harmful overlap). |
| 164 do { |
| 165 num_words--; |
| 166 *dst++ = *src++; |
| 167 } while (num_words > 0); |
142 } else { | 168 } else { |
143 int remaining = num_words; | 169 OS::MemMove(dst, src, num_words * kPointerSize); |
144 do { | |
145 remaining--; | |
146 *dst++ = *src++; | |
147 } while (remaining > 0); | |
148 } | 170 } |
149 } | 171 } |
150 | 172 |
151 | 173 |
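The overlap predicate added to MoveWords, (dst < src) || (dst >= src + num_words), is exactly the condition under which a forward element-by-element copy is safe; when dst falls inside [src, src + num_words), the loop would clobber source words before reading them, which is what the OS::MemMove fallback avoids. A small illustrative demonstration (not V8 code):

    #include <cstdio>

    int main() {
      int buf[5] = {1, 2, 3, 4, 5};
      const int* src = buf;
      int* dst = buf + 1;  // dst lies inside [src, src + 4): forward-unsafe
      for (int i = 0; i < 4; i++) dst[i] = src[i];   // naive forward copy
      for (int i = 0; i < 5; i++) printf("%d ", buf[i]);
      // Prints "1 1 1 1 1 "; a correct move would yield "1 1 2 3 4 ".
      return 0;
    }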
152 // Copies data from |src| to |dst|. The data spans must not overlap. | 174 // Copies data from |src| to |dst|. The data spans must not overlap. |
153 template <typename T> | 175 template <typename T> |
154 inline void CopyBytes(T* dst, T* src, size_t num_bytes) { | 176 inline void CopyBytes(T* dst, const T* src, size_t num_bytes) { |
155 STATIC_ASSERT(sizeof(T) == 1); | 177 STATIC_ASSERT(sizeof(T) == 1); |
156 ASSERT(Min(dst, src) + num_bytes <= Max(dst, src)); | 178 ASSERT(Min(dst, const_cast<T*>(src)) + num_bytes <= |
| 179 Max(dst, const_cast<T*>(src))); |
157 if (num_bytes == 0) return; | 180 if (num_bytes == 0) return; |
158 | 181 |
159 // Use block copying OS::MemCopy if the segment we're copying is | 182 // Use block copying OS::MemCopy if the segment we're copying is |
160 // big enough to justify the extra call/setup overhead. | 183 // big enough to justify the extra call/setup overhead. |
161 static const int kBlockCopyLimit = OS::kMinComplexMemCopy; | 184 static const int kBlockCopyLimit = OS::kMinComplexMemCopy; |
162 | 185 |
163 if (num_bytes >= static_cast<size_t>(kBlockCopyLimit)) { | 186 if (num_bytes < static_cast<size_t>(kBlockCopyLimit)) { |
| 187 do { |
| 188 num_bytes--; |
| 189 *dst++ = *src++; |
| 190 } while (num_bytes > 0); |
| 191 } else { |
164 OS::MemCopy(dst, src, num_bytes); | 192 OS::MemCopy(dst, src, num_bytes); |
165 } else { | |
166 size_t remaining = num_bytes; | |
167 do { | |
168 remaining--; | |
169 *dst++ = *src++; | |
170 } while (remaining > 0); | |
171 } | 193 } |
172 } | 194 } |
173 | 195 |
| 196 |
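Taken together, the byte variants split one contract in two: CopyBytes above keeps the must-not-overlap precondition, while the MoveBytes added below accepts any ranges. A hypothetical call site (buffer names and contents are illustrative):

    char scratch[8] = "abcdefg";
    char copy[8];
    CopyBytes(copy, scratch, 8);         // distinct buffers: CopyBytes is fine
    MoveBytes(scratch + 1, scratch, 7);  // overlapping ranges: use MoveBytes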
| 197 // Copies data from |src| to |dst|. No restrictions. |
| 198 template <typename T> |
| 199 inline void MoveBytes(T* dst, const T* src, size_t num_bytes) { |
| 200 STATIC_ASSERT(sizeof(T) == 1); |
| 201 switch (num_bytes) { |
| 202 case 0: return; |
| 203 case 1: |
| 204 *dst = *src; |
| 205 return; |
| 206 #ifdef V8_HOST_CAN_READ_UNALIGNED |
| 207 case 2: |
| 208 *reinterpret_cast<uint16_t*>(dst) = *reinterpret_cast<const uint16_t*>(src); |
| 209 return; |
| 210 case 3: { |
| 211 uint16_t part1 = *reinterpret_cast<const uint16_t*>(src); |
| 212 byte part2 = *(src + 2); |
| 213 *reinterpret_cast<uint16_t*>(dst) = part1; |
| 214 *(dst + 2) = part2; |
| 215 return; |
| 216 } |
| 217 case 4: |
| 218 *reinterpret_cast<uint32_t*>(dst) = *reinterpret_cast<const uint32_t*>(src); |
| 219 return; |
| 220 case 5: |
| 221 case 6: |
| 222 case 7: |
| 223 case 8: { |
| 224 uint32_t part1 = *reinterpret_cast<const uint32_t*>(src); |
| 225 uint32_t part2 = *reinterpret_cast<const uint32_t*>(src + num_bytes - 4); |
| 226 *reinterpret_cast<uint32_t*>(dst) = part1; |
| 227 *reinterpret_cast<uint32_t*>(dst + num_bytes - 4) = part2; |
| 228 return; |
| 229 } |
| 230 case 9: |
| 231 case 10: |
| 232 case 11: |
| 233 case 12: |
| 234 case 13: |
| 235 case 14: |
| 236 case 15: |
| 237 case 16: { |
| 238 double part1 = *reinterpret_cast<const double*>(src); |
| 239 double part2 = *reinterpret_cast<const double*>(src + num_bytes - 8); |
| 240 *reinterpret_cast<double*>(dst) = part1; |
| 241 *reinterpret_cast<double*>(dst + num_bytes - 8) = part2; |
| 242 return; |
| 243 } |
| 244 #endif |
| 245 default: |
| 246 OS::MemMove(dst, src, num_bytes); |
| 247 return; |
| 248 } |
| 249 } |
| 250 |
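The case 5 through case 16 arms of MoveBytes all use one trick: load a word at the start and a word at the end of the range (the two loads may overlap in the middle), then store both. Two loads and two stores cover every length in the bracket, and since both loads complete before either store, the sequence also tolerates overlapping |src| and |dst|. A standalone sketch of the 4-to-8-byte case, using memcpy for the unaligned accesses rather than the reinterpret_casts the patch guards with V8_HOST_CAN_READ_UNALIGNED (the function name is hypothetical):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Moves n bytes, 4 <= n <= 8, with exactly two loads and two stores.
    inline void MoveBytes4To8(uint8_t* dst, const uint8_t* src, size_t n) {
      uint32_t head, tail;
      memcpy(&head, src, 4);          // first four bytes
      memcpy(&tail, src + n - 4, 4);  // last four bytes; may overlap head
      memcpy(dst, &head, 4);          // both loads done, stores now safe
      memcpy(dst + n - 4, &tail, 4);
    }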
174 | 251 |
175 template <typename T, typename U> | 252 template <typename T, typename U> |
176 inline void MemsetPointer(T** dest, U* value, int counter) { | 253 inline void MemsetPointer(T** dest, U* value, int counter) { |
177 #ifdef DEBUG | 254 #ifdef DEBUG |
178 T* a = NULL; | 255 T* a = NULL; |
179 U* b = NULL; | 256 U* b = NULL; |
180 a = b; // Fake assignment to check assignability. | 257 a = b; // Fake assignment to check assignability. |
181 USE(a); | 258 USE(a); |
182 #endif // DEBUG | 259 #endif // DEBUG |
183 #if defined(V8_HOST_ARCH_IA32) | 260 #if defined(V8_HOST_ARCH_IA32) |
(...skipping 124 matching lines...)
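The DEBUG block at the top of MemsetPointer is a compile-time convertibility check: the fake assignment a = b compiles only if U* implicitly converts to T*, so a mismatched value pointer is rejected at build time (in DEBUG builds) instead of being bit-copied into the destination slots. A hypothetical call site (types A and B are illustrative):

    struct A {};
    struct B {};  // unrelated to A

    void FillSlots(A** slots, A* sentinel, int n) {
      MemsetPointer(slots, sentinel, n);    // A* = A*: compiles
      // MemsetPointer(slots, (B*)NULL, n); // A* = B*: rejected in DEBUG
    }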
308 | 385 |
309 // Add formatted contents like printf based on a va_list. | 386 // Add formatted contents like printf based on a va_list. |
310 void AddFormattedList(const char* format, va_list list); | 387 void AddFormattedList(const char* format, va_list list); |
311 private: | 388 private: |
312 DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder); | 389 DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder); |
313 }; | 390 }; |
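Because AddFormattedList takes a va_list rather than variadic arguments, a StringBuilder can sit at the bottom of another printf-style function. A hypothetical forwarding wrapper (only AddFormattedList's signature comes from this header; the wrapper itself is illustrative):

    #include <cstdarg>

    void AppendLog(StringBuilder* builder, const char* format, ...) {
      va_list args;
      va_start(args, format);
      builder->AddFormattedList(format, args);  // forward the varargs
      va_end(args);
    }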
314 | 391 |
315 } } // namespace v8::internal | 392 } } // namespace v8::internal |
316 | 393 |
317 #endif // V8_V8UTILS_H_ | 394 #endif // V8_V8UTILS_H_ |