Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(370)

Unified Diff: fusl/src/string/memset.c

Issue 1714623002: [fusl] clang-format fusl (Closed) Base URL: git@github.com:domokit/mojo.git@master
Patch Set: "headers too" — Created 4 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: fusl/src/string/memset.c
diff --git a/fusl/src/string/memset.c b/fusl/src/string/memset.c
index f438b073ae099d69556a7ddb6a838973cb4b13d9..3bc1e2717c2207687641069d02fb9423e85f3f74 100644
--- a/fusl/src/string/memset.c
+++ b/fusl/src/string/memset.c
@@ -1,86 +1,92 @@
#include <string.h>
#include <stdint.h>

void* memset(void* dest, int c, size_t n) {
  unsigned char* p = dest;
  size_t misalign;

  /* Write the first and last bytes of the region first, widening the
   * head/tail coverage with each conditional. Every conditional below
   * guarantees that all subsequently used offsets lie inside the
   * destination region. */
  if (!n)
    return dest;
  p[0] = p[n - 1] = c;
  if (n <= 2)
    return dest;
  p[1] = p[n - 2] = c;
  p[2] = p[n - 3] = c;
  if (n <= 6)
    return dest;
  p[3] = p[n - 4] = c;
  if (n <= 8)
    return dest;

  /* Round the pointer up to a 4-byte boundary and clip n down to a
   * multiple of 4. The byte stores above already covered whatever the
   * alignment adjustment cuts off at either end. */
  misalign = -(uintptr_t)p & 3;
  p += misalign;
  n -= misalign;
  n &= -4;

#ifdef __GNUC__
  typedef uint32_t __attribute__((__may_alias__)) u32;
  typedef uint64_t __attribute__((__may_alias__)) u64;

  /* 0x01010101 * byte: replicate the fill byte across a 32-bit word. */
  u32 pattern32 = ((u32)-1) / 255 * (unsigned char)c;

  /* Before switching to 32-byte stores aligned on an 8-byte boundary,
   * fill the head and tail with word stores, up to 28 bytes each. As in
   * the byte-based fill above, each conditional guarantees the later
   * offsets are valid (e.g. failing n <= 24 implies n >= 28). */
  *(u32*)(p + 0) = pattern32;
  *(u32*)(p + n - 4) = pattern32;
  if (n <= 8)
    return dest;
  *(u32*)(p + 4) = pattern32;
  *(u32*)(p + 8) = pattern32;
  *(u32*)(p + n - 12) = pattern32;
  *(u32*)(p + n - 8) = pattern32;
  if (n <= 24)
    return dest;
  *(u32*)(p + 12) = pattern32;
  *(u32*)(p + 16) = pattern32;
  *(u32*)(p + 20) = pattern32;
  *(u32*)(p + 24) = pattern32;
  *(u32*)(p + n - 28) = pattern32;
  *(u32*)(p + n - 24) = pattern32;
  *(u32*)(p + n - 20) = pattern32;
  *(u32*)(p + n - 16) = pattern32;

  /* Step past the already-filled head so the pointer is 8-byte aligned,
   * re-writing as few bytes as is practical without extra branches. */
  misalign = 24 + ((uintptr_t)p & 4);
  p += misalign;
  n -= misalign;

  /* Reaching this loop means 28 tail bytes are already filled, so any
   * remainder once n drops below 32 can safely be ignored. */
  u64 pattern64 = pattern32 | ((u64)pattern32 << 32);
  for (; n >= 32; n -= 32, p += 32) {
    *(u64*)(p + 0) = pattern64;
    *(u64*)(p + 8) = pattern64;
    *(u64*)(p + 16) = pattern64;
    *(u64*)(p + 24) = pattern64;
  }
#else
  /* Portable fallback: plain byte stores, no aliasing tricks. */
  for (; n; n--, p++)
    *p = c;
#endif

  return dest;
}

Powered by Google App Engine
This is Rietveld 408576698