/*
 * Copyright (c) 2012 The Native Client Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

/*
 * Not Prometheus, just shared memory buffers. We do the following:
 * we allocate and write into a NaClGioShm object until its allocation
 * size limit is reached. Then, we double its size. This means we
 * avoid quadratic copying.
 *
 * Also, we maintain the following invariant: all I/O operations are
 * done once, rather than split. So if a write would grow a shm
 * object, we grow before doing the write. This leads to more
 * copying, but makes the I/O operations simpler.
 */
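
/*
 * Rough usage sketch, based only on the functions defined in this file
 * (error handling omitted; |data| and |data_size| are illustrative
 * names, not part of the API):
 *
 *   struct NaClGioShmUnbounded gio;
 *   struct NaClDesc            *shm_desc;
 *   size_t                     nbytes;
 *
 *   if (NaClGioShmUnboundedCtor(&gio)) {
 *     (*gio.base.vtbl->Write)(&gio.base, data, data_size);
 *     shm_desc = NaClGioShmUnboundedGetNaClDesc(&gio, &nbytes);
 *     ... use shm_desc, which is backed by nbytes of written data;
 *     ... descriptor lifetime is managed by the underlying NaClGioShm.
 *     (*gio.base.vtbl->Dtor)(&gio.base);
 *   }
 */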

#include <errno.h>
#include <stdlib.h>

#include "native_client/src/trusted/gio/gio_shm_unbounded.h"

#include "native_client/src/shared/platform/nacl_log.h"
#include "native_client/src/trusted/desc/nacl_desc_base.h"
#include "native_client/src/trusted/service_runtime/nacl_config.h"


#if !defined(SIZE_T_MAX)
# define SIZE_T_MAX (~(size_t) 0)
#endif

static ssize_t NaClGioShmUnboundedRead(struct Gio *vself,
                                       void *buf,
                                       size_t count) {
  struct NaClGioShmUnbounded *self = (struct NaClGioShmUnbounded *) vself;
  ssize_t got;
  size_t bytes_avail;

  NaClLog(4,
          ("NaClGioShmUnboundedRead(0x%"NACL_PRIxPTR","
           " 0x%"NACL_PRIxPTR", 0x%"NACL_PRIxS")\n"),
          (uintptr_t) vself, (uintptr_t) buf, count);
  /* are we at the end, or did we seek past the end? */
  if (self->io_offset >= self->shm_written) {
    NaClLog(4, "io_offset 0x%"NACL_PRIxS", shm_written 0x%"NACL_PRIxS"\n",
            self->io_offset, self->shm_written);
    NaClLog(4, "I/O pointer outside of valid region, returning EOF\n");
    return 0;  /* EOF */
  }
  bytes_avail = self->shm_written - self->io_offset;
  if (count > bytes_avail) {
    count = bytes_avail;
  }
  NaClLog(4,
          ("NaClGioShmUnboundedRead: reading from underlying"
           " NaClGioShm 0x%"NACL_PRIxPTR"\n"),
          (uintptr_t) self->ngsp);
  got = (*self->ngsp->base.vtbl->Read)(&self->ngsp->base, buf, count);
  NaClLog(4,
          ("NaClGioShmUnboundedRead: got 0x%"NACL_PRIxS" bytes\n"),
          (size_t) got);
  if (-1 != got) {
    self->io_offset += (size_t) got;
  }
  return got;
}

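/*
 * Copy the first |nbytes| of |src| into |dst|. Both streams are
 * rewound to offset zero first; on return, |dst|'s I/O pointer is
 * left at |src|'s original offset. Data moves through a small stack
 * buffer, and any Read/Write failure is fatal.
 */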
static void GioCopy(struct Gio *dst,
                    struct Gio *src,
                    size_t nbytes) {
  uint8_t buf[4096];
  uint8_t *bufp;
  ssize_t got;
  ssize_t this_copy;
  size_t ask;
  off_t cur_offset;

  NaClLog(3,
          ("GioCopy: dst 0x%"NACL_PRIxPTR
           ", src 0x%"NACL_PRIxPTR", nbytes 0x%"NACL_PRIxS"\n"),
          (uintptr_t) dst,
          (uintptr_t) src,
          nbytes);
  cur_offset = (*src->vtbl->Seek)(src, 0, SEEK_CUR);
  if (-1 == cur_offset) {
    NaClLog(LOG_FATAL,
            "NaClGioShmUnbounded::GioCopy: could not find source ptr\n");
  }
  if (-1 == (*src->vtbl->Seek)(src, 0, SEEK_SET)) {
    NaClLog(LOG_FATAL,
            "NaClGioShmUnbounded::GioCopy: could not rewind source\n");
  }
  if (-1 == (*dst->vtbl->Seek)(dst, 0, SEEK_SET)) {
    NaClLog(LOG_FATAL,
            "NaClGioShmUnbounded::GioCopy: could not rewind destination\n");
  }
  /*
   * This copy process will dirty every page. An optimization for
   * sparse data would check the result of a Read for all-zeros and
   * Seek the corresponding number of bytes forward. This is,
   * however, unlikely to be a common enough case in our projected use
   * cases.
   */
  while (nbytes > 0) {
    ask = sizeof buf;
    if (ask > nbytes) {
      ask = nbytes;
    }
    NaClLog(5,
            "GioCopy: copying 0x%"NACL_PRIxS" bytes, 0x%"NACL_PRIxS" remains\n",
            ask,
            nbytes);
    got = (*src->vtbl->Read)(src, buf, ask);
    if (got <= 0 || (size_t) got > ask) {
      NaClLog(LOG_FATAL,
              "NaClGioShmUnbounded::GioCopy: read failed, %"NACL_PRIdS"\n",
              got);
    }
    this_copy = got;

    for (ask = (size_t) got, bufp = buf;
         ask > 0;
         bufp += got, ask -= got) {
      got = (*dst->vtbl->Write)(dst, bufp, ask);
      if (got <= 0 || (size_t) got > ask) {
        NaClLog(LOG_FATAL,
                "NaClGioShmUnbounded::GioCopy: write failed, %"NACL_PRIdS"\n",
                got);
      }
    }
    nbytes -= this_copy;
  }
  if (-1 == (*dst->vtbl->Seek)(dst, cur_offset, SEEK_SET)) {
    NaClLog(LOG_FATAL,
            "NaClGioShmUnbounded::GioCopy: could not seek dst ptr\n");
  }
}

static ssize_t NaClGioShmUnboundedWrite(struct Gio *vself,
                                        void const *buf,
                                        size_t count) {
  struct NaClGioShmUnbounded *self = (struct NaClGioShmUnbounded *) vself;
  size_t io_offset;
  ssize_t retval;
  size_t new_avail_sz;
  size_t new_size;
  struct NaClGioShm *ngsp;

  NaClLog(4,
          ("NaClGioShmUnboundedWrite(0x%"NACL_PRIxPTR","
           " 0x%"NACL_PRIxPTR", 0x%"NACL_PRIxS")\n"),
          (uintptr_t) vself, (uintptr_t) buf, count);
  if (SIZE_T_MAX - self->io_offset < count) {
    errno = EINVAL;
    return -1;
  }

  /*
   * where we'll end up when the I/O is done
   */
  io_offset = self->io_offset + count;

  /*
   * For sequential I/O, an "if" suffices. For writes that occur
   * after a seek, however, we may need to double more than once.
   */
  for (new_avail_sz = self->shm_avail_sz;
       new_avail_sz < io_offset;
       new_avail_sz = new_size) {
    if (SIZE_T_MAX / 2 >= new_avail_sz) {
      new_size = 2 * new_avail_sz;
    } else {
      new_size = SIZE_T_MAX - NACL_MAP_PAGESIZE;
      ++new_size;
      /*
       * We could return -1 w/ ENOMEM here as well, but let's peg the
       * max size.
       */
      if (new_size <= new_avail_sz) {
        /*
         * We get equality if we try to expand again.
         */
        errno = ENOMEM;
        return -1;
      }
    }
  }
  if (new_avail_sz != self->shm_avail_sz) {
    /*
     * Replace the ngsp with one that is the new size. This means
     * that there is a temporary 3x VM hit in the worst case. This
     * should be primarily paging space, since I/O between the
     * NaClGioShm objects should use relatively little RAM. It will
     * trash the cache, however. Hopefully the shm object is in the
     * buffer cache, and we're just mapping chunks of it into our
     * address space. This is a bit more explicit than mmapping both
     * source and destination objects completely and using madvise with
     * MADV_SEQUENTIAL -- and likely less efficient -- but avoids
     * OS-specific calls.
     */

    ngsp = malloc(sizeof *ngsp);

    if (NULL == ngsp) {
      errno = ENOMEM;
      return -1;
    }
    if (!NaClGioShmAllocCtor(ngsp, new_avail_sz)) {
      free(ngsp);
      errno = ENOMEM;
      return -1;
    }
    GioCopy((struct Gio *) ngsp, (struct Gio *) self->ngsp,
            self->shm_avail_sz);
    self->shm_avail_sz = new_avail_sz;

    if (-1 == (*self->ngsp->base.vtbl->Close)(&self->ngsp->base)) {
      NaClLog(LOG_ERROR,
              "NaClGioShmUnboundedWrite: close of src temporary failed\n");
    }
    (*self->ngsp->base.vtbl->Dtor)(&self->ngsp->base);
    free(self->ngsp);
    self->ngsp = ngsp;
    ngsp = NULL;
  }

  retval = (*self->ngsp->base.vtbl->Write)(&self->ngsp->base,
                                           buf, count);
  if (-1 != retval) {
    if ((size_t) retval > count) {
      errno = EIO;  /* internal error */
      return -1;
    }
    io_offset = self->io_offset + retval;

    if (io_offset > self->shm_written) {
      self->shm_written = io_offset;
      NaClLog(4,
              ("UPDATE: io_offset 0x%"NACL_PRIxS
               ", shm_written 0x%"NACL_PRIxS"\n"),
              self->io_offset, self->shm_written);
    }
    self->io_offset = io_offset;
  }

  NaClLog(4, "io_offset 0x%"NACL_PRIxS", shm_written 0x%"NACL_PRIxS"\n",
          self->io_offset, self->shm_written);

  return retval;
}

static off_t NaClGioShmUnboundedSeek(struct Gio *vself,
                                     off_t offset,
                                     int whence) {
  struct NaClGioShmUnbounded *self = (struct NaClGioShmUnbounded *) vself;
  off_t new_pos;

  NaClLog(4, "NaClGioShmUnboundedSeek(0x%"NACL_PRIxPTR", %ld, %d)\n",
          (uintptr_t) vself, (long) offset, whence);
  NaClLog(4, "io_offset 0x%"NACL_PRIxS", shm_written 0x%"NACL_PRIxS"\n",
          self->io_offset, self->shm_written);
  new_pos = (*self->ngsp->base.vtbl->Seek)(&self->ngsp->base, offset, whence);
  if (-1 != new_pos) {
    NaClLog(4, " setting io_offset to %ld\n", (long) new_pos);
    self->io_offset = new_pos;
  }
  NaClLog(4, "io_offset 0x%"NACL_PRIxS", shm_written 0x%"NACL_PRIxS"\n",
          self->io_offset, self->shm_written);
  return new_pos;
}

static int NaClGioShmUnboundedFlush(struct Gio *vself) {
  struct NaClGioShmUnbounded *self = (struct NaClGioShmUnbounded *) vself;

  return (*self->ngsp->base.vtbl->Flush)(&self->ngsp->base);
}

static int NaClGioShmUnboundedClose(struct Gio *vself) {
  struct NaClGioShmUnbounded *self = (struct NaClGioShmUnbounded *) vself;

  if (NULL != self->ngsp) {
    if (0 != (*self->ngsp->base.vtbl->Close)(&self->ngsp->base)) {
      errno = EIO;
      return -1;
    }
    (*self->ngsp->base.vtbl->Dtor)(&self->ngsp->base);
    free(self->ngsp);
    self->ngsp = NULL;
  }
  return 0;
}

static void NaClGioShmUnboundedDtor(struct Gio *vself) {
  struct NaClGioShmUnbounded *self = (struct NaClGioShmUnbounded *) vself;

  if (NULL != self->ngsp) {
    if (-1 == (*vself->vtbl->Close)(vself)) {
      NaClLog(LOG_ERROR, "NaClGioShmUnboundedDtor: auto Close failed\n");
    }
  }
  self->base.vtbl = NULL;
}

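/*
 * Entries below are positional initializers, so they must appear in
 * the order in which the members are declared in struct GioVtbl.
 */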
const struct GioVtbl kNaClGioShmUnboundedVtbl = {
  NaClGioShmUnboundedDtor,
  NaClGioShmUnboundedRead,
  NaClGioShmUnboundedWrite,
  NaClGioShmUnboundedSeek,
  NaClGioShmUnboundedFlush,
  NaClGioShmUnboundedClose,
};

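/*
 * Construct an unbounded, shm-backed Gio object with an initial
 * buffer of NACL_MAP_PAGESIZE bytes. Returns 1 on success and 0 on
 * failure, in which case no resources remain allocated.
 */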
int NaClGioShmUnboundedCtor(struct NaClGioShmUnbounded *self) {
  self->base.vtbl = NULL;
  self->ngsp = malloc(sizeof *self->ngsp);
  if (NULL == self->ngsp) {
    return 0;
  }
  if (!NaClGioShmAllocCtor(self->ngsp, NACL_MAP_PAGESIZE)) {
    free(self->ngsp);
    return 0;
  }
  self->shm_avail_sz = NACL_MAP_PAGESIZE;
  self->shm_written = 0;
  self->io_offset = 0;
  self->base.vtbl = &kNaClGioShmUnboundedVtbl;
  return 1;
}

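/*
 * Expose the NaClDesc backing the current shm buffer; the number of
 * bytes written so far is returned via |written|. No new reference
 * to the descriptor is taken here.
 */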
struct NaClDesc *NaClGioShmUnboundedGetNaClDesc(
    struct NaClGioShmUnbounded *self,
    size_t *written) {
  *written = self->shm_written;
  return self->ngsp->shmp;
}