| OLD | NEW |
| (Empty) |
| 1 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ | |
| 2 /* This Source Code Form is subject to the terms of the Mozilla Public | |
| 3 * License, v. 2.0. If a copy of the MPL was not distributed with this | |
| 4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ | |
| 5 | |
| 6 #include "primpl.h" | |
| 7 | |
| 8 #include <string.h> | |
| 9 #include <signal.h> | |
| 10 #include <unistd.h> | |
| 11 #include <fcntl.h> | |
| 12 #include <sys/types.h> | |
| 13 #include <sys/socket.h> | |
| 14 #include <sys/time.h> | |
| 15 #include <sys/ioctl.h> | |
| 16 #include <sys/mman.h> | |
| 17 #include <unistd.h> | |
| 18 #include <sys/utsname.h> | |
| 19 | |
| 20 #ifdef _PR_POLL_AVAILABLE | |
| 21 #include <poll.h> | |
| 22 #endif | |
| 23 | |
| 24 #if defined(ANDROID) | |
| 25 #include <android/api-level.h> | |
| 26 #endif | |
| 27 | |
| 28 /* To get FIONREAD */ | |
| 29 #if defined(UNIXWARE) | |
| 30 #include <sys/filio.h> | |
| 31 #endif | |
| 32 | |
| 33 #if defined(NTO) | |
| 34 #include <sys/statvfs.h> | |
| 35 #endif | |
| 36 | |
| 37 /* | |
| 38 * Make sure _PRSockLen_t is 32-bit, because we will cast a PRUint32* or | |
| 39 * PRInt32* pointer to a _PRSockLen_t* pointer. | |
| 40 */ | |
| 41 #if defined(HAVE_SOCKLEN_T) \ | |
| 42 || (defined(__GLIBC__) && __GLIBC__ >= 2) | |
| 43 #define _PRSockLen_t socklen_t | |
| 44 #elif defined(IRIX) || defined(HPUX) || defined(OSF1) || defined(SOLARIS) \ | |
| 45 || defined(AIX4_1) || defined(LINUX) \ | |
| 46 || defined(BSDI) || defined(SCO) \ | |
| 47 || defined(DARWIN) \ | |
| 48 || defined(QNX) | |
| 49 #define _PRSockLen_t int | |
| 50 #elif (defined(AIX) && !defined(AIX4_1)) || defined(FREEBSD) \ | |
| 51 || defined(NETBSD) || defined(OPENBSD) || defined(UNIXWARE) \ | |
| 52 || defined(DGUX) || defined(NTO) || defined(RISCOS) | |
| 53 #define _PRSockLen_t size_t | |
| 54 #else | |
| 55 #error "Cannot determine architecture" | |
| 56 #endif | |
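
The casts described above are only safe when _PRSockLen_t ends up the same width as PRUint32/PRInt32. A minimal compile-time sketch of that invariant (a hypothetical addition, not part of the original file) could look like:

```c
/* Hypothetical sketch: fail the build if _PRSockLen_t is not 32 bits wide,
 * since PRUint32 and PRInt32 pointers are cast to _PRSockLen_t pointers
 * below.  A negative array size is a compile-time error in C89. */
typedef char _PRSockLen_t_is_32_bits
    [(sizeof(_PRSockLen_t) == sizeof(PRUint32)) ? 1 : -1];
```
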
| 57 | |
| 58 /* | |
| 59 ** Global lock variable used to bracket calls into rusty libraries that | |
| 60 ** aren't thread safe (like libc, libX, etc). | |
| 61 */ | |
| 62 static PRLock *_pr_rename_lock = NULL; | |
| 63 static PRMonitor *_pr_Xfe_mon = NULL; | |
| 64 | |
| 65 static PRInt64 minus_one; | |
| 66 | |
| 67 sigset_t timer_set; | |
| 68 | |
| 69 #if !defined(_PR_PTHREADS) | |
| 70 | |
| 71 static sigset_t empty_set; | |
| 72 | |
| 73 #ifdef SOLARIS | |
| 74 #include <sys/file.h> | |
| 75 #include <sys/filio.h> | |
| 76 #endif | |
| 77 | |
| 78 #ifndef PIPE_BUF | |
| 79 #define PIPE_BUF 512 | |
| 80 #endif | |
| 81 | |
| 82 /* | |
| 83 * _nspr_noclock - if set clock interrupts are disabled | |
| 84 */ | |
| 85 int _nspr_noclock = 1; | |
| 86 | |
| 87 #ifdef IRIX | |
| 88 extern PRInt32 _nspr_terminate_on_error; | |
| 89 #endif | |
| 90 | |
| 91 /* | |
| 92 * There is an assertion in this code that NSPR's definition of PRIOVec | |
| 93 * is bit compatible with UNIX' definition of a struct iovec. This is | |
| 94 * applicable to the 'writev()' operations where the types are casually | |
| 95 * cast to avoid warnings. | |
| 96 */ | |
| 97 | |
| 98 int _pr_md_pipefd[2] = { -1, -1 }; | |
| 99 static char _pr_md_pipebuf[PIPE_BUF]; | |
| 100 static PRInt32 local_io_wait(PRInt32 osfd, PRInt32 wait_flag, | |
| 101 PRIntervalTime timeout); | |
| 102 | |
| 103 _PRInterruptTable _pr_interruptTable[] = { | |
| 104 { | |
| 105 "clock", _PR_MISSED_CLOCK, _PR_ClockInterrupt, }, | |
| 106 { | |
| 107 0 } | |
| 108 }; | |
| 109 | |
| 110 void _MD_unix_init_running_cpu(_PRCPU *cpu) | |
| 111 { | |
| 112 PR_INIT_CLIST(&(cpu->md.md_unix.ioQ)); | |
| 113 cpu->md.md_unix.ioq_max_osfd = -1; | |
| 114 cpu->md.md_unix.ioq_timeout = PR_INTERVAL_NO_TIMEOUT; | |
| 115 } | |
| 116 | |
| 117 PRStatus _MD_open_dir(_MDDir *d, const char *name) | |
| 118 { | |
| 119 int err; | |
| 120 | |
| 121 d->d = opendir(name); | |
| 122 if (!d->d) { | |
| 123 err = _MD_ERRNO(); | |
| 124 _PR_MD_MAP_OPENDIR_ERROR(err); | |
| 125 return PR_FAILURE; | |
| 126 } | |
| 127 return PR_SUCCESS; | |
| 128 } | |
| 129 | |
| 130 PRInt32 _MD_close_dir(_MDDir *d) | |
| 131 { | |
| 132 int rv = 0, err; | |
| 133 | |
| 134 if (d->d) { | |
| 135 rv = closedir(d->d); | |
| 136 if (rv == -1) { | |
| 137 err = _MD_ERRNO(); | |
| 138 _PR_MD_MAP_CLOSEDIR_ERROR(err); | |
| 139 } | |
| 140 } | |
| 141 return rv; | |
| 142 } | |
| 143 | |
| 144 char * _MD_read_dir(_MDDir *d, PRIntn flags) | |
| 145 { | |
| 146 struct dirent *de; | |
| 147 int err; | |
| 148 | |
| 149 for (;;) { | |
| 150 /* | |
| 151 * XXX: readdir() is not MT-safe. There is an MT-safe version | |
| 152 * readdir_r() on some systems. | |
| 153 */ | |
| 154 _MD_ERRNO() = 0; | |
| 155 de = readdir(d->d); | |
| 156 if (!de) { | |
| 157 err = _MD_ERRNO(); | |
| 158 _PR_MD_MAP_READDIR_ERROR(err); | |
| 159 return 0; | |
| 160 } | |
| 161 if ((flags & PR_SKIP_DOT) && | |
| 162 (de->d_name[0] == '.') && (de->d_name[1] == 0)) | |
| 163 continue; | |
| 164 if ((flags & PR_SKIP_DOT_DOT) && | |
| 165 (de->d_name[0] == '.') && (de->d_name[1] == '.') && | |
| 166 (de->d_name[2] == 0)) | |
| 167 continue; | |
| 168 if ((flags & PR_SKIP_HIDDEN) && (de->d_name[0] == '.')) | |
| 169 continue; | |
| 170 break; | |
| 171 } | |
| 172 return de->d_name; | |
| 173 } | |
| 174 | |
| 175 PRInt32 _MD_delete(const char *name) | |
| 176 { | |
| 177 PRInt32 rv, err; | |
| 178 #ifdef UNIXWARE | |
| 179 sigset_t set, oset; | |
| 180 #endif | |
| 181 | |
| 182 #ifdef UNIXWARE | |
| 183 sigfillset(&set); | |
| 184 sigprocmask(SIG_SETMASK, &set, &oset); | |
| 185 #endif | |
| 186 rv = unlink(name); | |
| 187 #ifdef UNIXWARE | |
| 188 sigprocmask(SIG_SETMASK, &oset, NULL); | |
| 189 #endif | |
| 190 if (rv == -1) { | |
| 191 err = _MD_ERRNO(); | |
| 192 _PR_MD_MAP_UNLINK_ERROR(err); | |
| 193 } | |
| 194 return(rv); | |
| 195 } | |
| 196 | |
| 197 PRInt32 _MD_rename(const char *from, const char *to) | |
| 198 { | |
| 199 PRInt32 rv = -1, err; | |
| 200 | |
| 201 /* | |
| 202 ** This is trying to enforce the semantics of WINDOZE' rename | |
| 203 ** operation. That means one is not allowed to rename over top | |
| 204 ** of an existing file. Holding a lock across these two functions | |
| 205 ** and the open function is known to be a bad idea, but .... | |
| 206 */ | |
| 207 if (NULL != _pr_rename_lock) | |
| 208 PR_Lock(_pr_rename_lock); | |
| 209 if (0 == access(to, F_OK)) | |
| 210 PR_SetError(PR_FILE_EXISTS_ERROR, 0); | |
| 211 else | |
| 212 { | |
| 213 rv = rename(from, to); | |
| 214 if (rv < 0) { | |
| 215 err = _MD_ERRNO(); | |
| 216 _PR_MD_MAP_RENAME_ERROR(err); | |
| 217 } | |
| 218 } | |
| 219 if (NULL != _pr_rename_lock) | |
| 220 PR_Unlock(_pr_rename_lock); | |
| 221 return rv; | |
| 222 } | |
| 223 | |
| 224 PRInt32 _MD_access(const char *name, PRAccessHow how) | |
| 225 { | |
| 226 PRInt32 rv, err; | |
| 227 int amode; | |
| 228 | |
| 229 switch (how) { | |
| 230 case PR_ACCESS_WRITE_OK: | |
| 231 amode = W_OK; | |
| 232 break; | |
| 233 case PR_ACCESS_READ_OK: | |
| 234 amode = R_OK; | |
| 235 break; | |
| 236 case PR_ACCESS_EXISTS: | |
| 237 amode = F_OK; | |
| 238 break; | |
| 239 default: | |
| 240 PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0); | |
| 241 rv = -1; | |
| 242 goto done; | |
| 243 } | |
| 244 rv = access(name, amode); | |
| 245 | |
| 246 if (rv < 0) { | |
| 247 err = _MD_ERRNO(); | |
| 248 _PR_MD_MAP_ACCESS_ERROR(err); | |
| 249 } | |
| 250 | |
| 251 done: | |
| 252 return(rv); | |
| 253 } | |
| 254 | |
| 255 PRInt32 _MD_mkdir(const char *name, PRIntn mode) | |
| 256 { | |
| 257 int rv, err; | |
| 258 | |
| 259 /* | |
| 260 ** This lock is used to enforce rename semantics as described | |
| 261 ** in PR_Rename. Look there for more fun details. | |
| 262 */ | |
| 263 if (NULL !=_pr_rename_lock) | |
| 264 PR_Lock(_pr_rename_lock); | |
| 265 rv = mkdir(name, mode); | |
| 266 if (rv < 0) { | |
| 267 err = _MD_ERRNO(); | |
| 268 _PR_MD_MAP_MKDIR_ERROR(err); | |
| 269 } | |
| 270 if (NULL !=_pr_rename_lock) | |
| 271 PR_Unlock(_pr_rename_lock); | |
| 272 return rv; | |
| 273 } | |
| 274 | |
| 275 PRInt32 _MD_rmdir(const char *name) | |
| 276 { | |
| 277 int rv, err; | |
| 278 | |
| 279 rv = rmdir(name); | |
| 280 if (rv == -1) { | |
| 281 err = _MD_ERRNO(); | |
| 282 _PR_MD_MAP_RMDIR_ERROR(err); | |
| 283 } | |
| 284 return rv; | |
| 285 } | |
| 286 | |
| 287 PRInt32 _MD_read(PRFileDesc *fd, void *buf, PRInt32 amount) | |
| 288 { | |
| 289 PRThread *me = _PR_MD_CURRENT_THREAD(); | |
| 290 PRInt32 rv, err; | |
| 291 #ifndef _PR_USE_POLL | |
| 292 fd_set rd; | |
| 293 #else | |
| 294 struct pollfd pfd; | |
| 295 #endif /* _PR_USE_POLL */ | |
| 296 PRInt32 osfd = fd->secret->md.osfd; | |
| 297 | |
| 298 #ifndef _PR_USE_POLL | |
| 299 FD_ZERO(&rd); | |
| 300 FD_SET(osfd, &rd); | |
| 301 #else | |
| 302 pfd.fd = osfd; | |
| 303 pfd.events = POLLIN; | |
| 304 #endif /* _PR_USE_POLL */ | |
| 305 while ((rv = read(osfd,buf,amount)) == -1) { | |
| 306 err = _MD_ERRNO(); | |
| 307 if ((err == EAGAIN) || (err == EWOULDBLOCK)) { | |
| 308 if (fd->secret->nonblocking) { | |
| 309 break; | |
| 310 } | |
| 311 if (!_PR_IS_NATIVE_THREAD(me)) { | |
| 312 if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_READ, | |
| 313                                         PR_INTERVAL_NO_TIMEOUT)) < 0) | |
| 314                     goto done; | |
| 315 } else { | |
| 316 #ifndef _PR_USE_POLL | |
| 317 while ((rv = _MD_SELECT(osfd + 1, &rd, NULL, NULL, NULL)) | |
| 318 == -1 && (err = _MD_ERRNO()) == EINTR) { | |
| 319 /* retry _MD_SELECT() if it is interrupted */ | |
| 320 } | |
| 321 #else /* _PR_USE_POLL */ | |
| 322 while ((rv = _MD_POLL(&pfd, 1, -1)) | |
| 323 == -1 && (err = _MD_ERRNO()) == EINTR) { | |
| 324 /* retry _MD_POLL() if it is interrupted */ | |
| 325 } | |
| 326 #endif /* _PR_USE_POLL */ | |
| 327 if (rv == -1) { | |
| 328 break; | |
| 329 } | |
| 330 } | |
| 331 if (_PR_PENDING_INTERRUPT(me)) | |
| 332 break; | |
| 333 } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){ | |
| 334 continue; | |
| 335 } else { | |
| 336 break; | |
| 337 } | |
| 338 } | |
| 339 if (rv < 0) { | |
| 340 if (_PR_PENDING_INTERRUPT(me)) { | |
| 341 me->flags &= ~_PR_INTERRUPT; | |
| 342 PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); | |
| 343 } else { | |
| 344 _PR_MD_MAP_READ_ERROR(err); | |
| 345 } | |
| 346 } | |
| 347 done: | |
| 348 return(rv); | |
| 349 } | |
| 350 | |
| 351 PRInt32 _MD_write(PRFileDesc *fd, const void *buf, PRInt32 amount) | |
| 352 { | |
| 353 PRThread *me = _PR_MD_CURRENT_THREAD(); | |
| 354 PRInt32 rv, err; | |
| 355 #ifndef _PR_USE_POLL | |
| 356 fd_set wd; | |
| 357 #else | |
| 358 struct pollfd pfd; | |
| 359 #endif /* _PR_USE_POLL */ | |
| 360 PRInt32 osfd = fd->secret->md.osfd; | |
| 361 | |
| 362 #ifndef _PR_USE_POLL | |
| 363 FD_ZERO(&wd); | |
| 364 FD_SET(osfd, &wd); | |
| 365 #else | |
| 366 pfd.fd = osfd; | |
| 367 pfd.events = POLLOUT; | |
| 368 #endif /* _PR_USE_POLL */ | |
| 369 while ((rv = write(osfd,buf,amount)) == -1) { | |
| 370 err = _MD_ERRNO(); | |
| 371 if ((err == EAGAIN) || (err == EWOULDBLOCK)) { | |
| 372 if (fd->secret->nonblocking) { | |
| 373 break; | |
| 374 } | |
| 375 if (!_PR_IS_NATIVE_THREAD(me)) { | |
| 376 if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, | |
| 377                                         PR_INTERVAL_NO_TIMEOUT)) < 0) | |
| 378 goto done; | |
| 379 } else { | |
| 380 #ifndef _PR_USE_POLL | |
| 381 while ((rv = _MD_SELECT(osfd + 1, NULL, &wd, NULL, NULL)) | |
| 382 == -1 && (err = _MD_ERRNO()) == EINTR) { | |
| 383 /* retry _MD_SELECT() if it is interrupted */ | |
| 384 } | |
| 385 #else /* _PR_USE_POLL */ | |
| 386 while ((rv = _MD_POLL(&pfd, 1, -1)) | |
| 387 == -1 && (err = _MD_ERRNO()) == EINTR) { | |
| 388 /* retry _MD_POLL() if it is interrupted */ | |
| 389 } | |
| 390 #endif /* _PR_USE_POLL */ | |
| 391 if (rv == -1) { | |
| 392 break; | |
| 393 } | |
| 394 } | |
| 395 if (_PR_PENDING_INTERRUPT(me)) | |
| 396 break; | |
| 397 } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){ | |
| 398 continue; | |
| 399 } else { | |
| 400 break; | |
| 401 } | |
| 402 } | |
| 403 if (rv < 0) { | |
| 404 if (_PR_PENDING_INTERRUPT(me)) { | |
| 405 me->flags &= ~_PR_INTERRUPT; | |
| 406 PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); | |
| 407 } else { | |
| 408 _PR_MD_MAP_WRITE_ERROR(err); | |
| 409 } | |
| 410 } | |
| 411 done: | |
| 412 return(rv); | |
| 413 } | |
| 414 | |
| 415 PRInt32 _MD_fsync(PRFileDesc *fd) | |
| 416 { | |
| 417 PRInt32 rv, err; | |
| 418 | |
| 419 rv = fsync(fd->secret->md.osfd); | |
| 420 if (rv == -1) { | |
| 421 err = _MD_ERRNO(); | |
| 422 _PR_MD_MAP_FSYNC_ERROR(err); | |
| 423 } | |
| 424 return(rv); | |
| 425 } | |
| 426 | |
| 427 PRInt32 _MD_close(PRInt32 osfd) | |
| 428 { | |
| 429 PRInt32 rv, err; | |
| 430 | |
| 431 rv = close(osfd); | |
| 432 if (rv == -1) { | |
| 433 err = _MD_ERRNO(); | |
| 434 _PR_MD_MAP_CLOSE_ERROR(err); | |
| 435 } | |
| 436 return(rv); | |
| 437 } | |
| 438 | |
| 439 PRInt32 _MD_socket(PRInt32 domain, PRInt32 type, PRInt32 proto) | |
| 440 { | |
| 441 PRInt32 osfd, err; | |
| 442 | |
| 443 osfd = socket(domain, type, proto); | |
| 444 | |
| 445 if (osfd == -1) { | |
| 446 err = _MD_ERRNO(); | |
| 447 _PR_MD_MAP_SOCKET_ERROR(err); | |
| 448 return(osfd); | |
| 449 } | |
| 450 | |
| 451 return(osfd); | |
| 452 } | |
| 453 | |
| 454 PRInt32 _MD_socketavailable(PRFileDesc *fd) | |
| 455 { | |
| 456 PRInt32 result; | |
| 457 | |
| 458 if (ioctl(fd->secret->md.osfd, FIONREAD, &result) < 0) { | |
| 459 _PR_MD_MAP_SOCKETAVAILABLE_ERROR(_MD_ERRNO()); | |
| 460 return -1; | |
| 461 } | |
| 462 return result; | |
| 463 } | |
| 464 | |
| 465 PRInt64 _MD_socketavailable64(PRFileDesc *fd) | |
| 466 { | |
| 467 PRInt64 result; | |
| 468 LL_I2L(result, _MD_socketavailable(fd)); | |
| 469 return result; | |
| 470 } /* _MD_socketavailable64 */ | |
| 471 | |
| 472 #define READ_FD 1 | |
| 473 #define WRITE_FD 2 | |
| 474 | |
| 475 /* | |
| 476 * socket_io_wait -- | |
| 477 * | |
| 478 * wait for socket i/o, periodically checking for interrupt | |
| 479 * | |
| 480 * The first implementation uses select(), for platforms without | |
| 481 * poll(). The second (preferred) implementation uses poll(). | |
| 482 */ | |
| 483 | |
| 484 #ifndef _PR_USE_POLL | |
| 485 | |
| 486 static PRInt32 socket_io_wait(PRInt32 osfd, PRInt32 fd_type, | |
| 487 PRIntervalTime timeout) | |
| 488 { | |
| 489 PRInt32 rv = -1; | |
| 490 struct timeval tv; | |
| 491 PRThread *me = _PR_MD_CURRENT_THREAD(); | |
| 492 PRIntervalTime epoch, now, elapsed, remaining; | |
| 493 PRBool wait_for_remaining; | |
| 494 PRInt32 syserror; | |
| 495 fd_set rd_wr; | |
| 496 | |
| 497 switch (timeout) { | |
| 498 case PR_INTERVAL_NO_WAIT: | |
| 499 PR_SetError(PR_IO_TIMEOUT_ERROR, 0); | |
| 500 break; | |
| 501 case PR_INTERVAL_NO_TIMEOUT: | |
| 502 /* | |
| 503 * This is a special case of the 'default' case below. | |
| 504 * Please see the comments there. | |
| 505 */ | |
| 506 tv.tv_sec = _PR_INTERRUPT_CHECK_INTERVAL_SECS; | |
| 507 tv.tv_usec = 0; | |
| 508 FD_ZERO(&rd_wr); | |
| 509 do { | |
| 510 FD_SET(osfd, &rd_wr); | |
| 511 if (fd_type == READ_FD) | |
| 512 rv = _MD_SELECT(osfd + 1, &rd_wr, NULL, NULL, &tv); | |
| 513 else | |
| 514 rv = _MD_SELECT(osfd + 1, NULL, &rd_wr, NULL, &tv); | |
| 515 if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) { | |
| 516 _PR_MD_MAP_SELECT_ERROR(syserror); | |
| 517 break; | |
| 518 } | |
| 519 if (_PR_PENDING_INTERRUPT(me)) { | |
| 520 me->flags &= ~_PR_INTERRUPT; | |
| 521 PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); | |
| 522 rv = -1; | |
| 523 break; | |
| 524 } | |
| 525 } while (rv == 0 || (rv == -1 && syserror == EINTR)); | |
| 526 break; | |
| 527 default: | |
| 528 now = epoch = PR_IntervalNow(); | |
| 529 remaining = timeout; | |
| 530 FD_ZERO(&rd_wr); | |
| 531 do { | |
| 532 /* | |
| 533 * We block in _MD_SELECT for at most | |
| 534 * _PR_INTERRUPT_CHECK_INTERVAL_SECS seconds, | |
| 535 * so that there is an upper limit on the delay | |
| 536 * before the interrupt bit is checked. | |
| 537 */ | |
| 538 wait_for_remaining = PR_TRUE; | |
| 539 tv.tv_sec = PR_IntervalToSeconds(remaining); | |
| 540 if (tv.tv_sec > _PR_INTERRUPT_CHECK_INTERVAL_SECS) { | |
| 541 wait_for_remaining = PR_FALSE; | |
| 542 tv.tv_sec = _PR_INTERRUPT_CHECK_INTERVAL_SECS; | |
| 543 tv.tv_usec = 0; | |
| 544 } else { | |
| 545 tv.tv_usec = PR_IntervalToMicroseconds( | |
| 546 remaining - | |
| 547 PR_SecondsToInterval(tv.tv_sec)); | |
| 548 } | |
| 549 FD_SET(osfd, &rd_wr); | |
| 550 if (fd_type == READ_FD) | |
| 551 rv = _MD_SELECT(osfd + 1, &rd_wr, NULL, NULL, &tv); | |
| 552 else | |
| 553 rv = _MD_SELECT(osfd + 1, NULL, &rd_wr, NULL, &tv); | |
| 554 /* | |
| 555 * we don't consider EINTR a real error | |
| 556 */ | |
| 557 if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) { | |
| 558 _PR_MD_MAP_SELECT_ERROR(syserror); | |
| 559 break; | |
| 560 } | |
| 561 if (_PR_PENDING_INTERRUPT(me)) { | |
| 562 me->flags &= ~_PR_INTERRUPT; | |
| 563 PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); | |
| 564 rv = -1; | |
| 565 break; | |
| 566 } | |
| 567 /* | |
| 568 * We loop again if _MD_SELECT timed out or got interrupted | |
| 569 * by a signal, and the timeout deadline has not passed yet. | |
| 570 */ | |
| 571 if (rv == 0 || (rv == -1 && syserror == EINTR)) { | |
| 572 /* | |
| 573 * If _MD_SELECT timed out, we know how much time | |
| 574 * we spent in blocking, so we can avoid a | |
| 575 * PR_IntervalNow() call. | |
| 576 */ | |
| 577 if (rv == 0) { | |
| 578 if (wait_for_remaining) { | |
| 579 now += remaining; | |
| 580 } else { | |
| 581 now += PR_SecondsToInterval(tv.tv_sec) | |
| 582 + PR_MicrosecondsToInterval(tv.tv_usec); | |
| 583 } | |
| 584 } else { | |
| 585 now = PR_IntervalNow(); | |
| 586 } | |
| 587 elapsed = (PRIntervalTime) (now - epoch); | |
| 588 if (elapsed >= timeout) { | |
| 589 PR_SetError(PR_IO_TIMEOUT_ERROR, 0); | |
| 590 rv = -1; | |
| 591 break; | |
| 592 } else { | |
| 593 remaining = timeout - elapsed; | |
| 594 } | |
| 595 } | |
| 596 } while (rv == 0 || (rv == -1 && syserror == EINTR)); | |
| 597 break; | |
| 598 } | |
| 599 return(rv); | |
| 600 } | |
| 601 | |
| 602 #else /* _PR_USE_POLL */ | |
| 603 | |
| 604 static PRInt32 socket_io_wait(PRInt32 osfd, PRInt32 fd_type, | |
| 605 PRIntervalTime timeout) | |
| 606 { | |
| 607 PRInt32 rv = -1; | |
| 608 int msecs; | |
| 609 PRThread *me = _PR_MD_CURRENT_THREAD(); | |
| 610 PRIntervalTime epoch, now, elapsed, remaining; | |
| 611 PRBool wait_for_remaining; | |
| 612 PRInt32 syserror; | |
| 613 struct pollfd pfd; | |
| 614 | |
| 615 switch (timeout) { | |
| 616 case PR_INTERVAL_NO_WAIT: | |
| 617 PR_SetError(PR_IO_TIMEOUT_ERROR, 0); | |
| 618 break; | |
| 619 case PR_INTERVAL_NO_TIMEOUT: | |
| 620 /* | |
| 621 * This is a special case of the 'default' case below. | |
| 622 * Please see the comments there. | |
| 623 */ | |
| 624 msecs = _PR_INTERRUPT_CHECK_INTERVAL_SECS * 1000; | |
| 625 pfd.fd = osfd; | |
| 626 if (fd_type == READ_FD) { | |
| 627 pfd.events = POLLIN; | |
| 628 } else { | |
| 629 pfd.events = POLLOUT; | |
| 630 } | |
| 631 do { | |
| 632 rv = _MD_POLL(&pfd, 1, msecs); | |
| 633 if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) { | |
| 634 _PR_MD_MAP_POLL_ERROR(syserror); | |
| 635 break; | |
| 636 } | |
| 637 /* | |
| 638              * If POLLERR is set, don't process it; retry the operation | |
| 639 */ | |
| 640 if ((rv == 1) && (pfd.revents & (POLLHUP | POLLNVAL))) { | |
| 641 rv = -1; | |
| 642 _PR_MD_MAP_POLL_REVENTS_ERROR(pfd.revents); | |
| 643 break; | |
| 644 } | |
| 645 if (_PR_PENDING_INTERRUPT(me)) { | |
| 646 me->flags &= ~_PR_INTERRUPT; | |
| 647 PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); | |
| 648 rv = -1; | |
| 649 break; | |
| 650 } | |
| 651 } while (rv == 0 || (rv == -1 && syserror == EINTR)); | |
| 652 break; | |
| 653 default: | |
| 654 now = epoch = PR_IntervalNow(); | |
| 655 remaining = timeout; | |
| 656 pfd.fd = osfd; | |
| 657 if (fd_type == READ_FD) { | |
| 658 pfd.events = POLLIN; | |
| 659 } else { | |
| 660 pfd.events = POLLOUT; | |
| 661 } | |
| 662 do { | |
| 663 /* | |
| 664 * We block in _MD_POLL for at most | |
| 665 * _PR_INTERRUPT_CHECK_INTERVAL_SECS seconds, | |
| 666 * so that there is an upper limit on the delay | |
| 667 * before the interrupt bit is checked. | |
| 668 */ | |
| 669 wait_for_remaining = PR_TRUE; | |
| 670 msecs = PR_IntervalToMilliseconds(remaining); | |
| 671 if (msecs > _PR_INTERRUPT_CHECK_INTERVAL_SECS * 1000) { | |
| 672 wait_for_remaining = PR_FALSE; | |
| 673 msecs = _PR_INTERRUPT_CHECK_INTERVAL_SECS * 1000; | |
| 674 } | |
| 675 rv = _MD_POLL(&pfd, 1, msecs); | |
| 676 /* | |
| 677 * we don't consider EINTR a real error | |
| 678 */ | |
| 679 if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) { | |
| 680 _PR_MD_MAP_POLL_ERROR(syserror); | |
| 681 break; | |
| 682 } | |
| 683 if (_PR_PENDING_INTERRUPT(me)) { | |
| 684 me->flags &= ~_PR_INTERRUPT; | |
| 685 PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); | |
| 686 rv = -1; | |
| 687 break; | |
| 688 } | |
| 689 /* | |
| 690              * If POLLERR is set, don't process it; retry the operation | |
| 691 */ | |
| 692 if ((rv == 1) && (pfd.revents & (POLLHUP | POLLNVAL))) { | |
| 693 rv = -1; | |
| 694 _PR_MD_MAP_POLL_REVENTS_ERROR(pfd.revents); | |
| 695 break; | |
| 696 } | |
| 697 /* | |
| 698 * We loop again if _MD_POLL timed out or got interrupted | |
| 699 * by a signal, and the timeout deadline has not passed yet. | |
| 700 */ | |
| 701 if (rv == 0 || (rv == -1 && syserror == EINTR)) { | |
| 702 /* | |
| 703 * If _MD_POLL timed out, we know how much time | |
| 704 * we spent in blocking, so we can avoid a | |
| 705 * PR_IntervalNow() call. | |
| 706 */ | |
| 707 if (rv == 0) { | |
| 708 if (wait_for_remaining) { | |
| 709 now += remaining; | |
| 710 } else { | |
| 711 now += PR_MillisecondsToInterval(msecs); | |
| 712 } | |
| 713 } else { | |
| 714 now = PR_IntervalNow(); | |
| 715 } | |
| 716 elapsed = (PRIntervalTime) (now - epoch); | |
| 717 if (elapsed >= timeout) { | |
| 718 PR_SetError(PR_IO_TIMEOUT_ERROR, 0); | |
| 719 rv = -1; | |
| 720 break; | |
| 721 } else { | |
| 722 remaining = timeout - elapsed; | |
| 723 } | |
| 724 } | |
| 725 } while (rv == 0 || (rv == -1 && syserror == EINTR)); | |
| 726 break; | |
| 727 } | |
| 728 return(rv); | |
| 729 } | |
| 730 | |
| 731 #endif /* _PR_USE_POLL */ | |
| 732 | |
| 733 static PRInt32 local_io_wait( | |
| 734 PRInt32 osfd, | |
| 735 PRInt32 wait_flag, | |
| 736 PRIntervalTime timeout) | |
| 737 { | |
| 738 _PRUnixPollDesc pd; | |
| 739 PRInt32 rv; | |
| 740 | |
| 741 PR_LOG(_pr_io_lm, PR_LOG_MIN, | |
| 742 ("waiting to %s on osfd=%d", | |
| 743 (wait_flag == _PR_UNIX_POLL_READ) ? "read" : "write", | |
| 744 osfd)); | |
| 745 | |
| 746 if (timeout == PR_INTERVAL_NO_WAIT) return 0; | |
| 747 | |
| 748 pd.osfd = osfd; | |
| 749 pd.in_flags = wait_flag; | |
| 750 pd.out_flags = 0; | |
| 751 | |
| 752 rv = _PR_WaitForMultipleFDs(&pd, 1, timeout); | |
| 753 | |
| 754 if (rv == 0) { | |
| 755 PR_SetError(PR_IO_TIMEOUT_ERROR, 0); | |
| 756 rv = -1; | |
| 757 } | |
| 758 return rv; | |
| 759 } | |
| 760 | |
| 761 | |
| 762 PRInt32 _MD_recv(PRFileDesc *fd, void *buf, PRInt32 amount, | |
| 763 PRInt32 flags, PRIntervalTime timeout) | |
| 764 { | |
| 765 PRInt32 osfd = fd->secret->md.osfd; | |
| 766 PRInt32 rv, err; | |
| 767 PRThread *me = _PR_MD_CURRENT_THREAD(); | |
| 768 | |
| 769 /* | |
| 770 * Many OS's (Solaris, Unixware) have a broken recv which won't read | |
| 771 * from socketpairs. As long as we don't use flags on socketpairs, this | |
| 772 * is a decent fix. - mikep | |
| 773 */ | |
| 774 #if defined(UNIXWARE) || defined(SOLARIS) | |
| 775 while ((rv = read(osfd,buf,amount)) == -1) { | |
| 776 #else | |
| 777 while ((rv = recv(osfd,buf,amount,flags)) == -1) { | |
| 778 #endif | |
| 779 err = _MD_ERRNO(); | |
| 780 if ((err == EAGAIN) || (err == EWOULDBLOCK)) { | |
| 781 if (fd->secret->nonblocking) { | |
| 782 break; | |
| 783 } | |
| 784 if (!_PR_IS_NATIVE_THREAD(me)) { | |
| 785                 if ((rv = local_io_wait(osfd,_PR_UNIX_POLL_READ,timeout)) < 0) | |
| 786 goto done; | |
| 787 } else { | |
| 788 if ((rv = socket_io_wait(osfd, READ_FD, timeout)) < 0) | |
| 789 goto done; | |
| 790 } | |
| 791 } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){ | |
| 792 continue; | |
| 793 } else { | |
| 794 break; | |
| 795 } | |
| 796 } | |
| 797 if (rv < 0) { | |
| 798 _PR_MD_MAP_RECV_ERROR(err); | |
| 799 } | |
| 800 done: | |
| 801 return(rv); | |
| 802 } | |
| 803 | |
| 804 PRInt32 _MD_recvfrom(PRFileDesc *fd, void *buf, PRInt32 amount, | |
| 805 PRIntn flags, PRNetAddr *addr, PRUint32 *addrlen, | |
| 806 PRIntervalTime timeout) | |
| 807 { | |
| 808 PRInt32 osfd = fd->secret->md.osfd; | |
| 809 PRInt32 rv, err; | |
| 810 PRThread *me = _PR_MD_CURRENT_THREAD(); | |
| 811 | |
| 812 while ((*addrlen = PR_NETADDR_SIZE(addr)), | |
| 813 ((rv = recvfrom(osfd, buf, amount, flags, | |
| 814                 (struct sockaddr *) addr, (_PRSockLen_t *)addrlen)) == -1)) { | |
| 815 err = _MD_ERRNO(); | |
| 816 if ((err == EAGAIN) || (err == EWOULDBLOCK)) { | |
| 817 if (fd->secret->nonblocking) { | |
| 818 break; | |
| 819 } | |
| 820 if (!_PR_IS_NATIVE_THREAD(me)) { | |
| 821 if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_READ, timeout)) < 0) | |
| 822 goto done; | |
| 823 } else { | |
| 824 if ((rv = socket_io_wait(osfd, READ_FD, timeout)) < 0) | |
| 825 goto done; | |
| 826 } | |
| 827 } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){ | |
| 828 continue; | |
| 829 } else { | |
| 830 break; | |
| 831 } | |
| 832 } | |
| 833 if (rv < 0) { | |
| 834 _PR_MD_MAP_RECVFROM_ERROR(err); | |
| 835 } | |
| 836 done: | |
| 837 #ifdef _PR_HAVE_SOCKADDR_LEN | |
| 838 if (rv != -1) { | |
| 839 /* ignore the sa_len field of struct sockaddr */ | |
| 840 if (addr) { | |
| 841 addr->raw.family = ((struct sockaddr *) addr)->sa_family; | |
| 842 } | |
| 843 } | |
| 844 #endif /* _PR_HAVE_SOCKADDR_LEN */ | |
| 845 return(rv); | |
| 846 } | |
| 847 | |
| 848 PRInt32 _MD_send(PRFileDesc *fd, const void *buf, PRInt32 amount, | |
| 849 PRInt32 flags, PRIntervalTime timeout) | |
| 850 { | |
| 851 PRInt32 osfd = fd->secret->md.osfd; | |
| 852 PRInt32 rv, err; | |
| 853 PRThread *me = _PR_MD_CURRENT_THREAD(); | |
| 854 #if defined(SOLARIS) | |
| 855 PRInt32 tmp_amount = amount; | |
| 856 #endif | |
| 857 | |
| 858 /* | |
| 859 * On pre-2.6 Solaris, send() is much slower than write(). | |
| 860 * On 2.6 and beyond, with in-kernel sockets, send() and | |
| 861 * write() are fairly equivalent in performance. | |
| 862 */ | |
| 863 #if defined(SOLARIS) | |
| 864 PR_ASSERT(0 == flags); | |
| 865 while ((rv = write(osfd,buf,tmp_amount)) == -1) { | |
| 866 #else | |
| 867 while ((rv = send(osfd,buf,amount,flags)) == -1) { | |
| 868 #endif | |
| 869 err = _MD_ERRNO(); | |
| 870 if ((err == EAGAIN) || (err == EWOULDBLOCK)) { | |
| 871 if (fd->secret->nonblocking) { | |
| 872 break; | |
| 873 } | |
| 874 if (!_PR_IS_NATIVE_THREAD(me)) { | |
| 875                 if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout)) < 0) | |
| 876 goto done; | |
| 877 } else { | |
| 878 if ((rv = socket_io_wait(osfd, WRITE_FD, timeout))< 0) | |
| 879 goto done; | |
| 880 } | |
| 881 } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){ | |
| 882 continue; | |
| 883 } else { | |
| 884 #if defined(SOLARIS) | |
| 885 /* | |
| 886              * The write system call has been reported to return the ERANGE | |
| 887              * error on occasion. Try to write in smaller chunks to workaround | |
| 888 * this bug. | |
| 889 */ | |
| 890 if (err == ERANGE) { | |
| 891 if (tmp_amount > 1) { | |
| 892                     tmp_amount = tmp_amount/2; /* half the bytes */ | |
| 893 continue; | |
| 894 } | |
| 895 } | |
| 896 #endif | |
| 897 break; | |
| 898 } | |
| 899 } | |
| 900 /* | |
| 901 * optimization; if bytes sent is less than "amount" call | |
| 902 * select before returning. This is because it is likely that | |
| 903 * the next send() call will return EWOULDBLOCK. | |
| 904 */ | |
| 905 if ((!fd->secret->nonblocking) && (rv > 0) && (rv < amount) | |
| 906 && (timeout != PR_INTERVAL_NO_WAIT)) { | |
| 907 if (_PR_IS_NATIVE_THREAD(me)) { | |
| 908 if (socket_io_wait(osfd, WRITE_FD, timeout)< 0) { | |
| 909 rv = -1; | |
| 910 goto done; | |
| 911 } | |
| 912 } else { | |
| 913             if (local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout) < 0) { | |
| 914 rv = -1; | |
| 915 goto done; | |
| 916 } | |
| 917 } | |
| 918 } | |
| 919 if (rv < 0) { | |
| 920 _PR_MD_MAP_SEND_ERROR(err); | |
| 921 } | |
| 922 done: | |
| 923 return(rv); | |
| 924 } | |
| 925 | |
| 926 PRInt32 _MD_sendto( | |
| 927 PRFileDesc *fd, const void *buf, PRInt32 amount, PRIntn flags, | |
| 928 const PRNetAddr *addr, PRUint32 addrlen, PRIntervalTime timeout) | |
| 929 { | |
| 930 PRInt32 osfd = fd->secret->md.osfd; | |
| 931 PRInt32 rv, err; | |
| 932 PRThread *me = _PR_MD_CURRENT_THREAD(); | |
| 933 #ifdef _PR_HAVE_SOCKADDR_LEN | |
| 934 PRNetAddr addrCopy; | |
| 935 | |
| 936 addrCopy = *addr; | |
| 937 ((struct sockaddr *) &addrCopy)->sa_len = addrlen; | |
| 938 ((struct sockaddr *) &addrCopy)->sa_family = addr->raw.family; | |
| 939 | |
| 940 while ((rv = sendto(osfd, buf, amount, flags, | |
| 941 (struct sockaddr *) &addrCopy, addrlen)) == -1) { | |
| 942 #else | |
| 943 while ((rv = sendto(osfd, buf, amount, flags, | |
| 944 (struct sockaddr *) addr, addrlen)) == -1) { | |
| 945 #endif | |
| 946 err = _MD_ERRNO(); | |
| 947 if ((err == EAGAIN) || (err == EWOULDBLOCK)) { | |
| 948 if (fd->secret->nonblocking) { | |
| 949 break; | |
| 950 } | |
| 951 if (!_PR_IS_NATIVE_THREAD(me)) { | |
| 952                 if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout)) < 0) | |
| 953 goto done; | |
| 954 } else { | |
| 955 if ((rv = socket_io_wait(osfd, WRITE_FD, timeout))< 0) | |
| 956 goto done; | |
| 957 } | |
| 958 } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){ | |
| 959 continue; | |
| 960 } else { | |
| 961 break; | |
| 962 } | |
| 963 } | |
| 964 if (rv < 0) { | |
| 965 _PR_MD_MAP_SENDTO_ERROR(err); | |
| 966 } | |
| 967 done: | |
| 968 return(rv); | |
| 969 } | |
| 970 | |
| 971 PRInt32 _MD_writev( | |
| 972 PRFileDesc *fd, const PRIOVec *iov, | |
| 973 PRInt32 iov_size, PRIntervalTime timeout) | |
| 974 { | |
| 975 PRInt32 rv, err; | |
| 976 PRThread *me = _PR_MD_CURRENT_THREAD(); | |
| 977 PRInt32 index, amount = 0; | |
| 978 PRInt32 osfd = fd->secret->md.osfd; | |
| 979 | |
| 980 /* | |
| 981 * Calculate the total number of bytes to be sent; needed for | |
| 982 * optimization later. | |
| 983 * We could avoid this if this number was passed in; but it is | |
| 984 * probably not a big deal because iov_size is usually small (less than | |
| 985 * 3) | |
| 986 */ | |
| 987 if (!fd->secret->nonblocking) { | |
| 988 for (index=0; index<iov_size; index++) { | |
| 989 amount += iov[index].iov_len; | |
| 990 } | |
| 991 } | |
| 992 | |
| 993 while ((rv = writev(osfd, (const struct iovec*)iov, iov_size)) == -1) { | |
| 994 err = _MD_ERRNO(); | |
| 995 if ((err == EAGAIN) || (err == EWOULDBLOCK)) { | |
| 996 if (fd->secret->nonblocking) { | |
| 997 break; | |
| 998 } | |
| 999 if (!_PR_IS_NATIVE_THREAD(me)) { | |
| 1000                 if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout)) < 0) | |
| 1001 goto done; | |
| 1002 } else { | |
| 1003 if ((rv = socket_io_wait(osfd, WRITE_FD, timeout))<0) | |
| 1004 goto done; | |
| 1005 } | |
| 1006 } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){ | |
| 1007 continue; | |
| 1008 } else { | |
| 1009 break; | |
| 1010 } | |
| 1011 } | |
| 1012 /* | |
| 1013 * optimization; if bytes sent is less than "amount" call | |
| 1014 * select before returning. This is because it is likely that | |
| 1015 * the next writev() call will return EWOULDBLOCK. | |
| 1016 */ | |
| 1017 if ((!fd->secret->nonblocking) && (rv > 0) && (rv < amount) | |
| 1018 && (timeout != PR_INTERVAL_NO_WAIT)) { | |
| 1019 if (_PR_IS_NATIVE_THREAD(me)) { | |
| 1020 if (socket_io_wait(osfd, WRITE_FD, timeout) < 0) { | |
| 1021 rv = -1; | |
| 1022 goto done; | |
| 1023 } | |
| 1024 } else { | |
| 1025             if (local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout) < 0) { | |
| 1026 rv = -1; | |
| 1027 goto done; | |
| 1028 } | |
| 1029 } | |
| 1030 } | |
| 1031 if (rv < 0) { | |
| 1032 _PR_MD_MAP_WRITEV_ERROR(err); | |
| 1033 } | |
| 1034 done: | |
| 1035 return(rv); | |
| 1036 } | |
| 1037 | |
| 1038 PRInt32 _MD_accept(PRFileDesc *fd, PRNetAddr *addr, | |
| 1039 PRUint32 *addrlen, PRIntervalTime timeout) | |
| 1040 { | |
| 1041 PRInt32 osfd = fd->secret->md.osfd; | |
| 1042 PRInt32 rv, err; | |
| 1043 PRThread *me = _PR_MD_CURRENT_THREAD(); | |
| 1044 | |
| 1045 while ((rv = accept(osfd, (struct sockaddr *) addr, | |
| 1046 (_PRSockLen_t *)addrlen)) == -1) { | |
| 1047 err = _MD_ERRNO(); | |
| 1048 if ((err == EAGAIN) || (err == EWOULDBLOCK) || (err == ECONNABORTED)) { | |
| 1049 if (fd->secret->nonblocking) { | |
| 1050 break; | |
| 1051 } | |
| 1052 if (!_PR_IS_NATIVE_THREAD(me)) { | |
| 1053                 if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_READ, timeout)) < 0) | |
| 1054 goto done; | |
| 1055 } else { | |
| 1056 if ((rv = socket_io_wait(osfd, READ_FD, timeout)) < 0) | |
| 1057 goto done; | |
| 1058 } | |
| 1059 } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){ | |
| 1060 continue; | |
| 1061 } else { | |
| 1062 break; | |
| 1063 } | |
| 1064 } | |
| 1065 if (rv < 0) { | |
| 1066 _PR_MD_MAP_ACCEPT_ERROR(err); | |
| 1067 } | |
| 1068 done: | |
| 1069 #ifdef _PR_HAVE_SOCKADDR_LEN | |
| 1070 if (rv != -1) { | |
| 1071 /* ignore the sa_len field of struct sockaddr */ | |
| 1072 if (addr) { | |
| 1073 addr->raw.family = ((struct sockaddr *) addr)->sa_family; | |
| 1074 } | |
| 1075 } | |
| 1076 #endif /* _PR_HAVE_SOCKADDR_LEN */ | |
| 1077 return(rv); | |
| 1078 } | |
| 1079 | |
| 1080 extern int _connect (int s, const struct sockaddr *name, int namelen); | |
| 1081 PRInt32 _MD_connect( | |
| 1082     PRFileDesc *fd, const PRNetAddr *addr, PRUint32 addrlen, PRIntervalTime timeout) | |
| 1083 { | |
| 1084 PRInt32 rv, err; | |
| 1085 PRThread *me = _PR_MD_CURRENT_THREAD(); | |
| 1086 PRInt32 osfd = fd->secret->md.osfd; | |
| 1087 #ifdef IRIX | |
| 1088 extern PRInt32 _MD_irix_connect( | |
| 1089         PRInt32 osfd, const PRNetAddr *addr, PRInt32 addrlen, PRIntervalTime timeout); | |
| 1090 #endif | |
| 1091 #ifdef _PR_HAVE_SOCKADDR_LEN | |
| 1092 PRNetAddr addrCopy; | |
| 1093 | |
| 1094 addrCopy = *addr; | |
| 1095 ((struct sockaddr *) &addrCopy)->sa_len = addrlen; | |
| 1096 ((struct sockaddr *) &addrCopy)->sa_family = addr->raw.family; | |
| 1097 #endif | |
| 1098 | |
| 1099 /* | |
| 1100 * We initiate the connection setup by making a nonblocking connect() | |
| 1101 * call. If the connect() call fails, there are two cases we handle | |
| 1102 * specially: | |
| 1103 * 1. The connect() call was interrupted by a signal. In this case | |
| 1104 * we simply retry connect(). | |
| 1105 * 2. The NSPR socket is nonblocking and connect() fails with | |
| 1106 * EINPROGRESS. We first wait until the socket becomes writable. | |
| 1107 * Then we try to find out whether the connection setup succeeded | |
| 1108 * or failed. | |
| 1109 */ | |
| 1110 | |
| 1111 retry: | |
| 1112 #ifdef IRIX | |
| 1113 if ((rv = _MD_irix_connect(osfd, addr, addrlen, timeout)) == -1) { | |
| 1114 #else | |
| 1115 #ifdef _PR_HAVE_SOCKADDR_LEN | |
| 1116 if ((rv = connect(osfd, (struct sockaddr *)&addrCopy, addrlen)) == -1) { | |
| 1117 #else | |
| 1118 if ((rv = connect(osfd, (struct sockaddr *)addr, addrlen)) == -1) { | |
| 1119 #endif | |
| 1120 #endif | |
| 1121 err = _MD_ERRNO(); | |
| 1122 | |
| 1123 if (err == EINTR) { | |
| 1124 if (_PR_PENDING_INTERRUPT(me)) { | |
| 1125 me->flags &= ~_PR_INTERRUPT; | |
| 1126 PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0); | |
| 1127 return -1; | |
| 1128 } | |
| 1129 goto retry; | |
| 1130 } | |
| 1131 | |
| 1132 if (!fd->secret->nonblocking && (err == EINPROGRESS)) { | |
| 1133 if (!_PR_IS_NATIVE_THREAD(me)) { | |
| 1134 | |
| 1135                 if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout)) < 0) | |
| 1136 return -1; | |
| 1137 } else { | |
| 1138 /* | |
| 1139 * socket_io_wait() may return -1 or 1. | |
| 1140 */ | |
| 1141 | |
| 1142 rv = socket_io_wait(osfd, WRITE_FD, timeout); | |
| 1143 if (rv == -1) { | |
| 1144 return -1; | |
| 1145 } | |
| 1146 } | |
| 1147 | |
| 1148 PR_ASSERT(rv == 1); | |
| 1149 if (_PR_PENDING_INTERRUPT(me)) { | |
| 1150 me->flags &= ~_PR_INTERRUPT; | |
| 1151 PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0); | |
| 1152 return -1; | |
| 1153 } | |
| 1154 err = _MD_unix_get_nonblocking_connect_error(osfd); | |
| 1155 if (err != 0) { | |
| 1156 _PR_MD_MAP_CONNECT_ERROR(err); | |
| 1157 return -1; | |
| 1158 } | |
| 1159 return 0; | |
| 1160 } | |
| 1161 | |
| 1162 _PR_MD_MAP_CONNECT_ERROR(err); | |
| 1163 } | |
| 1164 | |
| 1165 return rv; | |
| 1166 } /* _MD_connect */ | |
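
The nonblocking-connect handling above follows the standard POSIX pattern: issue connect(), wait for the socket to become writable, then read the deferred result (presumably what _MD_unix_get_nonblocking_connect_error() does via getsockopt(SO_ERROR)). A minimal standalone sketch of that pattern with plain POSIX calls, independent of the NSPR wrappers used above:

```c
#include <sys/socket.h>
#include <poll.h>

/* Sketch: finish a nonblocking connect() that returned EINPROGRESS.
 * Returns 0 on success, -1 on timeout or connection failure. */
static int finish_nonblocking_connect(int fd, int timeout_ms)
{
    struct pollfd pfd;
    int err = 0;
    socklen_t len = sizeof(err);

    pfd.fd = fd;
    pfd.events = POLLOUT;
    if (poll(&pfd, 1, timeout_ms) <= 0)
        return -1;                  /* timed out or poll() failed */
    /* Once writable, the deferred connect() result is reported via SO_ERROR. */
    if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) == -1 || err != 0)
        return -1;
    return 0;
}
```

NSPR's version additionally slices the wait into _PR_INTERRUPT_CHECK_INTERVAL_SECS chunks so that a pending thread interrupt can be noticed while the thread is blocked.
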
| 1167 | |
| 1168 PRInt32 _MD_bind(PRFileDesc *fd, const PRNetAddr *addr, PRUint32 addrlen) | |
| 1169 { | |
| 1170 PRInt32 rv, err; | |
| 1171 #ifdef _PR_HAVE_SOCKADDR_LEN | |
| 1172 PRNetAddr addrCopy; | |
| 1173 | |
| 1174 addrCopy = *addr; | |
| 1175 ((struct sockaddr *) &addrCopy)->sa_len = addrlen; | |
| 1176 ((struct sockaddr *) &addrCopy)->sa_family = addr->raw.family; | |
| 1177     rv = bind(fd->secret->md.osfd, (struct sockaddr *) &addrCopy, (int )addrlen); | |
| 1178 #else | |
| 1179 rv = bind(fd->secret->md.osfd, (struct sockaddr *) addr, (int )addrlen); | |
| 1180 #endif | |
| 1181 if (rv < 0) { | |
| 1182 err = _MD_ERRNO(); | |
| 1183 _PR_MD_MAP_BIND_ERROR(err); | |
| 1184 } | |
| 1185 return(rv); | |
| 1186 } | |
| 1187 | |
| 1188 PRInt32 _MD_listen(PRFileDesc *fd, PRIntn backlog) | |
| 1189 { | |
| 1190 PRInt32 rv, err; | |
| 1191 | |
| 1192 rv = listen(fd->secret->md.osfd, backlog); | |
| 1193 if (rv < 0) { | |
| 1194 err = _MD_ERRNO(); | |
| 1195 _PR_MD_MAP_LISTEN_ERROR(err); | |
| 1196 } | |
| 1197 return(rv); | |
| 1198 } | |
| 1199 | |
| 1200 PRInt32 _MD_shutdown(PRFileDesc *fd, PRIntn how) | |
| 1201 { | |
| 1202 PRInt32 rv, err; | |
| 1203 | |
| 1204 rv = shutdown(fd->secret->md.osfd, how); | |
| 1205 if (rv < 0) { | |
| 1206 err = _MD_ERRNO(); | |
| 1207 _PR_MD_MAP_SHUTDOWN_ERROR(err); | |
| 1208 } | |
| 1209 return(rv); | |
| 1210 } | |
| 1211 | |
| 1212 PRInt32 _MD_socketpair(int af, int type, int flags, | |
| 1213 PRInt32 *osfd) | |
| 1214 { | |
| 1215 PRInt32 rv, err; | |
| 1216 | |
| 1217 rv = socketpair(af, type, flags, osfd); | |
| 1218 if (rv < 0) { | |
| 1219 err = _MD_ERRNO(); | |
| 1220 _PR_MD_MAP_SOCKETPAIR_ERROR(err); | |
| 1221 } | |
| 1222 return rv; | |
| 1223 } | |
| 1224 | |
| 1225 PRStatus _MD_getsockname(PRFileDesc *fd, PRNetAddr *addr, | |
| 1226 PRUint32 *addrlen) | |
| 1227 { | |
| 1228 PRInt32 rv, err; | |
| 1229 | |
| 1230 rv = getsockname(fd->secret->md.osfd, | |
| 1231 (struct sockaddr *) addr, (_PRSockLen_t *)addrlen); | |
| 1232 #ifdef _PR_HAVE_SOCKADDR_LEN | |
| 1233 if (rv == 0) { | |
| 1234 /* ignore the sa_len field of struct sockaddr */ | |
| 1235 if (addr) { | |
| 1236 addr->raw.family = ((struct sockaddr *) addr)->sa_family; | |
| 1237 } | |
| 1238 } | |
| 1239 #endif /* _PR_HAVE_SOCKADDR_LEN */ | |
| 1240 if (rv < 0) { | |
| 1241 err = _MD_ERRNO(); | |
| 1242 _PR_MD_MAP_GETSOCKNAME_ERROR(err); | |
| 1243 } | |
| 1244 return rv==0?PR_SUCCESS:PR_FAILURE; | |
| 1245 } | |
| 1246 | |
| 1247 PRStatus _MD_getpeername(PRFileDesc *fd, PRNetAddr *addr, | |
| 1248 PRUint32 *addrlen) | |
| 1249 { | |
| 1250 PRInt32 rv, err; | |
| 1251 | |
| 1252 rv = getpeername(fd->secret->md.osfd, | |
| 1253 (struct sockaddr *) addr, (_PRSockLen_t *)addrlen); | |
| 1254 #ifdef _PR_HAVE_SOCKADDR_LEN | |
| 1255 if (rv == 0) { | |
| 1256 /* ignore the sa_len field of struct sockaddr */ | |
| 1257 if (addr) { | |
| 1258 addr->raw.family = ((struct sockaddr *) addr)->sa_family; | |
| 1259 } | |
| 1260 } | |
| 1261 #endif /* _PR_HAVE_SOCKADDR_LEN */ | |
| 1262 if (rv < 0) { | |
| 1263 err = _MD_ERRNO(); | |
| 1264 _PR_MD_MAP_GETPEERNAME_ERROR(err); | |
| 1265 } | |
| 1266 return rv==0?PR_SUCCESS:PR_FAILURE; | |
| 1267 } | |
| 1268 | |
| 1269 PRStatus _MD_getsockopt(PRFileDesc *fd, PRInt32 level, | |
| 1270 PRInt32 optname, char* optval, PRInt32* optlen) | |
| 1271 { | |
| 1272 PRInt32 rv, err; | |
| 1273 | |
| 1274     rv = getsockopt(fd->secret->md.osfd, level, optname, optval, (_PRSockLen_t *)optlen); | |
| 1275 if (rv < 0) { | |
| 1276 err = _MD_ERRNO(); | |
| 1277 _PR_MD_MAP_GETSOCKOPT_ERROR(err); | |
| 1278 } | |
| 1279 return rv==0?PR_SUCCESS:PR_FAILURE; | |
| 1280 } | |
| 1281 | |
| 1282 PRStatus _MD_setsockopt(PRFileDesc *fd, PRInt32 level, | |
| 1283 PRInt32 optname, const char* optval, PRInt32 optlen) | |
| 1284 { | |
| 1285 PRInt32 rv, err; | |
| 1286 | |
| 1287 rv = setsockopt(fd->secret->md.osfd, level, optname, optval, optlen); | |
| 1288 if (rv < 0) { | |
| 1289 err = _MD_ERRNO(); | |
| 1290 _PR_MD_MAP_SETSOCKOPT_ERROR(err); | |
| 1291 } | |
| 1292 return rv==0?PR_SUCCESS:PR_FAILURE; | |
| 1293 } | |
| 1294 | |
| 1295 PRStatus _MD_set_fd_inheritable(PRFileDesc *fd, PRBool inheritable) | |
| 1296 { | |
| 1297 int rv; | |
| 1298 | |
| 1299 rv = fcntl(fd->secret->md.osfd, F_SETFD, inheritable ? 0 : FD_CLOEXEC); | |
| 1300 if (-1 == rv) { | |
| 1301 PR_SetError(PR_UNKNOWN_ERROR, _MD_ERRNO()); | |
| 1302 return PR_FAILURE; | |
| 1303 } | |
| 1304 return PR_SUCCESS; | |
| 1305 } | |
| 1306 | |
| 1307 void _MD_init_fd_inheritable(PRFileDesc *fd, PRBool imported) | |
| 1308 { | |
| 1309 if (imported) { | |
| 1310 fd->secret->inheritable = _PR_TRI_UNKNOWN; | |
| 1311 } else { | |
| 1312 /* By default, a Unix fd is not closed on exec. */ | |
| 1313 #ifdef DEBUG | |
| 1314 { | |
| 1315 int flags = fcntl(fd->secret->md.osfd, F_GETFD, 0); | |
| 1316 PR_ASSERT(0 == flags); | |
| 1317 } | |
| 1318 #endif | |
| 1319 fd->secret->inheritable = _PR_TRI_TRUE; | |
| 1320 } | |
| 1321 } | |
| 1322 | |
| 1323 /************************************************************************/ | |
| 1324 #if !defined(_PR_USE_POLL) | |
| 1325 | |
| 1326 /* | |
| 1327 ** Scan through io queue and find any bad fd's that triggered the error | |
| 1328 ** from _MD_SELECT | |
| 1329 */ | |
| 1330 static void FindBadFDs(void) | |
| 1331 { | |
| 1332 PRCList *q; | |
| 1333 PRThread *me = _MD_CURRENT_THREAD(); | |
| 1334 | |
| 1335 PR_ASSERT(!_PR_IS_NATIVE_THREAD(me)); | |
| 1336 q = (_PR_IOQ(me->cpu)).next; | |
| 1337 _PR_IOQ_MAX_OSFD(me->cpu) = -1; | |
| 1338 _PR_IOQ_TIMEOUT(me->cpu) = PR_INTERVAL_NO_TIMEOUT; | |
| 1339 while (q != &_PR_IOQ(me->cpu)) { | |
| 1340 PRPollQueue *pq = _PR_POLLQUEUE_PTR(q); | |
| 1341 PRBool notify = PR_FALSE; | |
| 1342 _PRUnixPollDesc *pds = pq->pds; | |
| 1343 _PRUnixPollDesc *epds = pds + pq->npds; | |
| 1344 PRInt32 pq_max_osfd = -1; | |
| 1345 | |
| 1346 q = q->next; | |
| 1347 for (; pds < epds; pds++) { | |
| 1348 PRInt32 osfd = pds->osfd; | |
| 1349 pds->out_flags = 0; | |
| 1350 PR_ASSERT(osfd >= 0 || pds->in_flags == 0); | |
| 1351 if (pds->in_flags == 0) { | |
| 1352 continue; /* skip this fd */ | |
| 1353 } | |
| 1354 if (fcntl(osfd, F_GETFL, 0) == -1) { | |
| 1355 /* Found a bad descriptor, remove it from the fd_sets. */ | |
| 1356 PR_LOG(_pr_io_lm, PR_LOG_MAX, | |
| 1357 ("file descriptor %d is bad", osfd)); | |
| 1358 pds->out_flags = _PR_UNIX_POLL_NVAL; | |
| 1359 notify = PR_TRUE; | |
| 1360 } | |
| 1361 if (osfd > pq_max_osfd) { | |
| 1362 pq_max_osfd = osfd; | |
| 1363 } | |
| 1364 } | |
| 1365 | |
| 1366 if (notify) { | |
| 1367 PRIntn pri; | |
| 1368 PR_REMOVE_LINK(&pq->links); | |
| 1369 pq->on_ioq = PR_FALSE; | |
| 1370 | |
| 1371 /* | |
| 1372              * Decrement the count of descriptors for each descriptor/event | |
| 1373 * because this I/O request is being removed from the | |
| 1374 * ioq | |
| 1375 */ | |
| 1376 pds = pq->pds; | |
| 1377 for (; pds < epds; pds++) { | |
| 1378 PRInt32 osfd = pds->osfd; | |
| 1379 PRInt16 in_flags = pds->in_flags; | |
| 1380 PR_ASSERT(osfd >= 0 || in_flags == 0); | |
| 1381 if (in_flags & _PR_UNIX_POLL_READ) { | |
| 1382 if (--(_PR_FD_READ_CNT(me->cpu))[osfd] == 0) | |
| 1383 FD_CLR(osfd, &_PR_FD_READ_SET(me->cpu)); | |
| 1384 } | |
| 1385 if (in_flags & _PR_UNIX_POLL_WRITE) { | |
| 1386 if (--(_PR_FD_WRITE_CNT(me->cpu))[osfd] == 0) | |
| 1387 FD_CLR(osfd, &_PR_FD_WRITE_SET(me->cpu)); | |
| 1388 } | |
| 1389 if (in_flags & _PR_UNIX_POLL_EXCEPT) { | |
| 1390 if (--(_PR_FD_EXCEPTION_CNT(me->cpu))[osfd] == 0) | |
| 1391 FD_CLR(osfd, &_PR_FD_EXCEPTION_SET(me->cpu)); | |
| 1392 } | |
| 1393 } | |
| 1394 | |
| 1395 _PR_THREAD_LOCK(pq->thr); | |
| 1396 if (pq->thr->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) { | |
| 1397 _PRCPU *cpu = pq->thr->cpu; | |
| 1398 _PR_SLEEPQ_LOCK(pq->thr->cpu); | |
| 1399 _PR_DEL_SLEEPQ(pq->thr, PR_TRUE); | |
| 1400 _PR_SLEEPQ_UNLOCK(pq->thr->cpu); | |
| 1401 | |
| 1402 if (pq->thr->flags & _PR_SUSPENDING) { | |
| 1403 /* | |
| 1404 * set thread state to SUSPENDED; | |
| 1405 * a Resume operation on the thread | |
| 1406 * will move it to the runQ | |
| 1407 */ | |
| 1408 pq->thr->state = _PR_SUSPENDED; | |
| 1409 _PR_MISCQ_LOCK(pq->thr->cpu); | |
| 1410 _PR_ADD_SUSPENDQ(pq->thr, pq->thr->cpu); | |
| 1411 _PR_MISCQ_UNLOCK(pq->thr->cpu); | |
| 1412 } else { | |
| 1413 pri = pq->thr->priority; | |
| 1414 pq->thr->state = _PR_RUNNABLE; | |
| 1415 | |
| 1416 _PR_RUNQ_LOCK(cpu); | |
| 1417 _PR_ADD_RUNQ(pq->thr, cpu, pri); | |
| 1418 _PR_RUNQ_UNLOCK(cpu); | |
| 1419 } | |
| 1420 } | |
| 1421 _PR_THREAD_UNLOCK(pq->thr); | |
| 1422 } else { | |
| 1423 if (pq->timeout < _PR_IOQ_TIMEOUT(me->cpu)) | |
| 1424 _PR_IOQ_TIMEOUT(me->cpu) = pq->timeout; | |
| 1425 if (_PR_IOQ_MAX_OSFD(me->cpu) < pq_max_osfd) | |
| 1426 _PR_IOQ_MAX_OSFD(me->cpu) = pq_max_osfd; | |
| 1427 } | |
| 1428 } | |
| 1429 if (_PR_IS_NATIVE_THREAD_SUPPORTED()) { | |
| 1430 if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_md_pipefd[0]) | |
| 1431 _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0]; | |
| 1432 } | |
| 1433 } | |
| 1434 #endif /* !defined(_PR_USE_POLL) */ | |
| 1435 | |
| 1436 /************************************************************************/ | |
| 1437 | |
| 1438 /* | |
| 1439 ** Called by the scheduler when there is nothing to do. This means that | |
| 1440 ** all threads are blocked on some monitor somewhere. | |
| 1441 ** | |
| 1442 ** Note: this code doesn't release the scheduler lock. | |
| 1443 */ | |
| 1444 /* | |
| 1445 ** Pause the current CPU. longjmp to the cpu's pause stack | |
| 1446 ** | |
| 1447 ** This must be called with the scheduler locked | |
| 1448 */ | |
| 1449 void _MD_PauseCPU(PRIntervalTime ticks) | |
| 1450 { | |
| 1451 PRThread *me = _MD_CURRENT_THREAD(); | |
| 1452 #ifdef _PR_USE_POLL | |
| 1453 int timeout; | |
| 1454 struct pollfd *pollfds; /* an array of pollfd structures */ | |
| 1455 struct pollfd *pollfdPtr; /* a pointer that steps through the array */ | |
| 1456 unsigned long npollfds; /* number of pollfd structures in array */ | |
| 1457 unsigned long pollfds_size; | |
| 1458 int nfd; /* to hold the return value of poll() */ | |
| 1459 #else | |
| 1460 struct timeval timeout, *tvp; | |
| 1461 fd_set r, w, e; | |
| 1462 fd_set *rp, *wp, *ep; | |
| 1463 PRInt32 max_osfd, nfd; | |
| 1464 #endif /* _PR_USE_POLL */ | |
| 1465 PRInt32 rv; | |
| 1466 PRCList *q; | |
| 1467 PRUint32 min_timeout; | |
| 1468 sigset_t oldset; | |
| 1469 #ifdef IRIX | |
| 1470 extern sigset_t ints_off; | |
| 1471 #endif | |
| 1472 | |
| 1473 PR_ASSERT(_PR_MD_GET_INTSOFF() != 0); | |
| 1474 | |
| 1475 _PR_MD_IOQ_LOCK(); | |
| 1476 | |
| 1477 #ifdef _PR_USE_POLL | |
| 1478 /* Build up the pollfd structure array to wait on */ | |
| 1479 | |
| 1480 /* Find out how many pollfd structures are needed */ | |
| 1481 npollfds = _PR_IOQ_OSFD_CNT(me->cpu); | |
| 1482 PR_ASSERT(npollfds >= 0); | |
| 1483 | |
| 1484 /* | |
| 1485 * We use a pipe to wake up a native thread. An fd is needed | |
| 1486 * for the pipe and we poll it for reading. | |
| 1487 */ | |
| 1488 if (_PR_IS_NATIVE_THREAD_SUPPORTED()) { | |
| 1489 npollfds++; | |
| 1490 #ifdef IRIX | |
| 1491 /* | |
| 1492 * On Irix, a second pipe is used to cause the primordial cpu to | |
| 1493          * wakeup and exit, when the process is exiting because of a call | |
| 1494 * to exit/PR_ProcessExit. | |
| 1495 */ | |
| 1496 if (me->cpu->id == 0) { | |
| 1497 npollfds++; | |
| 1498 } | |
| 1499 #endif | |
| 1500 } | |
| 1501 | |
| 1502 /* | |
| 1503      * if the cpu's pollfd array is not big enough, release it and allocate a new one | |
| 1504 */ | |
| 1505 if (npollfds > _PR_IOQ_POLLFDS_SIZE(me->cpu)) { | |
| 1506 if (_PR_IOQ_POLLFDS(me->cpu) != NULL) | |
| 1507 PR_DELETE(_PR_IOQ_POLLFDS(me->cpu)); | |
| 1508 pollfds_size = PR_MAX(_PR_IOQ_MIN_POLLFDS_SIZE(me->cpu), npollfds); | |
| 1509         pollfds = (struct pollfd *) PR_MALLOC(pollfds_size * sizeof(struct pollfd)); | |
| 1510 _PR_IOQ_POLLFDS(me->cpu) = pollfds; | |
| 1511 _PR_IOQ_POLLFDS_SIZE(me->cpu) = pollfds_size; | |
| 1512 } else { | |
| 1513 pollfds = _PR_IOQ_POLLFDS(me->cpu); | |
| 1514 } | |
| 1515 pollfdPtr = pollfds; | |
| 1516 | |
| 1517 /* | |
| 1518 * If we need to poll the pipe for waking up a native thread, | |
| 1519 * the pipe's fd is the first element in the pollfds array. | |
| 1520 */ | |
| 1521 if (_PR_IS_NATIVE_THREAD_SUPPORTED()) { | |
| 1522 pollfdPtr->fd = _pr_md_pipefd[0]; | |
| 1523 pollfdPtr->events = POLLIN; | |
| 1524 pollfdPtr++; | |
| 1525 #ifdef IRIX | |
| 1526 /* | |
| 1527 * On Irix, the second element is the exit pipe | |
| 1528 */ | |
| 1529 if (me->cpu->id == 0) { | |
| 1530 pollfdPtr->fd = _pr_irix_primoridal_cpu_fd[0]; | |
| 1531 pollfdPtr->events = POLLIN; | |
| 1532 pollfdPtr++; | |
| 1533 } | |
| 1534 #endif | |
| 1535 } | |
| 1536 | |
| 1537 min_timeout = PR_INTERVAL_NO_TIMEOUT; | |
| 1538 for (q = _PR_IOQ(me->cpu).next; q != &_PR_IOQ(me->cpu); q = q->next) { | |
| 1539 PRPollQueue *pq = _PR_POLLQUEUE_PTR(q); | |
| 1540 _PRUnixPollDesc *pds = pq->pds; | |
| 1541 _PRUnixPollDesc *epds = pds + pq->npds; | |
| 1542 | |
| 1543 if (pq->timeout < min_timeout) { | |
| 1544 min_timeout = pq->timeout; | |
| 1545 } | |
| 1546 for (; pds < epds; pds++, pollfdPtr++) { | |
| 1547 /* | |
| 1548 * Assert that the pollfdPtr pointer does not go | |
| 1549 * beyond the end of the pollfds array | |
| 1550 */ | |
| 1551 PR_ASSERT(pollfdPtr < pollfds + npollfds); | |
| 1552 pollfdPtr->fd = pds->osfd; | |
| 1553 /* direct copy of poll flags */ | |
| 1554 pollfdPtr->events = pds->in_flags; | |
| 1555 } | |
| 1556 } | |
| 1557 _PR_IOQ_TIMEOUT(me->cpu) = min_timeout; | |
| 1558 #else | |
| 1559 /* | |
| 1560      * assignment of fd_sets | |
| 1561 */ | |
| 1562 r = _PR_FD_READ_SET(me->cpu); | |
| 1563 w = _PR_FD_WRITE_SET(me->cpu); | |
| 1564 e = _PR_FD_EXCEPTION_SET(me->cpu); | |
| 1565 | |
| 1566 rp = &r; | |
| 1567 wp = &w; | |
| 1568 ep = &e; | |
| 1569 | |
| 1570 max_osfd = _PR_IOQ_MAX_OSFD(me->cpu) + 1; | |
| 1571 min_timeout = _PR_IOQ_TIMEOUT(me->cpu); | |
| 1572 #endif /* _PR_USE_POLL */ | |
| 1573 /* | |
| 1574 ** Compute the minimum timeout value: make it the smaller of the | |
| 1575 ** timeouts specified by the i/o pollers or the timeout of the first | |
| 1576 ** sleeping thread. | |
| 1577 */ | |
| 1578 q = _PR_SLEEPQ(me->cpu).next; | |
| 1579 | |
| 1580 if (q != &_PR_SLEEPQ(me->cpu)) { | |
| 1581 PRThread *t = _PR_THREAD_PTR(q); | |
| 1582 | |
| 1583 if (t->sleep < min_timeout) { | |
| 1584 min_timeout = t->sleep; | |
| 1585 } | |
| 1586 } | |
| 1587 if (min_timeout > ticks) { | |
| 1588 min_timeout = ticks; | |
| 1589 } | |
| 1590 | |
| 1591 #ifdef _PR_USE_POLL | |
| 1592 if (min_timeout == PR_INTERVAL_NO_TIMEOUT) | |
| 1593 timeout = -1; | |
| 1594 else | |
| 1595 timeout = PR_IntervalToMilliseconds(min_timeout); | |
| 1596 #else | |
| 1597 if (min_timeout == PR_INTERVAL_NO_TIMEOUT) { | |
| 1598 tvp = NULL; | |
| 1599 } else { | |
| 1600 timeout.tv_sec = PR_IntervalToSeconds(min_timeout); | |
| 1601 timeout.tv_usec = PR_IntervalToMicroseconds(min_timeout) | |
| 1602 % PR_USEC_PER_SEC; | |
| 1603 tvp = &timeout; | |
| 1604 } | |
| 1605 #endif /* _PR_USE_POLL */ | |
| 1606 | |
| 1607 _PR_MD_IOQ_UNLOCK(); | |
| 1608 _MD_CHECK_FOR_EXIT(); | |
| 1609 /* | |
| 1610 * check for i/o operations | |
| 1611 */ | |
| 1612 #ifndef _PR_NO_CLOCK_TIMER | |
| 1613 /* | |
| 1614 * Disable the clock interrupts while we are in select, if clock interrupts | |
| 1615 * are enabled. Otherwise, when the select/poll calls are interrupted, the | |
| 1616  * timer value starts ticking from zero again when the system call is restarted. | |
| 1617 */ | |
| 1618 #ifdef IRIX | |
| 1619 /* | |
| 1620      * SIGCHLD signal is used on Irix to detect the termination of an | |
| 1621 * sproc by SIGSEGV, SIGBUS or SIGABRT signals when | |
| 1622 * _nspr_terminate_on_error is set. | |
| 1623 */ | |
| 1624 if ((!_nspr_noclock) || (_nspr_terminate_on_error)) | |
| 1625 #else | |
| 1626 if (!_nspr_noclock) | |
| 1627 #endif /* IRIX */ | |
| 1628 #ifdef IRIX | |
| 1629 sigprocmask(SIG_BLOCK, &ints_off, &oldset); | |
| 1630 #else | |
| 1631 PR_ASSERT(sigismember(&timer_set, SIGALRM)); | |
| 1632 sigprocmask(SIG_BLOCK, &timer_set, &oldset); | |
| 1633 #endif /* IRIX */ | |
| 1634 #endif /* !_PR_NO_CLOCK_TIMER */ | |
| 1635 | |
| 1636 #ifndef _PR_USE_POLL | |
| 1637 PR_ASSERT(FD_ISSET(_pr_md_pipefd[0],rp)); | |
| 1638 nfd = _MD_SELECT(max_osfd, rp, wp, ep, tvp); | |
| 1639 #else | |
| 1640 nfd = _MD_POLL(pollfds, npollfds, timeout); | |
| 1641 #endif /* !_PR_USE_POLL */ | |
| 1642 | |
| 1643 #ifndef _PR_NO_CLOCK_TIMER | |
| 1644 #ifdef IRIX | |
| 1645 if ((!_nspr_noclock) || (_nspr_terminate_on_error)) | |
| 1646 #else | |
| 1647 if (!_nspr_noclock) | |
| 1648 #endif /* IRIX */ | |
| 1649 sigprocmask(SIG_SETMASK, &oldset, 0); | |
| 1650 #endif /* !_PR_NO_CLOCK_TIMER */ | |
| 1651 | |
| 1652 _MD_CHECK_FOR_EXIT(); | |
| 1653 | |
| 1654 #ifdef IRIX | |
| 1655 _PR_MD_primordial_cpu(); | |
| 1656 #endif | |
| 1657 | |
| 1658 _PR_MD_IOQ_LOCK(); | |
| 1659 /* | |
| 1660 ** Notify monitors that are associated with the selected descriptors. | |
| 1661 */ | |
| 1662 #ifdef _PR_USE_POLL | |
| 1663 if (nfd > 0) { | |
| 1664 pollfdPtr = pollfds; | |
| 1665 if (_PR_IS_NATIVE_THREAD_SUPPORTED()) { | |
| 1666 /* | |
| 1667 * Assert that the pipe is the first element in the | |
| 1668 * pollfds array. | |
| 1669 */ | |
| 1670 PR_ASSERT(pollfds[0].fd == _pr_md_pipefd[0]); | |
| 1671 if ((pollfds[0].revents & POLLIN) && (nfd == 1)) { | |
| 1672 /* | |
| 1673 * woken up by another thread; read all the data | |
| 1674 * in the pipe to empty the pipe | |
| 1675 */ | |
| 1676 while ((rv = read(_pr_md_pipefd[0], _pr_md_pipebuf, | |
| 1677 PIPE_BUF)) == PIPE_BUF){ | |
| 1678 } | |
| 1679 PR_ASSERT((rv > 0) || ((rv == -1) && (errno == EAGAIN))); | |
| 1680 } | |
| 1681 pollfdPtr++; | |
| 1682 #ifdef IRIX | |
| 1683 /* | |
| 1684              * On Irix, check to see if the primordial cpu needs to exit | |
| 1685 * to cause the process to terminate | |
| 1686 */ | |
| 1687 if (me->cpu->id == 0) { | |
| 1688 PR_ASSERT(pollfds[1].fd == _pr_irix_primoridal_cpu_fd[0]); | |
| 1689 if (pollfdPtr->revents & POLLIN) { | |
| 1690 if (_pr_irix_process_exit) { | |
| 1691 /* | |
| 1692                          * process exit due to a call to PR_ProcessExit | |
| 1693 */ | |
| 1694 prctl(PR_SETEXITSIG, SIGKILL); | |
| 1695                         _exit(_pr_irix_process_exit_code); | |
| 1696 } else { | |
| 1697                         while ((rv = read(_pr_irix_primoridal_cpu_fd[0], | |
| 1698                                 _pr_md_pipebuf, PIPE_BUF)) == PIPE_BUF) { | |
| 1699 } | |
| 1700 PR_ASSERT(rv > 0); | |
| 1701 } | |
| 1702 } | |
| 1703 pollfdPtr++; | |
| 1704 } | |
| 1705 #endif | |
| 1706 } | |
| 1707 for (q = _PR_IOQ(me->cpu).next; q != &_PR_IOQ(me->cpu); q = q->next) { | |
| 1708 PRPollQueue *pq = _PR_POLLQUEUE_PTR(q); | |
| 1709 PRBool notify = PR_FALSE; | |
| 1710 _PRUnixPollDesc *pds = pq->pds; | |
| 1711 _PRUnixPollDesc *epds = pds + pq->npds; | |
| 1712 | |
| 1713 for (; pds < epds; pds++, pollfdPtr++) { | |
| 1714 /* | |
| 1715 * Assert that the pollfdPtr pointer does not go beyond | |
| 1716 * the end of the pollfds array. | |
| 1717 */ | |
| 1718 PR_ASSERT(pollfdPtr < pollfds + npollfds); | |
| 1719 /* | |
| 1720 * Assert that the fd's in the pollfds array (stepped | |
| 1721 * through by pollfdPtr) are in the same order as | |
| 1722 * the fd's in _PR_IOQ() (stepped through by q and pds). | |
| 1723 * This is how the pollfds array was created earlier. | |
| 1724 */ | |
| 1725 PR_ASSERT(pollfdPtr->fd == pds->osfd); | |
| 1726 pds->out_flags = pollfdPtr->revents; | |
| 1727 /* Negative fd's are ignored by poll() */ | |
| 1728 if (pds->osfd >= 0 && pds->out_flags) { | |
| 1729 notify = PR_TRUE; | |
| 1730 } | |
| 1731 } | |
| 1732 if (notify) { | |
| 1733 PRIntn pri; | |
| 1734 PRThread *thred; | |
| 1735 | |
| 1736 PR_REMOVE_LINK(&pq->links); | |
| 1737 pq->on_ioq = PR_FALSE; | |
| 1738 | |
| 1739 thred = pq->thr; | |
| 1740 _PR_THREAD_LOCK(thred); | |
| 1741 if (pq->thr->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) { | |
| 1742 _PRCPU *cpu = pq->thr->cpu; | |
| 1743 _PR_SLEEPQ_LOCK(pq->thr->cpu); | |
| 1744 _PR_DEL_SLEEPQ(pq->thr, PR_TRUE); | |
| 1745 _PR_SLEEPQ_UNLOCK(pq->thr->cpu); | |
| 1746 | |
| 1747 if (pq->thr->flags & _PR_SUSPENDING) { | |
| 1748 /* | |
| 1749 * set thread state to SUSPENDED; | |
| 1750 * a Resume operation on the thread | |
| 1751 * will move it to the runQ | |
| 1752 */ | |
| 1753 pq->thr->state = _PR_SUSPENDED; | |
| 1754 _PR_MISCQ_LOCK(pq->thr->cpu); | |
| 1755 _PR_ADD_SUSPENDQ(pq->thr, pq->thr->cpu); | |
| 1756 _PR_MISCQ_UNLOCK(pq->thr->cpu); | |
| 1757 } else { | |
| 1758 pri = pq->thr->priority; | |
| 1759 pq->thr->state = _PR_RUNNABLE; | |
| 1760 | |
| 1761 _PR_RUNQ_LOCK(cpu); | |
| 1762 _PR_ADD_RUNQ(pq->thr, cpu, pri); | |
| 1763 _PR_RUNQ_UNLOCK(cpu); | |
| 1764 if (_pr_md_idle_cpus > 1) | |
| 1765 _PR_MD_WAKEUP_WAITER(thred); | |
| 1766 } | |
| 1767 } | |
| 1768 _PR_THREAD_UNLOCK(thred); | |
| 1769 _PR_IOQ_OSFD_CNT(me->cpu) -= pq->npds; | |
| 1770 PR_ASSERT(_PR_IOQ_OSFD_CNT(me->cpu) >= 0); | |
| 1771 } | |
| 1772 } | |
| 1773 } else if (nfd == -1) { | |
| 1774 PR_LOG(_pr_io_lm, PR_LOG_MAX, ("poll() failed with errno %d", errno)); | |
| 1775 } | |
| 1776 | |
| 1777 #else | |
| 1778 if (nfd > 0) { | |
| 1779 q = _PR_IOQ(me->cpu).next; | |
| 1780 _PR_IOQ_MAX_OSFD(me->cpu) = -1; | |
| 1781 _PR_IOQ_TIMEOUT(me->cpu) = PR_INTERVAL_NO_TIMEOUT; | |
| 1782 while (q != &_PR_IOQ(me->cpu)) { | |
| 1783 PRPollQueue *pq = _PR_POLLQUEUE_PTR(q); | |
| 1784 PRBool notify = PR_FALSE; | |
| 1785 _PRUnixPollDesc *pds = pq->pds; | |
| 1786 _PRUnixPollDesc *epds = pds + pq->npds; | |
| 1787 PRInt32 pq_max_osfd = -1; | |
| 1788 | |
| 1789 q = q->next; | |
| 1790 for (; pds < epds; pds++) { | |
| 1791 PRInt32 osfd = pds->osfd; | |
| 1792 PRInt16 in_flags = pds->in_flags; | |
| 1793 PRInt16 out_flags = 0; | |
| 1794 PR_ASSERT(osfd >= 0 || in_flags == 0); | |
| 1795 if ((in_flags & _PR_UNIX_POLL_READ) && FD_ISSET(osfd, rp)) { | |
| 1796 out_flags |= _PR_UNIX_POLL_READ; | |
| 1797 } | |
| 1798 if ((in_flags & _PR_UNIX_POLL_WRITE) && FD_ISSET(osfd, wp)) { | |
| 1799 out_flags |= _PR_UNIX_POLL_WRITE; | |
| 1800 } | |
| 1801 if ((in_flags & _PR_UNIX_POLL_EXCEPT) && FD_ISSET(osfd, ep)) { | |
| 1802 out_flags |= _PR_UNIX_POLL_EXCEPT; | |
| 1803 } | |
| 1804 pds->out_flags = out_flags; | |
| 1805 if (out_flags) { | |
| 1806 notify = PR_TRUE; | |
| 1807 } | |
| 1808 if (osfd > pq_max_osfd) { | |
| 1809 pq_max_osfd = osfd; | |
| 1810 } | |
| 1811 } | |
| 1812 if (notify == PR_TRUE) { | |
| 1813 PRIntn pri; | |
| 1814 PRThread *thred; | |
| 1815 | |
| 1816 PR_REMOVE_LINK(&pq->links); | |
| 1817 pq->on_ioq = PR_FALSE; | |
| 1818 | |
| 1819 /* | |
| 1820 * Decrement the count of descriptors for each descriptor/event | |
| 1821 * because this I/O request is being removed from the | |
| 1822 * ioq | |
| 1823 */ | |
| 1824 pds = pq->pds; | |
| 1825 for (; pds < epds; pds++) { | |
| 1826 PRInt32 osfd = pds->osfd; | |
| 1827 PRInt16 in_flags = pds->in_flags; | |
| 1828 PR_ASSERT(osfd >= 0 || in_flags == 0); | |
| 1829 if (in_flags & _PR_UNIX_POLL_READ) { | |
| 1830 if (--(_PR_FD_READ_CNT(me->cpu))[osfd] == 0) | |
| 1831 FD_CLR(osfd, &_PR_FD_READ_SET(me->cpu)); | |
| 1832 } | |
| 1833 if (in_flags & _PR_UNIX_POLL_WRITE) { | |
| 1834 if (--(_PR_FD_WRITE_CNT(me->cpu))[osfd] == 0) | |
| 1835 FD_CLR(osfd, &_PR_FD_WRITE_SET(me->cpu)); | |
| 1836 } | |
| 1837 if (in_flags & _PR_UNIX_POLL_EXCEPT) { | |
| 1838 if (--(_PR_FD_EXCEPTION_CNT(me->cpu))[osfd] == 0) | |
| 1839 FD_CLR(osfd, &_PR_FD_EXCEPTION_SET(me->cpu)); | |
| 1840 } | |
| 1841 } | |
| 1842 | |
| 1843 /* | |
| 1844 * Because this thread can run on a different cpu right | |
| 1845 * after being added to the run queue, do not dereference | |
| 1846 * pq | |
| 1847 */ | |
| 1848 thred = pq->thr; | |
| 1849 _PR_THREAD_LOCK(thred); | |
| 1850 if (pq->thr->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) { | |
| 1851 _PRCPU *cpu = thred->cpu; | |
| 1852 _PR_SLEEPQ_LOCK(pq->thr->cpu); | |
| 1853 _PR_DEL_SLEEPQ(pq->thr, PR_TRUE); | |
| 1854 _PR_SLEEPQ_UNLOCK(pq->thr->cpu); | |
| 1855 | |
| 1856 if (pq->thr->flags & _PR_SUSPENDING) { | |
| 1857 /* | |
| 1858 * set thread state to SUSPENDED; | |
| 1859 * a Resume operation on the thread | |
| 1860 * will move it to the runQ | |
| 1861 */ | |
| 1862 pq->thr->state = _PR_SUSPENDED; | |
| 1863 _PR_MISCQ_LOCK(pq->thr->cpu); | |
| 1864 _PR_ADD_SUSPENDQ(pq->thr, pq->thr->cpu); | |
| 1865 _PR_MISCQ_UNLOCK(pq->thr->cpu); | |
| 1866 } else { | |
| 1867 pri = pq->thr->priority; | |
| 1868 pq->thr->state = _PR_RUNNABLE; | |
| 1869 | |
| 1870 pq->thr->cpu = cpu; | |
| 1871 _PR_RUNQ_LOCK(cpu); | |
| 1872 _PR_ADD_RUNQ(pq->thr, cpu, pri); | |
| 1873 _PR_RUNQ_UNLOCK(cpu); | |
| 1874 if (_pr_md_idle_cpus > 1) | |
| 1875 _PR_MD_WAKEUP_WAITER(thred); | |
| 1876 } | |
| 1877 } | |
| 1878 _PR_THREAD_UNLOCK(thred); | |
| 1879 } else { | |
| 1880 if (pq->timeout < _PR_IOQ_TIMEOUT(me->cpu)) | |
| 1881 _PR_IOQ_TIMEOUT(me->cpu) = pq->timeout; | |
| 1882 if (_PR_IOQ_MAX_OSFD(me->cpu) < pq_max_osfd) | |
| 1883 _PR_IOQ_MAX_OSFD(me->cpu) = pq_max_osfd; | |
| 1884 } | |
| 1885 } | |
| 1886 if (_PR_IS_NATIVE_THREAD_SUPPORTED()) { | |
| 1887 if ((FD_ISSET(_pr_md_pipefd[0], rp)) && (nfd == 1)) { | |
| 1888 /* | |
| 1889 * woken up by another thread; read all the data | |
| 1890 * in the pipe to empty the pipe | |
| 1891 */ | |
| 1892 while ((rv = | |
| 1893 read(_pr_md_pipefd[0], _pr_md_pipebuf, PIPE_BUF)) | |
| 1894 == PIPE_BUF){ | |
| 1895 } | |
| 1896 PR_ASSERT((rv > 0) || | |
| 1897 ((rv == -1) && (errno == EAGAIN))); | |
| 1898 } | |
| 1899 if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_md_pipefd[0]) | |
| 1900 _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0]; | |
| 1901 #ifdef IRIX | |
| 1902 if ((me->cpu->id == 0) && | |
| 1903 (FD_ISSET(_pr_irix_primoridal_cpu_fd[0], rp))) { | |
| 1904 if (_pr_irix_process_exit) { | |
| 1905 /* | |
| 1906 * process exit due to a call to PR_ProcessExit | |
| 1907 */ | |
| 1908 prctl(PR_SETEXITSIG, SIGKILL); | |
| 1909 _exit(_pr_irix_process_exit_code); | |
| 1910 } else { | |
| 1911 while ((rv = read(_pr_irix_primoridal_cpu_fd[0], | |
| 1912 _pr_md_pipebuf, PIPE_BUF)) == PIPE_BUF) { | |
| 1913 } | |
| 1914 PR_ASSERT(rv > 0); | |
| 1915 } | |
| 1916 } | |
| 1917 if (me->cpu->id == 0) { | |
| 1918 if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_irix_primoridal_cpu_fd[0]) | |
| 1919 _PR_IOQ_MAX_OSFD(me->cpu) = _pr_irix_primoridal_cpu_fd[0]; | |
| 1920 } | |
| 1921 #endif | |
| 1922 } | |
| 1923 } else if (nfd < 0) { | |
| 1924 if (errno == EBADF) { | |
| 1925 FindBadFDs(); | |
| 1926 } else { | |
| 1927 PR_LOG(_pr_io_lm, PR_LOG_MAX, ("select() failed with errno %d", | |
| 1928 errno)); | |
| 1929 } | |
| 1930 } else { | |
| 1931 PR_ASSERT(nfd == 0); | |
| 1932 /* | |
| 1933 * compute the new value of _PR_IOQ_TIMEOUT | |
| 1934 */ | |
| 1935 q = _PR_IOQ(me->cpu).next; | |
| 1936 _PR_IOQ_MAX_OSFD(me->cpu) = -1; | |
| 1937 _PR_IOQ_TIMEOUT(me->cpu) = PR_INTERVAL_NO_TIMEOUT; | |
| 1938 while (q != &_PR_IOQ(me->cpu)) { | |
| 1939 PRPollQueue *pq = _PR_POLLQUEUE_PTR(q); | |
| 1940 _PRUnixPollDesc *pds = pq->pds; | |
| 1941 _PRUnixPollDesc *epds = pds + pq->npds; | |
| 1942 PRInt32 pq_max_osfd = -1; | |
| 1943 | |
| 1944 q = q->next; | |
| 1945 for (; pds < epds; pds++) { | |
| 1946 if (pds->osfd > pq_max_osfd) { | |
| 1947 pq_max_osfd = pds->osfd; | |
| 1948 } | |
| 1949 } | |
| 1950 if (pq->timeout < _PR_IOQ_TIMEOUT(me->cpu)) | |
| 1951 _PR_IOQ_TIMEOUT(me->cpu) = pq->timeout; | |
| 1952 if (_PR_IOQ_MAX_OSFD(me->cpu) < pq_max_osfd) | |
| 1953 _PR_IOQ_MAX_OSFD(me->cpu) = pq_max_osfd; | |
| 1954 } | |
| 1955 if (_PR_IS_NATIVE_THREAD_SUPPORTED()) { | |
| 1956 if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_md_pipefd[0]) | |
| 1957 _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0]; | |
| 1958 } | |
| 1959 } | |
| 1960 #endif /* _PR_USE_POLL */ | |
| 1961 _PR_MD_IOQ_UNLOCK(); | |
| 1962 } | |
| 1963 | |
| 1964 void _MD_Wakeup_CPUs() | |
| 1965 { | |
| 1966 PRInt32 rv, data; | |
| 1967 | |
| 1968 data = 0; | |
| 1969 rv = write(_pr_md_pipefd[1], &data, 1); | |
| 1970 | |
| 1971 while ((rv < 0) && (errno == EAGAIN)) { | |
| 1972 /* | |
| 1973 * pipe full, read all data in pipe to empty it | |
| 1974 */ | |
| 1975 while ((rv = | |
| 1976 read(_pr_md_pipefd[0], _pr_md_pipebuf, PIPE_BUF)) | |
| 1977 == PIPE_BUF) { | |
| 1978 } | |
| 1979 PR_ASSERT((rv > 0) || | |
| 1980 ((rv == -1) && (errno == EAGAIN))); | |
| 1981 rv = write(_pr_md_pipefd[1], &data, 1); | |
| 1982 } | |
| 1983 } | |
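| /* | |
| ** The write/drain pair above is the classic self-pipe wakeup. A minimal | |
| ** standalone sketch of the same pattern, assuming only a POSIX pipe whose | |
| ** ends have been made O_NONBLOCK (the names below are illustrative only): | |
| ** | |
| **     static int wake_fds[2];            // [0] read end, [1] write end | |
| ** | |
| **     void wake_poller(void) | |
| **     { | |
| **         char b = 0, buf[512]; | |
| **         while (write(wake_fds[1], &b, 1) < 0 && errno == EAGAIN) { | |
| **             // pipe is full: drain it so the next write can succeed | |
| **             while (read(wake_fds[0], buf, sizeof(buf)) == sizeof(buf)) {} | |
| **         } | |
| **     } | |
| */ | |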
| 1984 | |
| 1985 | |
| 1986 void _MD_InitCPUS() | |
| 1987 { | |
| 1988 PRInt32 rv, flags; | |
| 1989 PRThread *me = _MD_CURRENT_THREAD(); | |
| 1990 | |
| 1991 rv = pipe(_pr_md_pipefd); | |
| 1992 PR_ASSERT(rv == 0); | |
| 1993 _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0]; | |
| 1994 #ifndef _PR_USE_POLL | |
| 1995 FD_SET(_pr_md_pipefd[0], &_PR_FD_READ_SET(me->cpu)); | |
| 1996 #endif | |
| 1997 | |
| 1998 flags = fcntl(_pr_md_pipefd[0], F_GETFL, 0); | |
| 1999 fcntl(_pr_md_pipefd[0], F_SETFL, flags | O_NONBLOCK); | |
| 2000 flags = fcntl(_pr_md_pipefd[1], F_GETFL, 0); | |
| 2001 fcntl(_pr_md_pipefd[1], F_SETFL, flags | O_NONBLOCK); | |
| 2002 } | |
| 2003 | |
| 2004 /* | |
| 2005 ** Unix SIGALRM (clock) signal handler | |
| 2006 */ | |
| 2007 static void ClockInterruptHandler() | |
| 2008 { | |
| 2009 int olderrno; | |
| 2010 PRUintn pri; | |
| 2011 _PRCPU *cpu = _PR_MD_CURRENT_CPU(); | |
| 2012 PRThread *me = _MD_CURRENT_THREAD(); | |
| 2013 | |
| 2014 #ifdef SOLARIS | |
| 2015 if (!me || _PR_IS_NATIVE_THREAD(me)) { | |
| 2016 _pr_primordialCPU->u.missed[_pr_primordialCPU->where] |= _PR_MISSED_CLOCK; | |
| 2017 return; | |
| 2018 } | |
| 2019 #endif | |
| 2020 | |
| 2021 if (_PR_MD_GET_INTSOFF() != 0) { | |
| 2022 cpu->u.missed[cpu->where] |= _PR_MISSED_CLOCK; | |
| 2023 return; | |
| 2024 } | |
| 2025 _PR_MD_SET_INTSOFF(1); | |
| 2026 | |
| 2027 olderrno = errno; | |
| 2028 _PR_ClockInterrupt(); | |
| 2029 errno = olderrno; | |
| 2030 | |
| 2031 /* | |
| 2032 ** If the interrupt wants a resched or if some other thread at | |
| 2033 ** the same priority needs the cpu, reschedule. | |
| 2034 */ | |
| 2035 pri = me->priority; | |
| 2036 if ((cpu->u.missed[3] || (_PR_RUNQREADYMASK(me->cpu) >> pri))) { | |
| 2037 #ifdef _PR_NO_PREEMPT | |
| 2038 cpu->resched = PR_TRUE; | |
| 2039 if (pr_interruptSwitchHook) { | |
| 2040 (*pr_interruptSwitchHook)(pr_interruptSwitchHookArg); | |
| 2041 } | |
| 2042 #else /* _PR_NO_PREEMPT */ | |
| 2043 /* | |
| 2044 ** Re-enable unix interrupts (so that we can use | |
| 2045 ** setjmp/longjmp for context switching without having to | |
| 2046 ** worry about the signal state) | |
| 2047 */ | |
| 2048 sigprocmask(SIG_SETMASK, &empty_set, 0); | |
| 2049 PR_LOG(_pr_sched_lm, PR_LOG_MIN, ("clock caused context switch")); | |
| 2050 | |
| 2051 if(!(me->flags & _PR_IDLE_THREAD)) { | |
| 2052 _PR_THREAD_LOCK(me); | |
| 2053 me->state = _PR_RUNNABLE; | |
| 2054 me->cpu = cpu; | |
| 2055 _PR_RUNQ_LOCK(cpu); | |
| 2056 _PR_ADD_RUNQ(me, cpu, pri); | |
| 2057 _PR_RUNQ_UNLOCK(cpu); | |
| 2058 _PR_THREAD_UNLOCK(me); | |
| 2059 } else | |
| 2060 me->state = _PR_RUNNABLE; | |
| 2061 _MD_SWITCH_CONTEXT(me); | |
| 2062 PR_LOG(_pr_sched_lm, PR_LOG_MIN, ("clock back from context switch")); | |
| 2063 #endif /* _PR_NO_PREEMPT */ | |
| 2064 } | |
| 2065 /* | |
| 2066 * Because this thread could be running on a different cpu after | |
| 2067 * a context switch the current cpu should be accessed and the | |
| 2068 * value of the 'cpu' variable should not be used. | |
| 2069 */ | |
| 2070 _PR_MD_SET_INTSOFF(0); | |
| 2071 } | |
| 2072 | |
| 2073 /* | |
| 2074 * On HP-UX 9, we have to use the sigvector() interface to restart | |
| 2075 * interrupted system calls, because sigaction() does not have the | |
| 2076 * SA_RESTART flag. | |
| 2077 */ | |
| 2078 | |
| 2079 #ifdef HPUX9 | |
| 2080 static void HPUX9_ClockInterruptHandler( | |
| 2081 int sig, | |
| 2082 int code, | |
| 2083 struct sigcontext *scp) | |
| 2084 { | |
| 2085 ClockInterruptHandler(); | |
| 2086 scp->sc_syscall_action = SIG_RESTART; | |
| 2087 } | |
| 2088 #endif /* HPUX9 */ | |
| 2089 | |
| 2090 /* # of milliseconds per clock tick that we will use */ | |
| 2091 #define MSEC_PER_TICK 50 | |
| 2092 | |
| 2093 | |
| 2094 void _MD_StartInterrupts() | |
| 2095 { | |
| 2096 char *eval; | |
| 2097 | |
| 2098 if ((eval = getenv("NSPR_NOCLOCK")) != NULL) { | |
| 2099 if (atoi(eval) == 0) | |
| 2100 _nspr_noclock = 0; | |
| 2101 else | |
| 2102 _nspr_noclock = 1; | |
| 2103 } | |
| 2104 | |
| 2105 #ifndef _PR_NO_CLOCK_TIMER | |
| 2106 if (!_nspr_noclock) { | |
| 2107 _MD_EnableClockInterrupts(); | |
| 2108 } | |
| 2109 #endif | |
| 2110 } | |
| 2111 | |
| 2112 void _MD_StopInterrupts() | |
| 2113 { | |
| 2114 sigprocmask(SIG_BLOCK, &timer_set, 0); | |
| 2115 } | |
| 2116 | |
| 2117 void _MD_EnableClockInterrupts() | |
| 2118 { | |
| 2119 struct itimerval itval; | |
| 2120 extern PRUintn _pr_numCPU; | |
| 2121 #ifdef HPUX9 | |
| 2122 struct sigvec vec; | |
| 2123 | |
| 2124 vec.sv_handler = (void (*)()) HPUX9_ClockInterruptHandler; | |
| 2125 vec.sv_mask = 0; | |
| 2126 vec.sv_flags = 0; | |
| 2127 sigvector(SIGALRM, &vec, 0); | |
| 2128 #else | |
| 2129 struct sigaction vtact; | |
| 2130 | |
| 2131 vtact.sa_handler = (void (*)()) ClockInterruptHandler; | |
| 2132 sigemptyset(&vtact.sa_mask); | |
| 2133 vtact.sa_flags = SA_RESTART; | |
| 2134 sigaction(SIGALRM, &vtact, 0); | |
| 2135 #endif /* HPUX9 */ | |
| 2136 | |
| 2137 PR_ASSERT(_pr_numCPU == 1); | |
| 2138 itval.it_interval.tv_sec = 0; | |
| 2139 itval.it_interval.tv_usec = MSEC_PER_TICK * PR_USEC_PER_MSEC; | |
| 2140 itval.it_value = itval.it_interval; | |
| 2141 setitimer(ITIMER_REAL, &itval, 0); | |
| 2142 } | |
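| /* | |
| ** Worked example of the timer arithmetic above, assuming PR_USEC_PER_MSEC | |
| ** is 1000: it_interval.tv_usec = MSEC_PER_TICK * PR_USEC_PER_MSEC | |
| **                              = 50 * 1000 = 50000 microseconds, | |
| ** so SIGALRM is delivered roughly every 50 ms, i.e. about 20 ticks/second. | |
| */ | |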
| 2143 | |
| 2144 void _MD_DisableClockInterrupts() | |
| 2145 { | |
| 2146 struct itimerval itval; | |
| 2147 extern PRUintn _pr_numCPU; | |
| 2148 | |
| 2149 PR_ASSERT(_pr_numCPU == 1); | |
| 2150 itval.it_interval.tv_sec = 0; | |
| 2151 itval.it_interval.tv_usec = 0; | |
| 2152 itval.it_value = itval.it_interval; | |
| 2153 setitimer(ITIMER_REAL, &itval, 0); | |
| 2154 } | |
| 2155 | |
| 2156 void _MD_BlockClockInterrupts() | |
| 2157 { | |
| 2158 sigprocmask(SIG_BLOCK, &timer_set, 0); | |
| 2159 } | |
| 2160 | |
| 2161 void _MD_UnblockClockInterrupts() | |
| 2162 { | |
| 2163 sigprocmask(SIG_UNBLOCK, &timer_set, 0); | |
| 2164 } | |
| 2165 | |
| 2166 void _MD_MakeNonblock(PRFileDesc *fd) | |
| 2167 { | |
| 2168 PRInt32 osfd = fd->secret->md.osfd; | |
| 2169 int flags; | |
| 2170 | |
| 2171 if (osfd <= 2) { | |
| 2172 /* Don't mess around with stdin, stdout or stderr */ | |
| 2173 return; | |
| 2174 } | |
| 2175 flags = fcntl(osfd, F_GETFL, 0); | |
| 2176 | |
| 2177 /* | |
| 2178 * Use O_NONBLOCK (POSIX-style non-blocking I/O) whenever possible. | |
| 2179 * On SunOS 4, we must use FNDELAY (BSD-style non-blocking I/O), | |
| 2180 * otherwise connect() still blocks and can be interrupted by SIGALRM. | |
| 2181 */ | |
| 2182 | |
| 2183 fcntl(osfd, F_SETFL, flags | O_NONBLOCK); | |
| 2184 } | |
| 2185 | |
| 2186 PRInt32 _MD_open(const char *name, PRIntn flags, PRIntn mode) | |
| 2187 { | |
| 2188 PRInt32 osflags; | |
| 2189 PRInt32 rv, err; | |
| 2190 | |
| 2191 if (flags & PR_RDWR) { | |
| 2192 osflags = O_RDWR; | |
| 2193 } else if (flags & PR_WRONLY) { | |
| 2194 osflags = O_WRONLY; | |
| 2195 } else { | |
| 2196 osflags = O_RDONLY; | |
| 2197 } | |
| 2198 | |
| 2199 if (flags & PR_EXCL) | |
| 2200 osflags |= O_EXCL; | |
| 2201 if (flags & PR_APPEND) | |
| 2202 osflags |= O_APPEND; | |
| 2203 if (flags & PR_TRUNCATE) | |
| 2204 osflags |= O_TRUNC; | |
| 2205 if (flags & PR_SYNC) { | |
| 2206 #if defined(O_SYNC) | |
| 2207 osflags |= O_SYNC; | |
| 2208 #elif defined(O_FSYNC) | |
| 2209 osflags |= O_FSYNC; | |
| 2210 #else | |
| 2211 #error "Neither O_SYNC nor O_FSYNC is defined on this platform" | |
| 2212 #endif | |
| 2213 } | |
| 2214 | |
| 2215 /* | |
| 2216 ** On creations we hold the 'create' lock in order to enforce | |
| 2217 ** the semantics of PR_Rename. (see the latter for more details) | |
| 2218 */ | |
| 2219 if (flags & PR_CREATE_FILE) | |
| 2220 { | |
| 2221 osflags |= O_CREAT; | |
| 2222 if (NULL !=_pr_rename_lock) | |
| 2223 PR_Lock(_pr_rename_lock); | |
| 2224 } | |
| 2225 | |
| 2226 #if defined(ANDROID) | |
| 2227 osflags |= O_LARGEFILE; | |
| 2228 #endif | |
| 2229 | |
| 2230 rv = _md_iovector._open64(name, osflags, mode); | |
| 2231 | |
| 2232 if (rv < 0) { | |
| 2233 err = _MD_ERRNO(); | |
| 2234 _PR_MD_MAP_OPEN_ERROR(err); | |
| 2235 } | |
| 2236 | |
| 2237 if ((flags & PR_CREATE_FILE) && (NULL !=_pr_rename_lock)) | |
| 2238 PR_Unlock(_pr_rename_lock); | |
| 2239 return rv; | |
| 2240 } | |
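| /* | |
| ** A minimal usage sketch of the flag mapping above, assuming the PR_* | |
| ** constants from prio.h; the path and mode are illustrative only: | |
| ** | |
| **     PRIntn flags = PR_WRONLY | PR_CREATE_FILE | PR_TRUNCATE; | |
| **     // maps to O_WRONLY | O_CREAT | O_TRUNC (plus O_LARGEFILE on Android); | |
| **     // PR_CREATE_FILE also brackets the open with _pr_rename_lock | |
| **     PRInt32 osfd = _MD_open("/tmp/example.txt", flags, 0644); | |
| **     if (osfd < 0) { | |
| **         // the Unix errno has already been mapped via _PR_MD_MAP_OPEN_ERROR | |
| **     } | |
| */ | |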
| 2241 | |
| 2242 PRIntervalTime intr_timeout_ticks; | |
| 2243 | |
| 2244 #if defined(SOLARIS) || defined(IRIX) | |
| 2245 static void sigsegvhandler() { | |
| 2246 fprintf(stderr,"Received SIGSEGV\n"); | |
| 2247 fflush(stderr); | |
| 2248 pause(); | |
| 2249 } | |
| 2250 | |
| 2251 static void sigaborthandler() { | |
| 2252 fprintf(stderr,"Received SIGABRT\n"); | |
| 2253 fflush(stderr); | |
| 2254 pause(); | |
| 2255 } | |
| 2256 | |
| 2257 static void sigbushandler() { | |
| 2258 fprintf(stderr,"Received SIGBUS\n"); | |
| 2259 fflush(stderr); | |
| 2260 pause(); | |
| 2261 } | |
| 2262 #endif /* SOLARIS, IRIX */ | |
| 2263 | |
| 2264 #endif /* !defined(_PR_PTHREADS) */ | |
| 2265 | |
| 2266 void _MD_query_fd_inheritable(PRFileDesc *fd) | |
| 2267 { | |
| 2268 int flags; | |
| 2269 | |
| 2270 PR_ASSERT(_PR_TRI_UNKNOWN == fd->secret->inheritable); | |
| 2271 flags = fcntl(fd->secret->md.osfd, F_GETFD, 0); | |
| 2272 PR_ASSERT(-1 != flags); | |
| 2273 fd->secret->inheritable = (flags & FD_CLOEXEC) ? | |
| 2274 _PR_TRI_FALSE : _PR_TRI_TRUE; | |
| 2275 } | |
| 2276 | |
| 2277 PROffset32 _MD_lseek(PRFileDesc *fd, PROffset32 offset, PRSeekWhence whence) | |
| 2278 { | |
| 2279 PROffset32 rv, where; | |
| 2280 | |
| 2281 switch (whence) { | |
| 2282 case PR_SEEK_SET: | |
| 2283 where = SEEK_SET; | |
| 2284 break; | |
| 2285 case PR_SEEK_CUR: | |
| 2286 where = SEEK_CUR; | |
| 2287 break; | |
| 2288 case PR_SEEK_END: | |
| 2289 where = SEEK_END; | |
| 2290 break; | |
| 2291 default: | |
| 2292 PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0); | |
| 2293 rv = -1; | |
| 2294 goto done; | |
| 2295 } | |
| 2296 rv = lseek(fd->secret->md.osfd,offset,where); | |
| 2297 if (rv == -1) | |
| 2298 { | |
| 2299 PRInt32 syserr = _MD_ERRNO(); | |
| 2300 _PR_MD_MAP_LSEEK_ERROR(syserr); | |
| 2301 } | |
| 2302 done: | |
| 2303 return(rv); | |
| 2304 } | |
| 2305 | |
| 2306 PROffset64 _MD_lseek64(PRFileDesc *fd, PROffset64 offset, PRSeekWhence whence) | |
| 2307 { | |
| 2308 PRInt32 where; | |
| 2309 PROffset64 rv; | |
| 2310 | |
| 2311 switch (whence) | |
| 2312 { | |
| 2313 case PR_SEEK_SET: | |
| 2314 where = SEEK_SET; | |
| 2315 break; | |
| 2316 case PR_SEEK_CUR: | |
| 2317 where = SEEK_CUR; | |
| 2318 break; | |
| 2319 case PR_SEEK_END: | |
| 2320 where = SEEK_END; | |
| 2321 break; | |
| 2322 default: | |
| 2323 PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0); | |
| 2324 rv = minus_one; | |
| 2325 goto done; | |
| 2326 } | |
| 2327 rv = _md_iovector._lseek64(fd->secret->md.osfd, offset, where); | |
| 2328 if (LL_EQ(rv, minus_one)) | |
| 2329 { | |
| 2330 PRInt32 syserr = _MD_ERRNO(); | |
| 2331 _PR_MD_MAP_LSEEK_ERROR(syserr); | |
| 2332 } | |
| 2333 done: | |
| 2334 return rv; | |
| 2335 } /* _MD_lseek64 */ | |
| 2336 | |
| 2337 /* | |
| 2338 ** _MD_set_fileinfo_times -- | |
| 2339 ** Set the modifyTime and creationTime of the PRFileInfo | |
| 2340 ** structure using the values in struct stat. | |
| 2341 ** | |
| 2342 ** _MD_set_fileinfo64_times -- | |
| 2343 ** Set the modifyTime and creationTime of the PRFileInfo64 | |
| 2344 ** structure using the values in _MDStat64. | |
| 2345 */ | |
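| /* | |
| ** Both helpers compute the same quantity; as a worked formula (assuming the | |
| ** timestamps fit in a PRInt64): | |
| ** | |
| **     modifyTime   = mtime_seconds * PR_USEC_PER_SEC + mtime_nanoseconds / 1000 | |
| **     creationTime = ctime_seconds * PR_USEC_PER_SEC + ctime_nanoseconds / 1000 | |
| ** | |
| ** i.e. microseconds since the epoch, the same unit PR_Now() returns. | |
| */ | |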
| 2346 | |
| 2347 #if defined(_PR_STAT_HAS_ST_ATIM) | |
| 2348 /* | |
| 2349 ** struct stat has st_atim, st_mtim, and st_ctim fields of | |
| 2350 ** type timestruc_t. | |
| 2351 */ | |
| 2352 static void _MD_set_fileinfo_times( | |
| 2353 const struct stat *sb, | |
| 2354 PRFileInfo *info) | |
| 2355 { | |
| 2356 PRInt64 us, s2us; | |
| 2357 | |
| 2358 LL_I2L(s2us, PR_USEC_PER_SEC); | |
| 2359 LL_I2L(info->modifyTime, sb->st_mtim.tv_sec); | |
| 2360 LL_MUL(info->modifyTime, info->modifyTime, s2us); | |
| 2361 LL_I2L(us, sb->st_mtim.tv_nsec / 1000); | |
| 2362 LL_ADD(info->modifyTime, info->modifyTime, us); | |
| 2363 LL_I2L(info->creationTime, sb->st_ctim.tv_sec); | |
| 2364 LL_MUL(info->creationTime, info->creationTime, s2us); | |
| 2365 LL_I2L(us, sb->st_ctim.tv_nsec / 1000); | |
| 2366 LL_ADD(info->creationTime, info->creationTime, us); | |
| 2367 } | |
| 2368 | |
| 2369 static void _MD_set_fileinfo64_times( | |
| 2370 const _MDStat64 *sb, | |
| 2371 PRFileInfo64 *info) | |
| 2372 { | |
| 2373 PRInt64 us, s2us; | |
| 2374 | |
| 2375 LL_I2L(s2us, PR_USEC_PER_SEC); | |
| 2376 LL_I2L(info->modifyTime, sb->st_mtim.tv_sec); | |
| 2377 LL_MUL(info->modifyTime, info->modifyTime, s2us); | |
| 2378 LL_I2L(us, sb->st_mtim.tv_nsec / 1000); | |
| 2379 LL_ADD(info->modifyTime, info->modifyTime, us); | |
| 2380 LL_I2L(info->creationTime, sb->st_ctim.tv_sec); | |
| 2381 LL_MUL(info->creationTime, info->creationTime, s2us); | |
| 2382 LL_I2L(us, sb->st_ctim.tv_nsec / 1000); | |
| 2383 LL_ADD(info->creationTime, info->creationTime, us); | |
| 2384 } | |
| 2385 #elif defined(_PR_STAT_HAS_ST_ATIM_UNION) | |
| 2386 /* | |
| 2387 ** The st_atim, st_mtim, and st_ctim fields in struct stat are | |
| 2388 ** unions with a st__tim union member of type timestruc_t. | |
| 2389 */ | |
| 2390 static void _MD_set_fileinfo_times( | |
| 2391 const struct stat *sb, | |
| 2392 PRFileInfo *info) | |
| 2393 { | |
| 2394 PRInt64 us, s2us; | |
| 2395 | |
| 2396 LL_I2L(s2us, PR_USEC_PER_SEC); | |
| 2397 LL_I2L(info->modifyTime, sb->st_mtim.st__tim.tv_sec); | |
| 2398 LL_MUL(info->modifyTime, info->modifyTime, s2us); | |
| 2399 LL_I2L(us, sb->st_mtim.st__tim.tv_nsec / 1000); | |
| 2400 LL_ADD(info->modifyTime, info->modifyTime, us); | |
| 2401 LL_I2L(info->creationTime, sb->st_ctim.st__tim.tv_sec); | |
| 2402 LL_MUL(info->creationTime, info->creationTime, s2us); | |
| 2403 LL_I2L(us, sb->st_ctim.st__tim.tv_nsec / 1000); | |
| 2404 LL_ADD(info->creationTime, info->creationTime, us); | |
| 2405 } | |
| 2406 | |
| 2407 static void _MD_set_fileinfo64_times( | |
| 2408 const _MDStat64 *sb, | |
| 2409 PRFileInfo64 *info) | |
| 2410 { | |
| 2411 PRInt64 us, s2us; | |
| 2412 | |
| 2413 LL_I2L(s2us, PR_USEC_PER_SEC); | |
| 2414 LL_I2L(info->modifyTime, sb->st_mtim.st__tim.tv_sec); | |
| 2415 LL_MUL(info->modifyTime, info->modifyTime, s2us); | |
| 2416 LL_I2L(us, sb->st_mtim.st__tim.tv_nsec / 1000); | |
| 2417 LL_ADD(info->modifyTime, info->modifyTime, us); | |
| 2418 LL_I2L(info->creationTime, sb->st_ctim.st__tim.tv_sec); | |
| 2419 LL_MUL(info->creationTime, info->creationTime, s2us); | |
| 2420 LL_I2L(us, sb->st_ctim.st__tim.tv_nsec / 1000); | |
| 2421 LL_ADD(info->creationTime, info->creationTime, us); | |
| 2422 } | |
| 2423 #elif defined(_PR_STAT_HAS_ST_ATIMESPEC) | |
| 2424 /* | |
| 2425 ** struct stat has st_atimespec, st_mtimespec, and st_ctimespec | |
| 2426 ** fields of type struct timespec. | |
| 2427 */ | |
| 2428 #if defined(_PR_TIMESPEC_HAS_TS_SEC) | |
| 2429 static void _MD_set_fileinfo_times( | |
| 2430 const struct stat *sb, | |
| 2431 PRFileInfo *info) | |
| 2432 { | |
| 2433 PRInt64 us, s2us; | |
| 2434 | |
| 2435 LL_I2L(s2us, PR_USEC_PER_SEC); | |
| 2436 LL_I2L(info->modifyTime, sb->st_mtimespec.ts_sec); | |
| 2437 LL_MUL(info->modifyTime, info->modifyTime, s2us); | |
| 2438 LL_I2L(us, sb->st_mtimespec.ts_nsec / 1000); | |
| 2439 LL_ADD(info->modifyTime, info->modifyTime, us); | |
| 2440 LL_I2L(info->creationTime, sb->st_ctimespec.ts_sec); | |
| 2441 LL_MUL(info->creationTime, info->creationTime, s2us); | |
| 2442 LL_I2L(us, sb->st_ctimespec.ts_nsec / 1000); | |
| 2443 LL_ADD(info->creationTime, info->creationTime, us); | |
| 2444 } | |
| 2445 | |
| 2446 static void _MD_set_fileinfo64_times( | |
| 2447 const _MDStat64 *sb, | |
| 2448 PRFileInfo64 *info) | |
| 2449 { | |
| 2450 PRInt64 us, s2us; | |
| 2451 | |
| 2452 LL_I2L(s2us, PR_USEC_PER_SEC); | |
| 2453 LL_I2L(info->modifyTime, sb->st_mtimespec.ts_sec); | |
| 2454 LL_MUL(info->modifyTime, info->modifyTime, s2us); | |
| 2455 LL_I2L(us, sb->st_mtimespec.ts_nsec / 1000); | |
| 2456 LL_ADD(info->modifyTime, info->modifyTime, us); | |
| 2457 LL_I2L(info->creationTime, sb->st_ctimespec.ts_sec); | |
| 2458 LL_MUL(info->creationTime, info->creationTime, s2us); | |
| 2459 LL_I2L(us, sb->st_ctimespec.ts_nsec / 1000); | |
| 2460 LL_ADD(info->creationTime, info->creationTime, us); | |
| 2461 } | |
| 2462 #else /* _PR_TIMESPEC_HAS_TS_SEC */ | |
| 2463 /* | |
| 2464 ** The POSIX timespec structure has tv_sec and tv_nsec. | |
| 2465 */ | |
| 2466 static void _MD_set_fileinfo_times( | |
| 2467 const struct stat *sb, | |
| 2468 PRFileInfo *info) | |
| 2469 { | |
| 2470 PRInt64 us, s2us; | |
| 2471 | |
| 2472 LL_I2L(s2us, PR_USEC_PER_SEC); | |
| 2473 LL_I2L(info->modifyTime, sb->st_mtimespec.tv_sec); | |
| 2474 LL_MUL(info->modifyTime, info->modifyTime, s2us); | |
| 2475 LL_I2L(us, sb->st_mtimespec.tv_nsec / 1000); | |
| 2476 LL_ADD(info->modifyTime, info->modifyTime, us); | |
| 2477 LL_I2L(info->creationTime, sb->st_ctimespec.tv_sec); | |
| 2478 LL_MUL(info->creationTime, info->creationTime, s2us); | |
| 2479 LL_I2L(us, sb->st_ctimespec.tv_nsec / 1000); | |
| 2480 LL_ADD(info->creationTime, info->creationTime, us); | |
| 2481 } | |
| 2482 | |
| 2483 static void _MD_set_fileinfo64_times( | |
| 2484 const _MDStat64 *sb, | |
| 2485 PRFileInfo64 *info) | |
| 2486 { | |
| 2487 PRInt64 us, s2us; | |
| 2488 | |
| 2489 LL_I2L(s2us, PR_USEC_PER_SEC); | |
| 2490 LL_I2L(info->modifyTime, sb->st_mtimespec.tv_sec); | |
| 2491 LL_MUL(info->modifyTime, info->modifyTime, s2us); | |
| 2492 LL_I2L(us, sb->st_mtimespec.tv_nsec / 1000); | |
| 2493 LL_ADD(info->modifyTime, info->modifyTime, us); | |
| 2494 LL_I2L(info->creationTime, sb->st_ctimespec.tv_sec); | |
| 2495 LL_MUL(info->creationTime, info->creationTime, s2us); | |
| 2496 LL_I2L(us, sb->st_ctimespec.tv_nsec / 1000); | |
| 2497 LL_ADD(info->creationTime, info->creationTime, us); | |
| 2498 } | |
| 2499 #endif /* _PR_TIMESPEC_HAS_TS_SEC */ | |
| 2500 #elif defined(_PR_STAT_HAS_ONLY_ST_ATIME) | |
| 2501 /* | |
| 2502 ** struct stat only has st_atime, st_mtime, and st_ctime fields | |
| 2503 ** of type time_t. | |
| 2504 */ | |
| 2505 static void _MD_set_fileinfo_times( | |
| 2506 const struct stat *sb, | |
| 2507 PRFileInfo *info) | |
| 2508 { | |
| 2509 PRInt64 s, s2us; | |
| 2510 LL_I2L(s2us, PR_USEC_PER_SEC); | |
| 2511 LL_I2L(s, sb->st_mtime); | |
| 2512 LL_MUL(s, s, s2us); | |
| 2513 info->modifyTime = s; | |
| 2514 LL_I2L(s, sb->st_ctime); | |
| 2515 LL_MUL(s, s, s2us); | |
| 2516 info->creationTime = s; | |
| 2517 } | |
| 2518 | |
| 2519 static void _MD_set_fileinfo64_times( | |
| 2520 const _MDStat64 *sb, | |
| 2521 PRFileInfo64 *info) | |
| 2522 { | |
| 2523 PRInt64 s, s2us; | |
| 2524 LL_I2L(s2us, PR_USEC_PER_SEC); | |
| 2525 LL_I2L(s, sb->st_mtime); | |
| 2526 LL_MUL(s, s, s2us); | |
| 2527 info->modifyTime = s; | |
| 2528 LL_I2L(s, sb->st_ctime); | |
| 2529 LL_MUL(s, s, s2us); | |
| 2530 info->creationTime = s; | |
| 2531 } | |
| 2532 #else | |
| 2533 #error "I don't know yet" | |
| 2534 #endif | |
| 2535 | |
| 2536 static int _MD_convert_stat_to_fileinfo( | |
| 2537 const struct stat *sb, | |
| 2538 PRFileInfo *info) | |
| 2539 { | |
| 2540 if (S_IFREG & sb->st_mode) | |
| 2541 info->type = PR_FILE_FILE; | |
| 2542 else if (S_IFDIR & sb->st_mode) | |
| 2543 info->type = PR_FILE_DIRECTORY; | |
| 2544 else | |
| 2545 info->type = PR_FILE_OTHER; | |
| 2546 | |
| 2547 #if defined(_PR_HAVE_LARGE_OFF_T) | |
| 2548 if (0x7fffffffL < sb->st_size) | |
| 2549 { | |
| 2550 PR_SetError(PR_FILE_TOO_BIG_ERROR, 0); | |
| 2551 return -1; | |
| 2552 } | |
| 2553 #endif /* defined(_PR_HAVE_LARGE_OFF_T) */ | |
| 2554 info->size = sb->st_size; | |
| 2555 | |
| 2556 _MD_set_fileinfo_times(sb, info); | |
| 2557 return 0; | |
| 2558 } /* _MD_convert_stat_to_fileinfo */ | |
| 2559 | |
| 2560 static int _MD_convert_stat64_to_fileinfo64( | |
| 2561 const _MDStat64 *sb, | |
| 2562 PRFileInfo64 *info) | |
| 2563 { | |
| 2564 if (S_IFREG & sb->st_mode) | |
| 2565 info->type = PR_FILE_FILE; | |
| 2566 else if (S_IFDIR & sb->st_mode) | |
| 2567 info->type = PR_FILE_DIRECTORY; | |
| 2568 else | |
| 2569 info->type = PR_FILE_OTHER; | |
| 2570 | |
| 2571 LL_I2L(info->size, sb->st_size); | |
| 2572 | |
| 2573 _MD_set_fileinfo64_times(sb, info); | |
| 2574 return 0; | |
| 2575 } /* _MD_convert_stat64_to_fileinfo64 */ | |
| 2576 | |
| 2577 PRInt32 _MD_getfileinfo(const char *fn, PRFileInfo *info) | |
| 2578 { | |
| 2579 PRInt32 rv; | |
| 2580 struct stat sb; | |
| 2581 | |
| 2582 rv = stat(fn, &sb); | |
| 2583 if (rv < 0) | |
| 2584 _PR_MD_MAP_STAT_ERROR(_MD_ERRNO()); | |
| 2585 else if (NULL != info) | |
| 2586 rv = _MD_convert_stat_to_fileinfo(&sb, info); | |
| 2587 return rv; | |
| 2588 } | |
| 2589 | |
| 2590 PRInt32 _MD_getfileinfo64(const char *fn, PRFileInfo64 *info) | |
| 2591 { | |
| 2592 _MDStat64 sb; | |
| 2593 PRInt32 rv = _md_iovector._stat64(fn, &sb); | |
| 2594 if (rv < 0) | |
| 2595 _PR_MD_MAP_STAT_ERROR(_MD_ERRNO()); | |
| 2596 else if (NULL != info) | |
| 2597 rv = _MD_convert_stat64_to_fileinfo64(&sb, info); | |
| 2598 return rv; | |
| 2599 } | |
| 2600 | |
| 2601 PRInt32 _MD_getopenfileinfo(const PRFileDesc *fd, PRFileInfo *info) | |
| 2602 { | |
| 2603 struct stat sb; | |
| 2604 PRInt32 rv = fstat(fd->secret->md.osfd, &sb); | |
| 2605 if (rv < 0) | |
| 2606 _PR_MD_MAP_FSTAT_ERROR(_MD_ERRNO()); | |
| 2607 else if (NULL != info) | |
| 2608 rv = _MD_convert_stat_to_fileinfo(&sb, info); | |
| 2609 return rv; | |
| 2610 } | |
| 2611 | |
| 2612 PRInt32 _MD_getopenfileinfo64(const PRFileDesc *fd, PRFileInfo64 *info) | |
| 2613 { | |
| 2614 _MDStat64 sb; | |
| 2615 PRInt32 rv = _md_iovector._fstat64(fd->secret->md.osfd, &sb); | |
| 2616 if (rv < 0) | |
| 2617 _PR_MD_MAP_FSTAT_ERROR(_MD_ERRNO()); | |
| 2618 else if (NULL != info) | |
| 2619 rv = _MD_convert_stat64_to_fileinfo64(&sb, info); | |
| 2620 return rv; | |
| 2621 } | |
| 2622 | |
| 2623 /* | |
| 2624 * _md_iovector._open64 must be initialized to 'open' so that _PR_InitLog can | |
| 2625 * open the log file during NSPR initialization, before _md_iovector is | |
| 2626 * initialized by _PR_MD_FINAL_INIT. This means the log file cannot be a | |
| 2627 * large file on some platforms. | |
| 2628 */ | |
| 2629 #ifdef SYMBIAN | |
| 2630 struct _MD_IOVector _md_iovector; /* Will crash if NSPR_LOG_FILE is set. */ | |
| 2631 #else | |
| 2632 struct _MD_IOVector _md_iovector = { open }; | |
| 2633 #endif | |
| 2634 | |
| 2635 /* | |
| 2636 ** These implementations are to emulate large file routines on systems that | |
| 2637 ** don't have them. Their goal is to check whether overflow would occur; | |
| 2638 ** otherwise they just operate as normal using the 32-bit file routines. | |
| 2639 ** | |
| 2640 ** The checking might be pre- or post-op, depending on the semantics. | |
| 2641 */ | |
| 2642 | |
| 2643 #if defined(SOLARIS2_5) | |
| 2644 | |
| 2645 static PRIntn _MD_solaris25_fstat64(PRIntn osfd, _MDStat64 *buf) | |
| 2646 { | |
| 2647 PRInt32 rv; | |
| 2648 struct stat sb; | |
| 2649 | |
| 2650 rv = fstat(osfd, &sb); | |
| 2651 if (rv >= 0) | |
| 2652 { | |
| 2653 /* | |
| 2654 ** I'm only copying the fields that are immediately needed. | |
| 2655 ** If somebody else calls this function, some of the fields | |
| 2656 ** may not be defined. | |
| 2657 */ | |
| 2658 (void)memset(buf, 0, sizeof(_MDStat64)); | |
| 2659 buf->st_mode = sb.st_mode; | |
| 2660 buf->st_ctim = sb.st_ctim; | |
| 2661 buf->st_mtim = sb.st_mtim; | |
| 2662 buf->st_size = sb.st_size; | |
| 2663 } | |
| 2664 return rv; | |
| 2665 } /* _MD_solaris25_fstat64 */ | |
| 2666 | |
| 2667 static PRIntn _MD_solaris25_stat64(const char *fn, _MDStat64 *buf) | |
| 2668 { | |
| 2669 PRInt32 rv; | |
| 2670 struct stat sb; | |
| 2671 | |
| 2672 rv = stat(fn, &sb); | |
| 2673 if (rv >= 0) | |
| 2674 { | |
| 2675 /* | |
| 2676 ** I'm only copying the fields that are immediately needed. | |
| 2677 ** If somebody else calls this function, some of the fields | |
| 2678 ** may not be defined. | |
| 2679 */ | |
| 2680 (void)memset(buf, 0, sizeof(_MDStat64)); | |
| 2681 buf->st_mode = sb.st_mode; | |
| 2682 buf->st_ctim = sb.st_ctim; | |
| 2683 buf->st_mtim = sb.st_mtim; | |
| 2684 buf->st_size = sb.st_size; | |
| 2685 } | |
| 2686 return rv; | |
| 2687 } /* _MD_solaris25_stat64 */ | |
| 2688 #endif /* defined(SOLARIS2_5) */ | |
| 2689 | |
| 2690 #if defined(_PR_NO_LARGE_FILES) || defined(SOLARIS2_5) | |
| 2691 | |
| 2692 static PROffset64 _MD_Unix_lseek64(PRIntn osfd, PROffset64 offset, PRIntn whence) | |
| 2693 { | |
| 2694 PRUint64 maxoff; | |
| 2695 PROffset64 rv = minus_one; | |
| 2696 LL_I2L(maxoff, 0x7fffffff); | |
| 2697 if (LL_CMP(offset, <=, maxoff)) | |
| 2698 { | |
| 2699 off_t off; | |
| 2700 LL_L2I(off, offset); | |
| 2701 LL_I2L(rv, lseek(osfd, off, whence)); | |
| 2702 } | |
| 2703 else errno = EFBIG; /* we can't go there */ | |
| 2704 return rv; | |
| 2705 } /* _MD_Unix_lseek64 */ | |
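| /* | |
| ** Worked example of the guard above: an offset of 3 GiB (3221225472) exceeds | |
| ** the 0x7fffffff limit, so the emulated lseek64 sets errno to EFBIG and | |
| ** returns -1 without calling lseek(); an offset of 2147483647 (2 GiB - 1) is | |
| ** narrowed to an off_t and passed straight through to the 32-bit lseek(). | |
| */ | |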
| 2706 | |
| 2707 static void* _MD_Unix_mmap64( | |
| 2708 void *addr, PRSize len, PRIntn prot, PRIntn flags, | |
| 2709 PRIntn fildes, PRInt64 offset) | |
| 2710 { | |
| 2711 PR_SetError(PR_FILE_TOO_BIG_ERROR, 0); | |
| 2712 return NULL; | |
| 2713 } /* _MD_Unix_mmap64 */ | |
| 2714 #endif /* defined(_PR_NO_LARGE_FILES) || defined(SOLARIS2_5) */ | |
| 2715 | |
| 2716 /* Android <= 19 doesn't have mmap64. */ | |
| 2717 #if defined(ANDROID) && __ANDROID_API__ <= 19 | |
| 2718 PR_IMPORT(void) *__mmap2(void *, size_t, int, int, int, size_t); | |
| 2719 | |
| 2720 #define ANDROID_PAGE_SIZE 4096 | |
| 2721 | |
| 2722 static void * | |
| 2723 mmap64(void *addr, size_t len, int prot, int flags, int fd, loff_t offset) | |
| 2724 { | |
| 2725 if (offset & (ANDROID_PAGE_SIZE - 1)) { | |
| 2726 errno = EINVAL; | |
| 2727 return MAP_FAILED; | |
| 2728 } | |
| 2729 return __mmap2(addr, len, prot, flags, fd, offset / ANDROID_PAGE_SIZE); | |
| 2730 } | |
| 2731 #endif | |
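| /* | |
| ** The shim above converts the byte offset into a page offset for __mmap2. | |
| ** Worked example, assuming the 4096-byte page size used here: an offset of | |
| ** 8388608 bytes (8 MiB) is page-aligned and maps as | |
| **     __mmap2(addr, len, prot, flags, fd, 8388608 / 4096)   // 2048 pages | |
| ** while an offset of 8388609 fails the alignment check and returns MAP_FAILED | |
| ** with errno set to EINVAL. | |
| */ | |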
| 2732 | |
| 2733 #if defined(OSF1) && defined(__GNUC__) | |
| 2734 | |
| 2735 /* | |
| 2736 * On OSF1 V5.0A, <sys/stat.h> defines stat and fstat as | |
| 2737 * macros when compiled under gcc, so it is rather tricky to | |
| 2738 * take the addresses of the real functions the macros expand | |
| 2739 * to. A simple solution is to define forwarder functions | |
| 2740 * and take the addresses of the forwarder functions instead. | |
| 2741 */ | |
| 2742 | |
| 2743 static int stat_forwarder(const char *path, struct stat *buffer) | |
| 2744 { | |
| 2745 return stat(path, buffer); | |
| 2746 } | |
| 2747 | |
| 2748 static int fstat_forwarder(int filedes, struct stat *buffer) | |
| 2749 { | |
| 2750 return fstat(filedes, buffer); | |
| 2751 } | |
| 2752 | |
| 2753 #endif | |
| 2754 | |
| 2755 static void _PR_InitIOV(void) | |
| 2756 { | |
| 2757 #if defined(SOLARIS2_5) | |
| 2758 PRLibrary *lib; | |
| 2759 void *open64_func; | |
| 2760 | |
| 2761 open64_func = PR_FindSymbolAndLibrary("open64", &lib); | |
| 2762 if (NULL != open64_func) | |
| 2763 { | |
| 2764 PR_ASSERT(NULL != lib); | |
| 2765 _md_iovector._open64 = (_MD_Open64)open64_func; | |
| 2766 _md_iovector._mmap64 = (_MD_Mmap64)PR_FindSymbol(lib, "mmap64"); | |
| 2767 _md_iovector._fstat64 = (_MD_Fstat64)PR_FindSymbol(lib, "fstat64"); | |
| 2768 _md_iovector._stat64 = (_MD_Stat64)PR_FindSymbol(lib, "stat64"); | |
| 2769 _md_iovector._lseek64 = (_MD_Lseek64)PR_FindSymbol(lib, "lseek64"); | |
| 2770 (void)PR_UnloadLibrary(lib); | |
| 2771 } | |
| 2772 else | |
| 2773 { | |
| 2774 _md_iovector._open64 = open; | |
| 2775 _md_iovector._mmap64 = _MD_Unix_mmap64; | |
| 2776 _md_iovector._fstat64 = _MD_solaris25_fstat64; | |
| 2777 _md_iovector._stat64 = _MD_solaris25_stat64; | |
| 2778 _md_iovector._lseek64 = _MD_Unix_lseek64; | |
| 2779 } | |
| 2780 #elif defined(_PR_NO_LARGE_FILES) | |
| 2781 _md_iovector._open64 = open; | |
| 2782 _md_iovector._mmap64 = _MD_Unix_mmap64; | |
| 2783 _md_iovector._fstat64 = fstat; | |
| 2784 _md_iovector._stat64 = stat; | |
| 2785 _md_iovector._lseek64 = _MD_Unix_lseek64; | |
| 2786 #elif defined(_PR_HAVE_OFF64_T) | |
| 2787 #if defined(IRIX5_3) || defined(ANDROID) | |
| 2788 /* | |
| 2789 * Android doesn't have open64. We pass the O_LARGEFILE flag to open | |
| 2790 * in _MD_open. | |
| 2791 */ | |
| 2792 _md_iovector._open64 = open; | |
| 2793 #else | |
| 2794 _md_iovector._open64 = open64; | |
| 2795 #endif | |
| 2796 _md_iovector._mmap64 = mmap64; | |
| 2797 _md_iovector._fstat64 = fstat64; | |
| 2798 _md_iovector._stat64 = stat64; | |
| 2799 _md_iovector._lseek64 = lseek64; | |
| 2800 #elif defined(_PR_HAVE_LARGE_OFF_T) | |
| 2801 _md_iovector._open64 = open; | |
| 2802 _md_iovector._mmap64 = mmap; | |
| 2803 #if defined(OSF1) && defined(__GNUC__) | |
| 2804 _md_iovector._fstat64 = fstat_forwarder; | |
| 2805 _md_iovector._stat64 = stat_forwarder; | |
| 2806 #else | |
| 2807 _md_iovector._fstat64 = fstat; | |
| 2808 _md_iovector._stat64 = stat; | |
| 2809 #endif | |
| 2810 _md_iovector._lseek64 = lseek; | |
| 2811 #else | |
| 2812 #error "I don't know yet" | |
| 2813 #endif | |
| 2814 LL_I2L(minus_one, -1); | |
| 2815 } /* _PR_InitIOV */ | |
| 2816 | |
| 2817 void _PR_UnixInit(void) | |
| 2818 { | |
| 2819 struct sigaction sigact; | |
| 2820 int rv; | |
| 2821 | |
| 2822 sigemptyset(&timer_set); | |
| 2823 | |
| 2824 #if !defined(_PR_PTHREADS) | |
| 2825 | |
| 2826 sigaddset(&timer_set, SIGALRM); | |
| 2827 sigemptyset(&empty_set); | |
| 2828 intr_timeout_ticks = | |
| 2829 PR_SecondsToInterval(_PR_INTERRUPT_CHECK_INTERVAL_SECS); | |
| 2830 | |
| 2831 #if defined(SOLARIS) || defined(IRIX) | |
| 2832 | |
| 2833 if (getenv("NSPR_SIGSEGV_HANDLE")) { | |
| 2834 sigact.sa_handler = sigsegvhandler; | |
| 2835 sigact.sa_flags = 0; | |
| 2836 sigact.sa_mask = timer_set; | |
| 2837 sigaction(SIGSEGV, &sigact, 0); | |
| 2838 } | |
| 2839 | |
| 2840 if (getenv("NSPR_SIGABRT_HANDLE")) { | |
| 2841 sigact.sa_handler = sigaborthandler; | |
| 2842 sigact.sa_flags = 0; | |
| 2843 sigact.sa_mask = timer_set; | |
| 2844 sigaction(SIGABRT, &sigact, 0); | |
| 2845 } | |
| 2846 | |
| 2847 if (getenv("NSPR_SIGBUS_HANDLE")) { | |
| 2848 sigact.sa_handler = sigbushandler; | |
| 2849 sigact.sa_flags = 0; | |
| 2850 sigact.sa_mask = timer_set; | |
| 2851 sigaction(SIGBUS, &sigact, 0); | |
| 2852 } | |
| 2853 | |
| 2854 #endif | |
| 2855 #endif /* !defined(_PR_PTHREADS) */ | |
| 2856 | |
| 2857 /* | |
| 2858 * Under HP-UX DCE threads, sigaction() installs a per-thread | |
| 2859 * handler, so we use sigvector() to install a process-wide | |
| 2860 * handler. | |
| 2861 */ | |
| 2862 #if defined(HPUX) && defined(_PR_DCETHREADS) | |
| 2863 { | |
| 2864 struct sigvec vec; | |
| 2865 | |
| 2866 vec.sv_handler = SIG_IGN; | |
| 2867 vec.sv_mask = 0; | |
| 2868 vec.sv_flags = 0; | |
| 2869 rv = sigvector(SIGPIPE, &vec, NULL); | |
| 2870 PR_ASSERT(0 == rv); | |
| 2871 } | |
| 2872 #else | |
| 2873 sigact.sa_handler = SIG_IGN; | |
| 2874 sigemptyset(&sigact.sa_mask); | |
| 2875 sigact.sa_flags = 0; | |
| 2876 rv = sigaction(SIGPIPE, &sigact, 0); | |
| 2877 PR_ASSERT(0 == rv); | |
| 2878 #endif /* HPUX && _PR_DCETHREADS */ | |
| 2879 | |
| 2880 _pr_rename_lock = PR_NewLock(); | |
| 2881 PR_ASSERT(NULL != _pr_rename_lock); | |
| 2882 _pr_Xfe_mon = PR_NewMonitor(); | |
| 2883 PR_ASSERT(NULL != _pr_Xfe_mon); | |
| 2884 | |
| 2885 _PR_InitIOV(); /* one last hack */ | |
| 2886 } | |
| 2887 | |
| 2888 void _PR_UnixCleanup(void) | |
| 2889 { | |
| 2890 if (_pr_rename_lock) { | |
| 2891 PR_DestroyLock(_pr_rename_lock); | |
| 2892 _pr_rename_lock = NULL; | |
| 2893 } | |
| 2894 if (_pr_Xfe_mon) { | |
| 2895 PR_DestroyMonitor(_pr_Xfe_mon); | |
| 2896 _pr_Xfe_mon = NULL; | |
| 2897 } | |
| 2898 } | |
| 2899 | |
| 2900 #if !defined(_PR_PTHREADS) | |
| 2901 | |
| 2902 /* | |
| 2903 * Variables used by the GC code, initialized in _MD_InitSegs(). | |
| 2904 */ | |
| 2905 static PRInt32 _pr_zero_fd = -1; | |
| 2906 static PRLock *_pr_md_lock = NULL; | |
| 2907 | |
| 2908 /* | |
| 2909 * _MD_InitSegs -- | |
| 2910 * | |
| 2911 * This is Unix's version of _PR_MD_INIT_SEGS(), which is | |
| 2912 * called by _PR_InitSegs(), which in turn is called by | |
| 2913 * PR_Init(). | |
| 2914 */ | |
| 2915 void _MD_InitSegs(void) | |
| 2916 { | |
| 2917 #ifdef DEBUG | |
| 2918 /* | |
| 2919 ** Disable using mmap(2) if NSPR_NO_MMAP is set | |
| 2920 */ | |
| 2921 if (getenv("NSPR_NO_MMAP")) { | |
| 2922 _pr_zero_fd = -2; | |
| 2923 return; | |
| 2924 } | |
| 2925 #endif | |
| 2926 _pr_zero_fd = open("/dev/zero",O_RDWR , 0); | |
| 2927 /* Prevent the fd from being inherited by child processes */ | |
| 2928 fcntl(_pr_zero_fd, F_SETFD, FD_CLOEXEC); | |
| 2929 _pr_md_lock = PR_NewLock(); | |
| 2930 } | |
| 2931 | |
| 2932 PRStatus _MD_AllocSegment(PRSegment *seg, PRUint32 size, void *vaddr) | |
| 2933 { | |
| 2934 static char *lastaddr = (char*) _PR_STACK_VMBASE; | |
| 2935 PRStatus retval = PR_SUCCESS; | |
| 2936 int prot; | |
| 2937 void *rv; | |
| 2938 | |
| 2939 PR_ASSERT(seg != 0); | |
| 2940 PR_ASSERT(size != 0); | |
| 2941 | |
| 2942 PR_Lock(_pr_md_lock); | |
| 2943 if (_pr_zero_fd < 0) { | |
| 2944 from_heap: | |
| 2945 seg->vaddr = PR_MALLOC(size); | |
| 2946 if (!seg->vaddr) { | |
| 2947 retval = PR_FAILURE; | |
| 2948 } | |
| 2949 else { | |
| 2950 seg->size = size; | |
| 2951 } | |
| 2952 goto exit; | |
| 2953 } | |
| 2954 | |
| 2955 prot = PROT_READ|PROT_WRITE; | |
| 2956 /* | |
| 2957 * On Alpha Linux, the user-level thread stack needs | |
| 2958 * to be made executable because longjmp/signal seem | |
| 2959 * to put machine instructions on the stack. | |
| 2960 */ | |
| 2961 #if defined(LINUX) && defined(__alpha) | |
| 2962 prot |= PROT_EXEC; | |
| 2963 #endif | |
| 2964 rv = mmap((vaddr != 0) ? vaddr : lastaddr, size, prot, | |
| 2965 _MD_MMAP_FLAGS, | |
| 2966 _pr_zero_fd, 0); | |
| 2967 if (rv == (void*)-1) { | |
| 2968 goto from_heap; | |
| 2969 } | |
| 2970 lastaddr += size; | |
| 2971 seg->vaddr = rv; | |
| 2972 seg->size = size; | |
| 2973 seg->flags = _PR_SEG_VM; | |
| 2974 | |
| 2975 exit: | |
| 2976 PR_Unlock(_pr_md_lock); | |
| 2977 return retval; | |
| 2978 } | |
| 2979 | |
| 2980 void _MD_FreeSegment(PRSegment *seg) | |
| 2981 { | |
| 2982 if (seg->flags & _PR_SEG_VM) | |
| 2983 (void) munmap(seg->vaddr, seg->size); | |
| 2984 else | |
| 2985 PR_DELETE(seg->vaddr); | |
| 2986 } | |
| 2987 | |
| 2988 #endif /* _PR_PTHREADS */ | |
| 2989 | |
| 2990 /* | |
| 2991 *----------------------------------------------------------------------- | |
| 2992 * | |
| 2993 * PR_Now -- | |
| 2994 * | |
| 2995 * Returns the current time in microseconds since the epoch. | |
| 2996 * The epoch is midnight January 1, 1970 GMT. | |
| 2997 * The implementation is machine dependent. This is the Unix | |
| 2998 * implementation. | |
| 2999 * Cf. time_t time(time_t *tp) | |
| 3000 * | |
| 3001 *----------------------------------------------------------------------- | |
| 3002 */ | |
| 3003 | |
| 3004 PR_IMPLEMENT(PRTime) | |
| 3005 PR_Now(void) | |
| 3006 { | |
| 3007 struct timeval tv; | |
| 3008 PRInt64 s, us, s2us; | |
| 3009 | |
| 3010 GETTIMEOFDAY(&tv); | |
| 3011 LL_I2L(s2us, PR_USEC_PER_SEC); | |
| 3012 LL_I2L(s, tv.tv_sec); | |
| 3013 LL_I2L(us, tv.tv_usec); | |
| 3014 LL_MUL(s, s, s2us); | |
| 3015 LL_ADD(s, s, us); | |
| 3016 return s; | |
| 3017 } | |
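| /* | |
| ** A minimal usage sketch (variable names are illustrative): because PR_Now | |
| ** returns microseconds since the epoch, whole seconds are recovered by | |
| ** dividing by PR_USEC_PER_SEC: | |
| ** | |
| **     PRTime now = PR_Now(); | |
| **     PRInt64 secs, s2us; | |
| **     LL_I2L(s2us, PR_USEC_PER_SEC); | |
| **     LL_DIV(secs, now, s2us);    // or simply now / PR_USEC_PER_SEC where | |
| **                                 // native 64-bit arithmetic is available | |
| */ | |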
| 3018 | |
| 3019 #if defined(_MD_INTERVAL_USE_GTOD) | |
| 3020 /* | |
| 3021 * This version of interval times is based on the time of day | |
| 3022 * capability offered by the system. This isn't valid for two reasons: | |
| 3023 * 1) The time of day is neither linear nor monotonically increasing | |
| 3024 * 2) The units here are milliseconds. That's not appropriate for our use. | |
| 3025 */ | |
| 3026 PRIntervalTime _PR_UNIX_GetInterval() | |
| 3027 { | |
| 3028 struct timeval time; | |
| 3029 PRIntervalTime ticks; | |
| 3030 | |
| 3031 (void)GETTIMEOFDAY(&time); /* fallacy of course */ | |
| 3032 ticks = (PRUint32)time.tv_sec * PR_MSEC_PER_SEC; /* that's in milliseconds */ | |
| 3033 ticks += (PRUint32)time.tv_usec / PR_USEC_PER_MSEC; /* so's that */ | |
| 3034 return ticks; | |
| 3035 } /* _PR_UNIX_GetInterval */ | |
| 3036 | |
| 3037 PRIntervalTime _PR_UNIX_TicksPerSecond() | |
| 3038 { | |
| 3039 return 1000; /* this needs some work :) */ | |
| 3040 } | |
| 3041 #endif | |
| 3042 | |
| 3043 #if defined(_PR_HAVE_CLOCK_MONOTONIC) | |
| 3044 PRIntervalTime _PR_UNIX_GetInterval2() | |
| 3045 { | |
| 3046 struct timespec time; | |
| 3047 PRIntervalTime ticks; | |
| 3048 | |
| 3049 if (clock_gettime(CLOCK_MONOTONIC, &time) != 0) { | |
| 3050 fprintf(stderr, "clock_gettime failed: %d\n", errno); | |
| 3051 abort(); | |
| 3052 } | |
| 3053 | |
| 3054 ticks = (PRUint32)time.tv_sec * PR_MSEC_PER_SEC; | |
| 3055 ticks += (PRUint32)time.tv_nsec / PR_NSEC_PER_MSEC; | |
| 3056 return ticks; | |
| 3057 } | |
| 3058 | |
| 3059 PRIntervalTime _PR_UNIX_TicksPerSecond2() | |
| 3060 { | |
| 3061 return 1000; | |
| 3062 } | |
| 3063 #endif | |
| 3064 | |
| 3065 #if !defined(_PR_PTHREADS) | |
| 3066 /* | |
| 3067 * Wait for I/O on multiple descriptors. | |
| 3068 * | |
| 3069 * Return 0 if timed out, return -1 if interrupted, | |
| 3070 * else return the number of ready descriptors. | |
| 3071 */ | |
| 3072 PRInt32 _PR_WaitForMultipleFDs( | |
| 3073 _PRUnixPollDesc *unixpds, | |
| 3074 PRInt32 pdcnt, | |
| 3075 PRIntervalTime timeout) | |
| 3076 { | |
| 3077 PRPollQueue pq; | |
| 3078 PRIntn is; | |
| 3079 PRInt32 rv; | |
| 3080 _PRCPU *io_cpu; | |
| 3081 _PRUnixPollDesc *unixpd, *eunixpd; | |
| 3082 PRThread *me = _PR_MD_CURRENT_THREAD(); | |
| 3083 | |
| 3084 PR_ASSERT(!(me->flags & _PR_IDLE_THREAD)); | |
| 3085 | |
| 3086 if (_PR_PENDING_INTERRUPT(me)) { | |
| 3087 me->flags &= ~_PR_INTERRUPT; | |
| 3088 PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); | |
| 3089 return -1; | |
| 3090 } | |
| 3091 | |
| 3092 pq.pds = unixpds; | |
| 3093 pq.npds = pdcnt; | |
| 3094 | |
| 3095 _PR_INTSOFF(is); | |
| 3096 _PR_MD_IOQ_LOCK(); | |
| 3097 _PR_THREAD_LOCK(me); | |
| 3098 | |
| 3099 pq.thr = me; | |
| 3100 io_cpu = me->cpu; | |
| 3101 pq.on_ioq = PR_TRUE; | |
| 3102 pq.timeout = timeout; | |
| 3103 _PR_ADD_TO_IOQ(pq, me->cpu); | |
| 3104 | |
| 3105 #if !defined(_PR_USE_POLL) | |
| 3106 eunixpd = unixpds + pdcnt; | |
| 3107 for (unixpd = unixpds; unixpd < eunixpd; unixpd++) { | |
| 3108 PRInt32 osfd = unixpd->osfd; | |
| 3109 if (unixpd->in_flags & _PR_UNIX_POLL_READ) { | |
| 3110 FD_SET(osfd, &_PR_FD_READ_SET(me->cpu)); | |
| 3111 _PR_FD_READ_CNT(me->cpu)[osfd]++; | |
| 3112 } | |
| 3113 if (unixpd->in_flags & _PR_UNIX_POLL_WRITE) { | |
| 3114 FD_SET(osfd, &_PR_FD_WRITE_SET(me->cpu)); | |
| 3115 (_PR_FD_WRITE_CNT(me->cpu))[osfd]++; | |
| 3116 } | |
| 3117 if (unixpd->in_flags & _PR_UNIX_POLL_EXCEPT) { | |
| 3118 FD_SET(osfd, &_PR_FD_EXCEPTION_SET(me->cpu)); | |
| 3119 (_PR_FD_EXCEPTION_CNT(me->cpu))[osfd]++; | |
| 3120 } | |
| 3121 if (osfd > _PR_IOQ_MAX_OSFD(me->cpu)) { | |
| 3122 _PR_IOQ_MAX_OSFD(me->cpu) = osfd; | |
| 3123 } | |
| 3124 } | |
| 3125 #endif /* !defined(_PR_USE_POLL) */ | |
| 3126 | |
| 3127 if (_PR_IOQ_TIMEOUT(me->cpu) > timeout) { | |
| 3128 _PR_IOQ_TIMEOUT(me->cpu) = timeout; | |
| 3129 } | |
| 3130 | |
| 3131 _PR_IOQ_OSFD_CNT(me->cpu) += pdcnt; | |
| 3132 | |
| 3133 _PR_SLEEPQ_LOCK(me->cpu); | |
| 3134 _PR_ADD_SLEEPQ(me, timeout); | |
| 3135 me->state = _PR_IO_WAIT; | |
| 3136 me->io_pending = PR_TRUE; | |
| 3137 me->io_suspended = PR_FALSE; | |
| 3138 _PR_SLEEPQ_UNLOCK(me->cpu); | |
| 3139 _PR_THREAD_UNLOCK(me); | |
| 3140 _PR_MD_IOQ_UNLOCK(); | |
| 3141 | |
| 3142 _PR_MD_WAIT(me, timeout); | |
| 3143 | |
| 3144 me->io_pending = PR_FALSE; | |
| 3145 me->io_suspended = PR_FALSE; | |
| 3146 | |
| 3147 /* | |
| 3148 * This thread should run on the same cpu on which it was blocked; when | |
| 3149 * the IO request times out the fd sets and fd counts for the | |
| 3150 * cpu are updated below. | |
| 3151 */ | |
| 3152 PR_ASSERT(me->cpu == io_cpu); | |
| 3153 | |
| 3154 /* | |
| 3155 ** If we timed out the pollq might still be on the ioq. Remove it | |
| 3156 ** before continuing. | |
| 3157 */ | |
| 3158 if (pq.on_ioq) { | |
| 3159 _PR_MD_IOQ_LOCK(); | |
| 3160 /* | |
| 3161 * Need to check pq.on_ioq again | |
| 3162 */ | |
| 3163 if (pq.on_ioq) { | |
| 3164 PR_REMOVE_LINK(&pq.links); | |
| 3165 #ifndef _PR_USE_POLL | |
| 3166 eunixpd = unixpds + pdcnt; | |
| 3167 for (unixpd = unixpds; unixpd < eunixpd; unixpd++) { | |
| 3168 PRInt32 osfd = unixpd->osfd; | |
| 3169 PRInt16 in_flags = unixpd->in_flags; | |
| 3170 | |
| 3171 if (in_flags & _PR_UNIX_POLL_READ) { | |
| 3172 if (--(_PR_FD_READ_CNT(me->cpu))[osfd] == 0) | |
| 3173 FD_CLR(osfd, &_PR_FD_READ_SET(me->cpu)); | |
| 3174 } | |
| 3175 if (in_flags & _PR_UNIX_POLL_WRITE) { | |
| 3176 if (--(_PR_FD_WRITE_CNT(me->cpu))[osfd] == 0) | |
| 3177 FD_CLR(osfd, &_PR_FD_WRITE_SET(me->cpu)); | |
| 3178 } | |
| 3179 if (in_flags & _PR_UNIX_POLL_EXCEPT) { | |
| 3180 if (--(_PR_FD_EXCEPTION_CNT(me->cpu))[osfd] == 0) | |
| 3181 FD_CLR(osfd, &_PR_FD_EXCEPTION_SET(me->cpu)); | |
| 3182 } | |
| 3183 } | |
| 3184 #endif /* _PR_USE_POLL */ | |
| 3185 PR_ASSERT(pq.npds == pdcnt); | |
| 3186 _PR_IOQ_OSFD_CNT(me->cpu) -= pdcnt; | |
| 3187 PR_ASSERT(_PR_IOQ_OSFD_CNT(me->cpu) >= 0); | |
| 3188 } | |
| 3189 _PR_MD_IOQ_UNLOCK(); | |
| 3190 } | |
| 3191 /* XXX Should we use _PR_FAST_INTSON or _PR_INTSON? */ | |
| 3192 if (1 == pdcnt) { | |
| 3193 _PR_FAST_INTSON(is); | |
| 3194 } else { | |
| 3195 _PR_INTSON(is); | |
| 3196 } | |
| 3197 | |
| 3198 if (_PR_PENDING_INTERRUPT(me)) { | |
| 3199 me->flags &= ~_PR_INTERRUPT; | |
| 3200 PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); | |
| 3201 return -1; | |
| 3202 } | |
| 3203 | |
| 3204 rv = 0; | |
| 3205 if (pq.on_ioq == PR_FALSE) { | |
| 3206 /* Count the number of ready descriptors */ | |
| 3207 while (--pdcnt >= 0) { | |
| 3208 if (unixpds->out_flags != 0) { | |
| 3209 rv++; | |
| 3210 } | |
| 3211 unixpds++; | |
| 3212 } | |
| 3213 } | |
| 3214 | |
| 3215 return rv; | |
| 3216 } | |
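| /* | |
| ** A sketch of how a caller drives this routine (illustrative; the real | |
| ** callers are the local-thread poll/select paths): fill in osfd/in_flags, | |
| ** then inspect out_flags on return: | |
| ** | |
| **     _PRUnixPollDesc pd; | |
| **     pd.osfd = osfd; | |
| **     pd.in_flags = _PR_UNIX_POLL_READ; | |
| **     pd.out_flags = 0; | |
| **     PRInt32 nready = _PR_WaitForMultipleFDs(&pd, 1, PR_SecondsToInterval(5)); | |
| **     // nready == 0 on timeout, -1 if interrupted, else the ready-fd count | |
| */ | |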
| 3217 | |
| 3218 /* | |
| 3219 * Unblock threads waiting for I/O | |
| 3220 * used when interrupting threads | |
| 3221 * | |
| 3222 * NOTE: The thread lock should be held when this function is called. | |
| 3223 * On return, the thread lock is released. | |
| 3224 */ | |
| 3225 void _PR_Unblock_IO_Wait(PRThread *thr) | |
| 3226 { | |
| 3227 int pri = thr->priority; | |
| 3228 _PRCPU *cpu = thr->cpu; | |
| 3229 | |
| 3230 /* | |
| 3231 * GLOBAL threads wake up periodically to check for interrupts | |
| 3232 */ | |
| 3233 if (_PR_IS_NATIVE_THREAD(thr)) { | |
| 3234 _PR_THREAD_UNLOCK(thr); | |
| 3235 return; | |
| 3236 } | |
| 3237 | |
| 3238 PR_ASSERT(thr->flags & (_PR_ON_SLEEPQ | _PR_ON_PAUSEQ)); | |
| 3239 _PR_SLEEPQ_LOCK(cpu); | |
| 3240 _PR_DEL_SLEEPQ(thr, PR_TRUE); | |
| 3241 _PR_SLEEPQ_UNLOCK(cpu); | |
| 3242 | |
| 3243 PR_ASSERT(!(thr->flags & _PR_IDLE_THREAD)); | |
| 3244 thr->state = _PR_RUNNABLE; | |
| 3245 _PR_RUNQ_LOCK(cpu); | |
| 3246 _PR_ADD_RUNQ(thr, cpu, pri); | |
| 3247 _PR_RUNQ_UNLOCK(cpu); | |
| 3248 _PR_THREAD_UNLOCK(thr); | |
| 3249 _PR_MD_WAKEUP_WAITER(thr); | |
| 3250 } | |
| 3251 #endif /* !defined(_PR_PTHREADS) */ | |
| 3252 | |
| 3253 /* | |
| 3254 * When a nonblocking connect has completed, determine whether it | |
| 3255 * succeeded or failed, and if it failed, what the error code is. | |
| 3256 * | |
| 3257 * The function returns the error code. An error code of 0 means | |
| 3258 * that the nonblocking connect succeeded. | |
| 3259 */ | |
| 3260 | |
| 3261 int _MD_unix_get_nonblocking_connect_error(int osfd) | |
| 3262 { | |
| 3263 #if defined(NTO) | |
| 3264 /* Neutrino does not support the SO_ERROR socket option */ | |
| 3265 PRInt32 rv; | |
| 3266 PRNetAddr addr; | |
| 3267 _PRSockLen_t addrlen = sizeof(addr); | |
| 3268 | |
| 3269 /* Test to see if we are using the Tiny TCP/IP Stack or the Full one. */ | |
| 3270 struct statvfs superblock; | |
| 3271 rv = fstatvfs(osfd, &superblock); | |
| 3272 if (rv == 0) { | |
| 3273 if (strcmp(superblock.f_basetype, "ttcpip") == 0) { | |
| 3274 /* Using the Tiny Stack! */ | |
| 3275 rv = getpeername(osfd, (struct sockaddr *) &addr, | |
| 3276 (_PRSockLen_t *) &addrlen); | |
| 3277 if (rv == -1) { | |
| 3278 int errno_copy = errno; /* make a copy so I don't | |
| 3279 * accidentally reset */ | |
| 3280 | |
| 3281 if (errno_copy == ENOTCONN) { | |
| 3282 struct stat StatInfo; | |
| 3283 rv = fstat(osfd, &StatInfo); | |
| 3284 if (rv == 0) { | |
| 3285 time_t current_time = time(NULL); | |
| 3286 | |
| 3287 /* | |
| 3288 * this is a real hack; we can't explain why it | |
| 3289 * works, it just does | |
| 3290 */ | |
| 3291 if (abs(current_time - StatInfo.st_atime) < 5) { | |
| 3292 return ECONNREFUSED; | |
| 3293 } else { | |
| 3294 return ETIMEDOUT; | |
| 3295 } | |
| 3296 } else { | |
| 3297 return ECONNREFUSED; | |
| 3298 } | |
| 3299 } else { | |
| 3300 return errno_copy; | |
| 3301 } | |
| 3302 } else { | |
| 3303 /* No Error */ | |
| 3304 return 0; | |
| 3305 } | |
| 3306 } else { | |
| 3307 /* Have the FULL Stack which supports SO_ERROR */ | |
| 3308 /* Hasn't been written yet, never been tested! */ | |
| 3309 /* Jerry.Kirk@Nexwarecorp.com */ | |
| 3310 | |
| 3311 int err; | |
| 3312 _PRSockLen_t optlen = sizeof(err); | |
| 3313 | |
| 3314 if (getsockopt(osfd, SOL_SOCKET, SO_ERROR, | |
| 3315 (char *) &err, &optlen) == -1) { | |
| 3316 return errno; | |
| 3317 } else { | |
| 3318 return err; | |
| 3319 } | |
| 3320 } | |
| 3321 } else { | |
| 3322 return ECONNREFUSED; | |
| 3323 } | |
| 3324 #elif defined(UNIXWARE) | |
| 3325 /* | |
| 3326 * getsockopt() fails with EPIPE, so use getmsg() instead. | |
| 3327 */ | |
| 3328 | |
| 3329 int rv; | |
| 3330 int flags = 0; | |
| 3331 rv = getmsg(osfd, NULL, NULL, &flags); | |
| 3332 PR_ASSERT(-1 == rv || 0 == rv); | |
| 3333 if (-1 == rv && errno != EAGAIN && errno != EWOULDBLOCK) { | |
| 3334 return errno; | |
| 3335 } | |
| 3336 return 0; /* no error */ | |
| 3337 #else | |
| 3338 int err; | |
| 3339 _PRSockLen_t optlen = sizeof(err); | |
| 3340 if (getsockopt(osfd, SOL_SOCKET, SO_ERROR, (char *) &err, &optlen) == -1) { | |
| 3341 return errno; | |
| 3342 } else { | |
| 3343 return err; | |
| 3344 } | |
| 3345 #endif | |
| 3346 } | |
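| /* | |
| ** Typical use of this helper (illustrative, not a public API): after a | |
| ** nonblocking connect() returns -1 with errno == EINPROGRESS, wait for the | |
| ** socket to become writable, then query the outcome: | |
| ** | |
| **     if (connect(osfd, (struct sockaddr *)&addr, addrlen) < 0 | |
| **             && errno == EINPROGRESS) { | |
| **         // ... wait until poll()/select() reports the fd writable ... | |
| **         int err = _MD_unix_get_nonblocking_connect_error(osfd); | |
| **         if (err != 0) { | |
| **             // err holds the errno value, e.g. ECONNREFUSED or ETIMEDOUT | |
| **         } | |
| **     } | |
| */ | |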
| 3347 | |
| 3348 /************************************************************************/ | |
| 3349 | |
| 3350 /* | |
| 3351 ** Special hacks for xlib. Xlib/Xt/Xm is not re-entrant nor is it thread | |
| 3352 ** safe. Unfortunately, neither is mozilla. To make these programs work | |
| 3353 ** in a pre-emptive threaded environment, we need to use a lock. | |
| 3354 */ | |
| 3355 | |
| 3356 void PR_XLock(void) | |
| 3357 { | |
| 3358 PR_EnterMonitor(_pr_Xfe_mon); | |
| 3359 } | |
| 3360 | |
| 3361 void PR_XUnlock(void) | |
| 3362 { | |
| 3363 PR_ExitMonitor(_pr_Xfe_mon); | |
| 3364 } | |
| 3365 | |
| 3366 PRBool PR_XIsLocked(void) | |
| 3367 { | |
| 3368 return (PR_InMonitor(_pr_Xfe_mon)) ? PR_TRUE : PR_FALSE; | |
| 3369 } | |
| 3370 | |
| 3371 void PR_XWait(int ms) | |
| 3372 { | |
| 3373 PR_Wait(_pr_Xfe_mon, PR_MillisecondsToInterval(ms)); | |
| 3374 } | |
| 3375 | |
| 3376 void PR_XNotify(void) | |
| 3377 { | |
| 3378 PR_Notify(_pr_Xfe_mon); | |
| 3379 } | |
| 3380 | |
| 3381 void PR_XNotifyAll(void) | |
| 3382 { | |
| 3383 PR_NotifyAll(_pr_Xfe_mon); | |
| 3384 } | |
| 3385 | |
| 3386 #if defined(HAVE_FCNTL_FILE_LOCKING) | |
| 3387 | |
| 3388 PRStatus | |
| 3389 _MD_LockFile(PRInt32 f) | |
| 3390 { | |
| 3391 PRInt32 rv; | |
| 3392 struct flock arg; | |
| 3393 | |
| 3394 arg.l_type = F_WRLCK; | |
| 3395 arg.l_whence = SEEK_SET; | |
| 3396 arg.l_start = 0; | |
| 3397 arg.l_len = 0; /* until EOF */ | |
| 3398 rv = fcntl(f, F_SETLKW, &arg); | |
| 3399 if (rv == 0) | |
| 3400 return PR_SUCCESS; | |
| 3401 _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO()); | |
| 3402 return PR_FAILURE; | |
| 3403 } | |
| 3404 | |
| 3405 PRStatus | |
| 3406 _MD_TLockFile(PRInt32 f) | |
| 3407 { | |
| 3408 PRInt32 rv; | |
| 3409 struct flock arg; | |
| 3410 | |
| 3411 arg.l_type = F_WRLCK; | |
| 3412 arg.l_whence = SEEK_SET; | |
| 3413 arg.l_start = 0; | |
| 3414 arg.l_len = 0; /* until EOF */ | |
| 3415 rv = fcntl(f, F_SETLK, &arg); | |
| 3416 if (rv == 0) | |
| 3417 return PR_SUCCESS; | |
| 3418 _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO()); | |
| 3419 return PR_FAILURE; | |
| 3420 } | |
| 3421 | |
| 3422 PRStatus | |
| 3423 _MD_UnlockFile(PRInt32 f) | |
| 3424 { | |
| 3425 PRInt32 rv; | |
| 3426 struct flock arg; | |
| 3427 | |
| 3428 arg.l_type = F_UNLCK; | |
| 3429 arg.l_whence = SEEK_SET; | |
| 3430 arg.l_start = 0; | |
| 3431 arg.l_len = 0; /* until EOF */ | |
| 3432 rv = fcntl(f, F_SETLK, &arg); | |
| 3433 if (rv == 0) | |
| 3434 return PR_SUCCESS; | |
| 3435 _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO()); | |
| 3436 return PR_FAILURE; | |
| 3437 } | |
| 3438 | |
| 3439 #elif defined(HAVE_BSD_FLOCK) | |
| 3440 | |
| 3441 #include <sys/file.h> | |
| 3442 | |
| 3443 PRStatus | |
| 3444 _MD_LockFile(PRInt32 f) | |
| 3445 { | |
| 3446 PRInt32 rv; | |
| 3447 rv = flock(f, LOCK_EX); | |
| 3448 if (rv == 0) | |
| 3449 return PR_SUCCESS; | |
| 3450 _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO()); | |
| 3451 return PR_FAILURE; | |
| 3452 } | |
| 3453 | |
| 3454 PRStatus | |
| 3455 _MD_TLockFile(PRInt32 f) | |
| 3456 { | |
| 3457 PRInt32 rv; | |
| 3458 rv = flock(f, LOCK_EX|LOCK_NB); | |
| 3459 if (rv == 0) | |
| 3460 return PR_SUCCESS; | |
| 3461 _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO()); | |
| 3462 return PR_FAILURE; | |
| 3463 } | |
| 3464 | |
| 3465 PRStatus | |
| 3466 _MD_UnlockFile(PRInt32 f) | |
| 3467 { | |
| 3468 PRInt32 rv; | |
| 3469 rv = flock(f, LOCK_UN); | |
| 3470 if (rv == 0) | |
| 3471 return PR_SUCCESS; | |
| 3472 _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO()); | |
| 3473 return PR_FAILURE; | |
| 3474 } | |
| 3475 #else | |
| 3476 | |
| 3477 PRStatus | |
| 3478 _MD_LockFile(PRInt32 f) | |
| 3479 { | |
| 3480 PRInt32 rv; | |
| 3481 rv = lockf(f, F_LOCK, 0); | |
| 3482 if (rv == 0) | |
| 3483 return PR_SUCCESS; | |
| 3484 _PR_MD_MAP_LOCKF_ERROR(_MD_ERRNO()); | |
| 3485 return PR_FAILURE; | |
| 3486 } | |
| 3487 | |
| 3488 PRStatus | |
| 3489 _MD_TLockFile(PRInt32 f) | |
| 3490 { | |
| 3491 PRInt32 rv; | |
| 3492 rv = lockf(f, F_TLOCK, 0); | |
| 3493 if (rv == 0) | |
| 3494 return PR_SUCCESS; | |
| 3495 _PR_MD_MAP_LOCKF_ERROR(_MD_ERRNO()); | |
| 3496 return PR_FAILURE; | |
| 3497 } | |
| 3498 | |
| 3499 PRStatus | |
| 3500 _MD_UnlockFile(PRInt32 f) | |
| 3501 { | |
| 3502 PRInt32 rv; | |
| 3503 rv = lockf(f, F_ULOCK, 0); | |
| 3504 if (rv == 0) | |
| 3505 return PR_SUCCESS; | |
| 3506 _PR_MD_MAP_LOCKF_ERROR(_MD_ERRNO()); | |
| 3507 return PR_FAILURE; | |
| 3508 } | |
| 3509 #endif | |
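/*
 * Illustrative sketch (not part of the original file): all three platform
 * variants above implement the same triple -- a blocking lock, a
 * non-blocking try-lock and an unlock -- reached through the public
 * PR_LockFile/PR_TLockFile/PR_UnlockFile API.  The file name and the
 * function name below are hypothetical.
 */
#if 0
static void sample_whole_file_lock(void)
{
    PRFileDesc *fd = PR_Open("/tmp/example.lock", PR_CREATE_FILE | PR_RDWR, 0666);

    if (fd != NULL) {
        if (PR_TLockFile(fd) == PR_SUCCESS) {  /* fails instead of blocking */
            /* ... the whole file is exclusively locked here ... */
            (void) PR_UnlockFile(fd);
        }
        (void) PR_Close(fd);
    }
}
#endif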
| 3510 | |
| 3511 PRStatus _MD_gethostname(char *name, PRUint32 namelen) | |
| 3512 { | |
| 3513 PRIntn rv; | |
| 3514 | |
| 3515 rv = gethostname(name, namelen); | |
| 3516 if (0 == rv) { | |
| 3517 return PR_SUCCESS; | |
| 3518 } | |
| 3519 _PR_MD_MAP_GETHOSTNAME_ERROR(_MD_ERRNO()); | |
| 3520 return PR_FAILURE; | |
| 3521 } | |
| 3522 | |
| 3523 PRStatus _MD_getsysinfo(PRSysInfo cmd, char *name, PRUint32 namelen) | |
| 3524 { | |
| 3525 struct utsname info; | |
| 3526 | |
| 3527 PR_ASSERT((cmd == PR_SI_SYSNAME) || (cmd == PR_SI_RELEASE)); | |
| 3528 | |
| 3529 if (uname(&info) == -1) { | |
| 3530 _PR_MD_MAP_DEFAULT_ERROR(errno); | |
| 3531 return PR_FAILURE; | |
| 3532 } | |
| 3533 if (PR_SI_SYSNAME == cmd) | |
| 3534 (void)PR_snprintf(name, namelen, "%s", info.sysname); | |
| 3535 else if (PR_SI_RELEASE == cmd) | |
| 3536 (void)PR_snprintf(name, namelen, "%s", info.release); | |
| 3537 else | |
| 3538 return PR_FAILURE; | |
| 3539 return PR_SUCCESS; | |
| 3540 } | |
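/*
 * Illustrative sketch (not part of the original file): callers reach
 * _MD_getsysinfo() through the public PR_GetSystemInfo() wrapper.  The
 * function name and the 256-byte buffer size are arbitrary choices for
 * the sketch.
 */
#if 0
static void sample_query_release(void)
{
    char release[256];

    if (PR_GetSystemInfo(PR_SI_RELEASE, release, sizeof(release)) == PR_SUCCESS) {
        /* release now holds the uname() release string */
    }
}
#endif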
| 3541 | |
| 3542 /* | |
| 3543 ******************************************************************* | |
| 3544 * | |
| 3545 * Memory-mapped files | |
| 3546 * | |
| 3547 ******************************************************************* | |
| 3548 */ | |
| 3549 | |
| 3550 PRStatus _MD_CreateFileMap(PRFileMap *fmap, PRInt64 size) | |
| 3551 { | |
| 3552 PRFileInfo info; | |
| 3553 PRUint32 sz; | |
| 3554 | |
| 3555 LL_L2UI(sz, size); | |
| 3556 if (sz) { | |
| 3557 if (PR_GetOpenFileInfo(fmap->fd, &info) == PR_FAILURE) { | |
| 3558 return PR_FAILURE; | |
| 3559 } | |
| 3560 if (sz > info.size) { | |
| 3561 /* | |
| 3562 * Need to extend the file (write a byte at offset sz - 1) so the whole mapped range is backed | |
| 3563 */ | |
| 3564 if (fmap->prot != PR_PROT_READWRITE) { | |
| 3565 PR_SetError(PR_NO_ACCESS_RIGHTS_ERROR, 0); | |
| 3566 return PR_FAILURE; | |
| 3567 } | |
| 3568 if (PR_Seek(fmap->fd, sz - 1, PR_SEEK_SET) == -1) { | |
| 3569 return PR_FAILURE; | |
| 3570 } | |
| 3571 if (PR_Write(fmap->fd, "", 1) != 1) { | |
| 3572 return PR_FAILURE; | |
| 3573 } | |
| 3574 } | |
| 3575 } | |
| 3576 if (fmap->prot == PR_PROT_READONLY) { | |
| 3577 fmap->md.prot = PROT_READ; | |
| 3578 #ifdef OSF1V4_MAP_PRIVATE_BUG | |
| 3579 /* | |
| 3580 * Use MAP_SHARED to work around a bug in OSF1 V4.0D | |
| 3581 * (QAR 70220 in the OSF_QAR database) that results in | |
| 3582 * corrupted data in the memory-mapped region. This | |
| 3583 * bug is fixed in V5.0. | |
| 3584 */ | |
| 3585 fmap->md.flags = MAP_SHARED; | |
| 3586 #else | |
| 3587 fmap->md.flags = MAP_PRIVATE; | |
| 3588 #endif | |
| 3589 } else if (fmap->prot == PR_PROT_READWRITE) { | |
| 3590 fmap->md.prot = PROT_READ | PROT_WRITE; | |
| 3591 fmap->md.flags = MAP_SHARED; | |
| 3592 } else { | |
| 3593 PR_ASSERT(fmap->prot == PR_PROT_WRITECOPY); | |
| 3594 fmap->md.prot = PROT_READ | PROT_WRITE; | |
| 3595 fmap->md.flags = MAP_PRIVATE; | |
| 3596 } | |
| 3597 return PR_SUCCESS; | |
| 3598 } | |
| 3599 | |
| 3600 void * _MD_MemMap( | |
| 3601 PRFileMap *fmap, | |
| 3602 PRInt64 offset, | |
| 3603 PRUint32 len) | |
| 3604 { | |
| 3605 PRInt32 off; | |
| 3606 void *addr; | |
| 3607 | |
| 3608 LL_L2I(off, offset); | |
| 3609 if ((addr = mmap(0, len, fmap->md.prot, fmap->md.flags, | |
| 3610 fmap->fd->secret->md.osfd, off)) == (void *) -1) { | |
| 3611 _PR_MD_MAP_MMAP_ERROR(_MD_ERRNO()); | |
| 3612 addr = NULL; | |
| 3613 } | |
| 3614 return addr; | |
| 3615 } | |
| 3616 | |
| 3617 PRStatus _MD_MemUnmap(void *addr, PRUint32 len) | |
| 3618 { | |
| 3619 if (munmap(addr, len) == 0) { | |
| 3620 return PR_SUCCESS; | |
| 3621 } | |
| 3622 _PR_MD_MAP_DEFAULT_ERROR(errno); | |
| 3623 return PR_FAILURE; | |
| 3624 } | |
| 3625 | |
| 3626 PRStatus _MD_CloseFileMap(PRFileMap *fmap) | |
| 3627 { | |
| 3628 if ( PR_TRUE == fmap->md.isAnonFM ) { | |
| 3629 PRStatus rc = PR_Close( fmap->fd ); | |
| 3630 if ( PR_FAILURE == rc ) { | |
| 3631 PR_LOG( _pr_io_lm, PR_LOG_DEBUG, | |
| 3632 ("_MD_CloseFileMap(): error closing anonymnous file map osfd")); | |
| 3633 return PR_FAILURE; | |
| 3634 } | |
| 3635 } | |
| 3636 PR_DELETE(fmap); | |
| 3637 return PR_SUCCESS; | |
| 3638 } | |
| 3639 | |
| 3640 PRStatus _MD_SyncMemMap( | |
| 3641 PRFileDesc *fd, | |
| 3642 void *addr, | |
| 3643 PRUint32 len) | |
| 3644 { | |
| 3645 /* msync(..., MS_SYNC) alone is sufficient to flush modified data to disk | |
| 3646 * synchronously. It is not necessary to call fsync. */ | |
| 3647 if (msync(addr, len, MS_SYNC) == 0) { | |
| 3648 return PR_SUCCESS; | |
| 3649 } | |
| 3650 _PR_MD_MAP_DEFAULT_ERROR(errno); | |
| 3651 return PR_FAILURE; | |
| 3652 } | |
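/*
 * Illustrative sketch (not part of the original file): the primitives above
 * back NSPR's public file-mapping API.  A read-only mapping of an existing
 * file looks roughly like this; the file name, mapping length and function
 * name are hypothetical.
 */
#if 0
static void sample_read_only_mapping(void)
{
    PRUint32 len = 4096;       /* hypothetical mapping length */
    PRInt64 zero;
    PRFileDesc *fd = PR_Open("/tmp/example.dat", PR_RDONLY, 0);

    LL_I2L(zero, 0);
    if (fd != NULL) {
        /* size 0: take the sz == 0 path above, i.e. never extend the file */
        PRFileMap *fmap = PR_CreateFileMap(fd, zero, PR_PROT_READONLY);
        if (fmap != NULL) {
            void *addr = PR_MemMap(fmap, zero, len);
            if (addr != NULL) {
                /* ... read the mapped bytes ... */
                (void) PR_MemUnmap(addr, len);
            }
            (void) PR_CloseFileMap(fmap);
        }
        (void) PR_Close(fd);
    }
}
#endif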
| 3653 | |
| 3654 #if defined(_PR_NEED_FAKE_POLL) | |
| 3655 | |
| 3656 /* | |
| 3657 * Some platforms don't have poll(). For easier porting of code | |
| 3658 * that calls poll(), we emulate poll() using select(). | |
| 3659 */ | |
| 3660 | |
| 3661 int poll(struct pollfd *filedes, unsigned long nfds, int timeout) | |
| 3662 { | |
| 3663 int i; | |
| 3664 int rv; | |
| 3665 int maxfd; | |
| 3666 fd_set rd, wr, ex; | |
| 3667 struct timeval tv, *tvp; | |
| 3668 | |
| 3669 if (timeout < 0 && timeout != -1) { | |
| 3670 errno = EINVAL; | |
| 3671 return -1; | |
| 3672 } | |
| 3673 | |
| 3674 if (timeout == -1) { | |
| 3675 tvp = NULL; | |
| 3676 } else { | |
| 3677 tv.tv_sec = timeout / 1000; | |
| 3678 tv.tv_usec = (timeout % 1000) * 1000; | |
| 3679 tvp = &tv; | |
| 3680 } | |
| 3681 | |
| 3682 maxfd = -1; | |
| 3683 FD_ZERO(&rd); | |
| 3684 FD_ZERO(&wr); | |
| 3685 FD_ZERO(&ex); | |
| 3686 | |
| 3687 for (i = 0; i < nfds; i++) { | |
| 3688 int osfd = filedes[i].fd; | |
| 3689 int events = filedes[i].events; | |
| 3690 PRBool fdHasEvent = PR_FALSE; | |
| 3691 | |
| 3692 if (osfd < 0) { | |
| 3693 continue; /* Skip this osfd. */ | |
| 3694 } | |
| 3695 | |
| 3696 /* | |
| 3697 * Map the poll events to the select fd_sets. | |
| 3698 * POLLIN, POLLRDNORM ===> readable | |
| 3699 * POLLOUT, POLLWRNORM ===> writable | |
| 3700 * POLLPRI, POLLRDBAND ===> exception | |
| 3701 * POLLNORM, POLLWRBAND (and POLLMSG on some platforms) | |
| 3702 * are ignored. | |
| 3703 * | |
| 3704 * The output events POLLERR and POLLHUP are never turned on. | |
| 3705 * POLLNVAL may be turned on. | |
| 3706 */ | |
| 3707 | |
| 3708 if (events & (POLLIN | POLLRDNORM)) { | |
| 3709 FD_SET(osfd, &rd); | |
| 3710 fdHasEvent = PR_TRUE; | |
| 3711 } | |
| 3712 if (events & (POLLOUT | POLLWRNORM)) { | |
| 3713 FD_SET(osfd, &wr); | |
| 3714 fdHasEvent = PR_TRUE; | |
| 3715 } | |
| 3716 if (events & (POLLPRI | POLLRDBAND)) { | |
| 3717 FD_SET(osfd, &ex); | |
| 3718 fdHasEvent = PR_TRUE; | |
| 3719 } | |
| 3720 if (fdHasEvent && osfd > maxfd) { | |
| 3721 maxfd = osfd; | |
| 3722 } | |
| 3723 } | |
| 3724 | |
| 3725 rv = select(maxfd + 1, &rd, &wr, &ex, tvp); | |
| 3726 | |
| 3727 /* Compute poll results */ | |
| 3728 if (rv > 0) { | |
| 3729 rv = 0; | |
| 3730 for (i = 0; i < nfds; i++) { | |
| 3731 PRBool fdHasEvent = PR_FALSE; | |
| 3732 | |
| 3733 filedes[i].revents = 0; | |
| 3734 if (filedes[i].fd < 0) { | |
| 3735 continue; | |
| 3736 } | |
| 3737 if (FD_ISSET(filedes[i].fd, &rd)) { | |
| 3738 if (filedes[i].events & POLLIN) { | |
| 3739 filedes[i].revents |= POLLIN; | |
| 3740 } | |
| 3741 if (filedes[i].events & POLLRDNORM) { | |
| 3742 filedes[i].revents |= POLLRDNORM; | |
| 3743 } | |
| 3744 fdHasEvent = PR_TRUE; | |
| 3745 } | |
| 3746 if (FD_ISSET(filedes[i].fd, &wr)) { | |
| 3747 if (filedes[i].events & POLLOUT) { | |
| 3748 filedes[i].revents |= POLLOUT; | |
| 3749 } | |
| 3750 if (filedes[i].events & POLLWRNORM) { | |
| 3751 filedes[i].revents |= POLLWRNORM; | |
| 3752 } | |
| 3753 fdHasEvent = PR_TRUE; | |
| 3754 } | |
| 3755 if (FD_ISSET(filedes[i].fd, &ex)) { | |
| 3756 if (filedes[i].events & POLLPRI) { | |
| 3757 filedes[i].revents |= POLLPRI; | |
| 3758 } | |
| 3759 if (filedes[i].events & POLLRDBAND) { | |
| 3760 filedes[i].revents |= POLLRDBAND; | |
| 3761 } | |
| 3762 fdHasEvent = PR_TRUE; | |
| 3763 } | |
| 3764 if (fdHasEvent) { | |
| 3765 rv++; | |
| 3766 } | |
| 3767 } | |
| 3768 PR_ASSERT(rv > 0); | |
| 3769 } else if (rv == -1 && errno == EBADF) { | |
| 3770 rv = 0; | |
| 3771 for (i = 0; i < nfds; i++) { | |
| 3772 filedes[i].revents = 0; | |
| 3773 if (filedes[i].fd < 0) { | |
| 3774 continue; | |
| 3775 } | |
| 3776 if (fcntl(filedes[i].fd, F_GETFL, 0) == -1) { | |
| 3777 filedes[i].revents = POLLNVAL; | |
| 3778 rv++; | |
| 3779 } | |
| 3780 } | |
| 3781 PR_ASSERT(rv > 0); | |
| 3782 } | |
| 3783 PR_ASSERT(-1 != timeout || rv != 0); | |
| 3784 | |
| 3785 return rv; | |
| 3786 } | |
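/*
 * Illustrative sketch (not part of the original file): callers use the
 * emulation exactly like the native poll(); e.g. a 500 ms wait for
 * readability, which the code above converts to tv_sec = 0 and
 * tv_usec = 500000.  The descriptor and function name are hypothetical.
 */
#if 0
static int sample_wait_readable(int osfd)
{
    struct pollfd pfd;
    int nready;

    pfd.fd = osfd;
    pfd.events = POLLIN;
    pfd.revents = 0;
    nready = poll(&pfd, 1, 500);
    return (nready > 0) && (pfd.revents & POLLIN);
}
#endif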
| 3787 #endif /* _PR_NEED_FAKE_POLL */ | |